Dataset schema: code (string, 501–5.19M chars), package (string, 2–81 chars), path (string, 9–304 chars), filename (string, 4–145 chars).
import numbers import warnings from contextlib import suppress import numpy as np import scipy.sparse as sp # mypy error: Module 'numpy.core.numeric' has no attribute 'ComplexWarning' from numpy.core.numeric import ComplexWarning # type: ignore from sklearn._config import get_config as _get_config from sklearn.exceptions import DataConversionWarning from sklearn.utils.fixes import _object_dtype_isnan def _assert_all_finite( X, allow_nan=False, msg_dtype=None, estimator_name=None, input_name="" ): """Like assert_all_finite, but only for ndarray.""" # validation is also imported in extmath from sklearn.utils.extmath import _safe_accumulator_op if _get_config()["assume_finite"]: return X = np.asanyarray(X) # First try an O(n) time, O(1) space solution for the common case that # everything is finite; fall back to O(n) space np.isfinite to prevent # false positives from overflow in sum method. The sum is also calculated # safely to reduce dtype induced overflows. is_float = X.dtype.kind in "fc" if is_float and (np.isfinite(_safe_accumulator_op(np.sum, X))): pass elif is_float: if ( allow_nan and np.isinf(X).any() or not allow_nan and not np.isfinite(X).all() ): if not allow_nan and np.isnan(X).any(): type_err = "NaN" else: msg_dtype = msg_dtype if msg_dtype is not None else X.dtype type_err = f"infinity or a value too large for {msg_dtype!r}" padded_input_name = input_name + " " if input_name else "" msg_err = f"Input {padded_input_name}contains {type_err}." if ( not allow_nan and estimator_name and input_name == "X" and np.isnan(X).any() ): # Improve the error message on how to handle missing values in # scikit-learn. msg_err += ( f"\n{estimator_name} does not accept missing values" " encoded as NaN natively. For supervised learning, you might want" " to consider sklearn.ensemble.HistGradientBoostingClassifier and" " Regressor which accept missing values encoded as NaNs natively." " Alternatively, it is possible to preprocess the data, for" " instance by using an imputer transformer in a pipeline or to drop" " samples with missing values. See" " https://scikit-learn.org/stable/modules/impute.html" ) raise ValueError(msg_err) # for object dtype data, we only check for NaNs (GH-13254) elif X.dtype == np.dtype("object") and not allow_nan: if _object_dtype_isnan(X).any(): raise ValueError("Input contains NaN") def _num_samples(x): """Return number of samples in array-like x.""" message = "Expected sequence or array-like, got %s" % type(x) if hasattr(x, "fit") and callable(x.fit): # Don't get num_samples from an ensemble's length! raise TypeError(message) if not hasattr(x, "__len__") and not hasattr(x, "shape"): if hasattr(x, "__array__"): x = np.asarray(x) else: raise TypeError(message) if hasattr(x, "shape") and x.shape is not None: if len(x.shape) == 0: raise TypeError( "Singleton array %r cannot be considered a valid collection." % x ) # Check that shape is returning an integer or default to len # Dask dataframes may not return numeric shape[0] value if isinstance(x.shape[0], numbers.Integral): return x.shape[0] try: return len(x) except TypeError as type_error: raise TypeError(message) from type_error def check_consistent_length(*arrays): """Check that all arrays have consistent first dimensions. Checks whether all objects in arrays have the same shape or length. Parameters ---------- *arrays : list or tuple of input objects. Objects that will be checked for consistent length.
""" lengths = [_num_samples(X) for X in arrays if X is not None] uniques = np.unique(lengths) if len(uniques) > 1: int_lengths = [int(length) for length in lengths] raise ValueError( f"Found input variables with inconsistent numbers of samples: {int_lengths!r}" ) def _ensure_sparse_format( spmatrix, accept_sparse, dtype, copy, force_all_finite, accept_large_sparse, estimator_name=None, input_name="", ): """Convert a sparse matrix to a given format. Checks the sparse format of spmatrix and converts if necessary. Parameters ---------- spmatrix : sparse matrix Input to validate and convert. accept_sparse : str, bool or list/tuple of str String[s] representing allowed sparse matrix formats ('csc', 'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). If the input is sparse but not in the allowed format, it will be converted to the first listed format. True allows the input to be any format. False means that a sparse matrix input will raise an error. dtype : str, type or None Data type of result. If None, the dtype of the input is preserved. copy : bool Whether a forced copy will be triggered. If copy=False, a copy might be triggered by a conversion. force_all_finite : bool or 'allow-nan' Whether to raise an error on np.inf, np.nan, pd.NA in X. The possibilities are: - True: Force all values of X to be finite. - False: accepts np.inf, np.nan, pd.NA in X. - 'allow-nan': accepts only np.nan and pd.NA values in X. Values cannot be infinite. .. versionadded:: 0.20 ``force_all_finite`` accepts the string ``'allow-nan'``. .. versionchanged:: 0.23 Accepts `pd.NA` and converts it into `np.nan` estimator_name : str, default=None The estimator name, used to construct the error message. input_name : str, default="" The data name used to construct the error message. In particular if `input_name` is "X" and the data has NaN values and allow_nan is False, the error message will link to the imputer documentation. Returns ------- spmatrix_converted : sparse matrix. Matrix that is ensured to have an allowed type. """ if dtype is None: dtype = spmatrix.dtype changed_format = False if isinstance(accept_sparse, str): accept_sparse = [accept_sparse] # Indices dtype validation _check_large_sparse(spmatrix, accept_large_sparse) if accept_sparse is False: raise TypeError( "A sparse matrix was passed, but dense " "data is required. Use X.toarray() to " "convert to a dense numpy array." ) if isinstance(accept_sparse, (list, tuple)): if len(accept_sparse) == 0: raise ValueError( "When providing 'accept_sparse' " "as a tuple or list, it must contain at " "least one string value." ) # ensure correct sparse format if spmatrix.format not in accept_sparse: # create new with correct sparse spmatrix = spmatrix.asformat(accept_sparse[0]) changed_format = True elif accept_sparse is not True: # any other type raise ValueError( "Parameter 'accept_sparse' should be a string, " "boolean or list of strings. You provided " f"'accept_sparse={accept_sparse}'." ) if dtype != spmatrix.dtype: # convert dtype spmatrix = spmatrix.astype(dtype) elif copy and not changed_format: # force copy spmatrix = spmatrix.copy() if force_all_finite: if not hasattr(spmatrix, "data"): warnings.warn( "Can't check %s sparse matrix for nan or inf." 
% spmatrix.format, stacklevel=2, ) else: _assert_all_finite( spmatrix.data, allow_nan=force_all_finite == "allow-nan", estimator_name=estimator_name, input_name=input_name, ) return spmatrix def _ensure_no_complex_data(array): if ( hasattr(array, "dtype") and array.dtype is not None and hasattr(array.dtype, "kind") and array.dtype.kind == "c" ): raise ValueError(f"Complex data not supported\n{array}\n") def _check_estimator_name(estimator): if estimator is not None: if isinstance(estimator, str): return estimator return estimator.__class__.__name__ return None def _pandas_dtype_needs_early_conversion(pd_dtype): """Return True if pandas extension pd_dtype needs to be converted early.""" # Check these early for pandas versions without extension dtypes from pandas.api.types import ( is_bool_dtype, is_float_dtype, is_integer_dtype, is_sparse, ) if is_bool_dtype(pd_dtype): # bool and extension booleans need early conversion because __array__ # converts mixed dtype dataframes into object dtypes return True if is_sparse(pd_dtype): # Sparse arrays will be converted later in `check_array` return False try: from pandas.api.types import is_extension_array_dtype except ImportError: return False if is_sparse(pd_dtype) or not is_extension_array_dtype(pd_dtype): # Sparse arrays will be converted later in `check_array` # Only handle extension arrays for integer and floats return False elif is_float_dtype(pd_dtype): # Float ndarrays can normally support nans. They need to be converted # first to map pd.NA to np.nan return True elif is_integer_dtype(pd_dtype): return True return False def check_array( array, accept_sparse=False, *, accept_large_sparse=True, dtype="numeric", order=None, copy=False, force_all_finite=True, ensure_2d=True, allow_nd=False, ensure_min_samples=1, ensure_min_features=1, estimator=None, input_name="", ): """Input validation on an array, list, sparse matrix or similar. By default, the input is checked to be a non-empty 2D array containing only finite values. If the dtype of the array is object, attempt converting to float, raising on failure. Parameters ---------- array : object Input object to check / convert. accept_sparse : str, bool or list/tuple of str, default=False String[s] representing allowed sparse matrix formats, such as 'csc', 'csr', etc. If the input is sparse but not in the allowed format, it will be converted to the first listed format. True allows the input to be any format. False means that a sparse matrix input will raise an error. accept_large_sparse : bool, default=True If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by accept_sparse, accept_large_sparse=False will cause it to be accepted only if its indices are stored with a 32-bit dtype. .. versionadded:: 0.20 dtype : 'numeric', type, list of type or None, default='numeric' Data type of result. If None, the dtype of the input is preserved. If "numeric", dtype is preserved unless array.dtype is object. If dtype is a list of types, conversion on the first type is only performed if the dtype of the input is not in the list. order : {'F', 'C'} or None, default=None Whether an array will be forced to be fortran or c-style. When order is None (default), then if copy=False, nothing is ensured about the memory layout of the output array; otherwise (copy=True) the memory layout of the returned array is kept as close as possible to the original array. copy : bool, default=False Whether a forced copy will be triggered. If copy=False, a copy might be triggered by a conversion.
force_all_finite : bool or 'allow-nan', default=True Whether to raise an error on np.inf, np.nan, pd.NA in array. The possibilities are: - True: Force all values of array to be finite. - False: accepts np.inf, np.nan, pd.NA in array. - 'allow-nan': accepts only np.nan and pd.NA values in array. Values cannot be infinite. .. versionadded:: 0.20 ``force_all_finite`` accepts the string ``'allow-nan'``. .. versionchanged:: 0.23 Accepts `pd.NA` and converts it into `np.nan` ensure_2d : bool, default=True Whether to raise a value error if array is not 2D. allow_nd : bool, default=False Whether to allow array.ndim > 2. ensure_min_samples : int, default=1 Make sure that the array has a minimum number of samples in its first axis (rows for a 2D array). Setting to 0 disables this check. ensure_min_features : int, default=1 Make sure that the 2D array has some minimum number of features (columns). The default value of 1 rejects empty datasets. This check is only enforced when the input data has effectively 2 dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0 disables this check. estimator : str or estimator instance, default=None If passed, include the name of the estimator in warning messages. input_name : str, default="" The data name used to construct the error message. In particular if `input_name` is "X" and the data has NaN values and allow_nan is False, the error message will link to the imputer documentation. .. versionadded:: 1.1.0 Returns ------- array_converted : object The converted and validated array. """ if isinstance(array, np.matrix): warnings.warn( "np.matrix usage is deprecated in 1.0 and will raise a TypeError " "in 1.2. Please convert to a numpy array with np.asarray. For " "more information see: " "https://numpy.org/doc/stable/reference/generated/numpy.matrix.html", # noqa FutureWarning, ) # store reference to original array to check if copy is needed when # function returns array_orig = array # store whether originally we wanted numeric dtype dtype_numeric = isinstance(dtype, str) and dtype == "numeric" dtype_orig = getattr(array, "dtype", None) if not hasattr(dtype_orig, "kind"): # not a data type (e.g. a column named dtype in a pandas DataFrame) dtype_orig = None # check if the object contains several dtypes (typically a pandas # DataFrame), and store them. If not, store None. dtypes_orig = None pandas_requires_conversion = False if hasattr(array, "dtypes") and hasattr(array.dtypes, "__array__"): # throw warning if columns are sparse. If all columns are sparse, then # array.sparse exists and sparsity will be preserved (later). with suppress(ImportError): from pandas.api.types import is_sparse if ( not hasattr(array, "sparse") and array.dtypes.apply(is_sparse).any() ): warnings.warn( "pandas.DataFrame with sparse columns found. " "It will be converted to a dense numpy array." ) dtypes_orig = list(array.dtypes) pandas_requires_conversion = any( _pandas_dtype_needs_early_conversion(i) for i in dtypes_orig ) if all(isinstance(dtype_iter, np.dtype) for dtype_iter in dtypes_orig): dtype_orig = np.result_type(*dtypes_orig) if dtype_numeric: if dtype_orig is not None and dtype_orig.kind == "O": # if input is object, convert to float. dtype = np.float64 else: dtype = None if isinstance(dtype, (list, tuple)): if dtype_orig is not None and dtype_orig in dtype: # no dtype conversion required dtype = None else: # dtype conversion required. Let's select the first element of the # list of accepted types.
dtype = dtype[0] if pandas_requires_conversion: # pandas dataframe requires conversion earlier to handle extension dtypes with # nans # Use the original dtype for conversion if dtype is None new_dtype = dtype_orig if dtype is None else dtype array = array.astype(new_dtype) # Since we converted here, we do not need to convert again later dtype = None if force_all_finite not in (True, False, "allow-nan"): raise ValueError( f'force_all_finite should be a bool or "allow-nan". Got {force_all_finite!r} instead' ) estimator_name = _check_estimator_name(estimator) context = " by %s" % estimator_name if estimator is not None else "" # When all dataframe columns are sparse, convert to a sparse array if hasattr(array, "sparse") and array.ndim > 1: # DataFrame.sparse only supports `to_coo` array = array.sparse.to_coo() if array.dtype == np.dtype("object"): unique_dtypes = set([dt.subtype.name for dt in array_orig.dtypes]) if len(unique_dtypes) > 1: raise ValueError( "Pandas DataFrame with mixed sparse extension arrays " "generated a sparse matrix with object dtype which " "can not be converted to a scipy sparse matrix. " "Sparse extension arrays should all have the same " "numeric type." ) if sp.issparse(array): _ensure_no_complex_data(array) array = _ensure_sparse_format( array, accept_sparse=accept_sparse, dtype=dtype, copy=copy, force_all_finite=force_all_finite, accept_large_sparse=accept_large_sparse, estimator_name=estimator_name, input_name=input_name, ) else: # If np.array(..) gives ComplexWarning, then we convert the warning # to an error. This is needed because specifying a non complex # dtype to the function converts complex to real dtype, # thereby passing the test made in the lines following the scope # of warnings context manager. with warnings.catch_warnings(): try: warnings.simplefilter("error", ComplexWarning) if dtype is not None and np.dtype(dtype).kind in "iu": # Conversion float -> int should not contain NaN or # inf (numpy#14412). We cannot use casting='safe' because # then conversion float -> int would be disallowed. array = np.asarray(array, order=order) if array.dtype.kind == "f": _assert_all_finite( array, allow_nan=False, msg_dtype=dtype, estimator_name=estimator_name, input_name=input_name, ) array = array.astype(dtype, casting="unsafe", copy=False) else: # Overwritten line to accept string input array = np.asarray(array, order=order) except ComplexWarning as complex_warning: raise ValueError( f"Complex data not supported\n{array}\n" ) from complex_warning # It is possible that the np.array(..) gave no warning. This happens # when no dtype conversion happened, for example dtype = None. The # result is that np.array(..) produces an array of complex dtype # and we need to catch and raise exception for such cases. _ensure_no_complex_data(array) if ensure_2d: # If input is scalar raise error if array.ndim == 0: raise ValueError( f"Expected 2D array, got scalar array instead:\narray={array}.\n" "Reshape your data either using array.reshape(-1, 1) if " "your data has a single feature or array.reshape(1, -1) " "if it contains a single sample." ) # If input is 1D raise error if array.ndim == 1: raise ValueError( f"Expected 2D array, got 1D array instead:\narray={array}.\n" "Reshape your data either using array.reshape(-1, 1) if " "your data has a single feature or array.reshape(1, -1) " "if it contains a single sample." ) if dtype_numeric and array.dtype.kind in "USV": raise ValueError( "dtype='numeric' is not compatible with arrays of bytes/strings."
"Convert your data to numeric values explicitly instead." ) if not allow_nd and array.ndim >= 3: raise ValueError( f"Found array with dim {array.ndim}. {estimator_name} expected <= 2." ) if force_all_finite: _assert_all_finite( array, input_name=input_name, estimator_name=estimator_name, allow_nan=force_all_finite == "allow-nan", ) if ensure_min_samples > 0: n_samples = _num_samples(array) if n_samples < ensure_min_samples: raise ValueError( "Found array with %d sample(s) (shape=%s) while a" " minimum of %d is required%s." % (n_samples, array.shape, ensure_min_samples, context) ) if ensure_min_features > 0 and array.ndim == 2: n_features = array.shape[1] if n_features < ensure_min_features: raise ValueError( "Found array with %d feature(s) (shape=%s) while" " a minimum of %d is required%s." % (n_features, array.shape, ensure_min_features, context) ) if copy and np.may_share_memory(array, array_orig): array = np.array(array, dtype=dtype, order=order) return array def _check_large_sparse(X, accept_large_sparse=False): """Raise a ValueError if X has 64bit indices and accept_large_sparse=False""" if not accept_large_sparse: supported_indices = ["int32"] if X.getformat() == "coo": index_keys = ["col", "row"] elif X.getformat() in ["csr", "csc", "bsr"]: index_keys = ["indices", "indptr"] else: return for key in index_keys: indices_datatype = getattr(X, key).dtype if indices_datatype not in supported_indices: raise ValueError( "Only sparse matrices with 32-bit integer" " indices are accepted. Got %s indices." % indices_datatype ) def check_X_e( X, y, accept_sparse=False, *, accept_large_sparse=True, dtype="numeric", order=None, copy=False, force_all_finite=True, ensure_2d=True, allow_nd=False, multi_output=False, ensure_min_samples=1, ensure_min_features=1, y_numeric=False, estimator=None, ): """Input validation for standard estimators. Checks X and y for consistent length, enforces X to be 2D and y 1D. By default, X is checked to be non-empty and containing only finite values. Standard input checks are also applied to y, such as checking that y does not have np.nan or np.inf targets. For multi-label y, set multi_output=True to allow 2D and sparse y. If the dtype of X is object, attempt converting to float, raising on failure. Parameters ---------- X : {ndarray, list, sparse matrix} Input data. y : {ndarray, list, sparse matrix} Labels. accept_sparse : str, bool or list of str, default=False String[s] representing allowed sparse matrix formats, such as 'csc', 'csr', etc. If the input is sparse but not in the allowed format, it will be converted to the first listed format. True allows the input to be any format. False means that a sparse matrix input will raise an error. accept_large_sparse : bool, default=True If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by accept_sparse, accept_large_sparse will cause it to be accepted only if its indices are stored with a 32-bit dtype. .. versionadded:: 0.20 dtype : 'numeric', type, list of type or None, default='numeric' Data type of result. If None, the dtype of the input is preserved. If "numeric", dtype is preserved unless array.dtype is object. If dtype is a list of types, conversion on the first type is only performed if the dtype of the input is not in the list. order : {'F', 'C'}, default=None Whether an array will be forced to be fortran or c-style. copy : bool, default=False Whether a forced copy will be triggered. If copy=False, a copy might be triggered by a conversion. 
force_all_finite : bool or 'allow-nan', default=True Whether to raise an error on np.inf, np.nan, pd.NA in X. This parameter does not influence whether y can have np.inf, np.nan, pd.NA values. The possibilities are: - True: Force all values of X to be finite. - False: accepts np.inf, np.nan, pd.NA in X. - 'allow-nan': accepts only np.nan or pd.NA values in X. Values cannot be infinite. .. versionadded:: 0.20 ``force_all_finite`` accepts the string ``'allow-nan'``. .. versionchanged:: 0.23 Accepts `pd.NA` and converts it into `np.nan` ensure_2d : bool, default=True Whether to raise a value error if X is not 2D. allow_nd : bool, default=False Whether to allow X.ndim > 2. multi_output : bool, default=False Whether to allow 2D y (array or sparse matrix). If false, y will be validated as a vector. y cannot have np.nan or np.inf values if multi_output=True. ensure_min_samples : int, default=1 Make sure that X has a minimum number of samples in its first axis (rows for a 2D array). ensure_min_features : int, default=1 Make sure that the 2D array has some minimum number of features (columns). The default value of 1 rejects empty datasets. This check is only enforced when X has effectively 2 dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0 disables this check. y_numeric : bool, default=False Whether to ensure that y has a numeric type. If dtype of y is object, it is converted to float64. Should only be used for regression algorithms. estimator : str or estimator instance, default=None If passed, include the name of the estimator in warning messages. Returns ------- X_converted : object The converted and validated X. y_converted : object The converted and validated y. """ if y is None: if estimator is None: estimator_name = "estimator" else: estimator_name = _check_estimator_name(estimator) raise ValueError( f"{estimator_name} requires y to be passed, but the target y is None" ) X = check_array( X, accept_sparse=accept_sparse, accept_large_sparse=accept_large_sparse, dtype=dtype, order=order, copy=copy, force_all_finite=force_all_finite, ensure_2d=ensure_2d, allow_nd=allow_nd, ensure_min_samples=ensure_min_samples, ensure_min_features=ensure_min_features, estimator=estimator, input_name="X", ) y = _check_y( y, multi_output=multi_output, y_numeric=y_numeric, estimator=estimator ) check_consistent_length(X, y) return X, y def _check_y(y, multi_output=False, y_numeric=False, estimator=None): """Isolated part of check_X_e dedicated to y validation""" if multi_output: y = check_array( y, accept_sparse="csr", force_all_finite=True, ensure_2d=False, dtype=None, input_name="y", estimator=estimator, ) else: estimator_name = _check_estimator_name(estimator) y = column_or_1d(y, warn=True) _assert_all_finite(y, input_name="y", estimator_name=estimator_name) _ensure_no_complex_data(y) if y_numeric and y.dtype.kind in ("O", "b"): y = y.astype(np.float64) return y def column_or_1d(y, *, warn=False): """Ravel column or 1d numpy array, else raises an error. Parameters ---------- y : array-like Input data. warn : bool, default=False To control display of warnings. Returns ------- y : ndarray Output data. Raises ------ ValueError If `y` is not a 1D array or a 2D array with a single row or column. """ y = np.asarray(y) shape = np.shape(y) if len(shape) == 1: return np.ravel(y) if len(shape) == 2 and shape[1] == 1: if warn: warnings.warn( "A column-vector y was passed when a 1d array was" " expected.
Please change the shape of y to " "(n_samples, ), for example using ravel().", DataConversionWarning, stacklevel=2, ) return np.ravel(y) raise ValueError( f"y should be a 1d array, got an array of shape {shape} instead." )
zeno-sliceline
/zeno_sliceline-0.0.1-py3-none-any.whl/sliceline/validation.py
validation.py
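The row above vendors scikit-learn's validation module wholesale, so a short usage sketch may help. This assumes the zeno-sliceline wheel is installed so that sliceline.validation imports cleanly; the error texts in the comments are taken from the code above.

import numpy as np
from sliceline.validation import check_array, check_consistent_length

X = np.array([[1.0, 2.0], [3.0, np.nan]])

try:
    check_array(X)  # NaN is rejected by default (force_all_finite=True)
except ValueError as exc:
    print(exc)  # -> "Input contains NaN."

checked = check_array(X, force_all_finite="allow-nan")  # NaN tolerated

try:
    check_consistent_length(X, np.zeros(3))  # 2 rows vs. 3 samples
except ValueError as exc:
    print(exc)  # -> "... inconsistent numbers of samples: [2, 3]"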
zeno ======== In honor of Zeno the dog Usage ----- # Build the virtualenv and install required modules, # including this one in editable mode 0/make_venv.sh . venv/bin/activate # Build the module 0/build.sh # Register the built module with PyPI 0/register.sh # Upload a new build to PyPI 0/upload.sh # Delete all built files 0/clean.sh Development ----------- Run `0/make_venv.sh` first, then `. venv/bin/activate` # This IPython config will set autoreload ipython --config=0/ipython_config.py
zeno
/zeno-0.0.1.tar.gz/zeno-0.0.1/README.rst
README.rst
.. This file is part of Zenodo. Copyright (C) 2016 CERN. Zenodo is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Zenodo is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Zenodo; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. In applying this license, CERN does not waive the privileges and immunities granted to it by virtue of its status as an Intergovernmental Organization or submit itself to any jurisdiction. Changes ======= Version 1.0.0a3 (released April 2nd, 2019) - Initial public release.
zenodo-accessrequests
/zenodo-accessrequests-1.0.0a3.tar.gz/zenodo-accessrequests-1.0.0a3/CHANGES.rst
CHANGES.rst
.. This file is part of Zenodo. Copyright (C) 2015 CERN. Zenodo is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Zenodo is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Zenodo; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. In applying this license, CERN does not waive the privileges and immunities granted to it by virtue of its status as an Intergovernmental Organization or submit itself to any jurisdiction. ======================= Zenodo-AccessRequests ======================= .. image:: https://img.shields.io/travis/zenodo/zenodo-accessrequests.svg :target: https://travis-ci.org/zenodo/zenodo-accessrequests .. image:: https://img.shields.io/coveralls/zenodo/zenodo-accessrequests.svg :target: https://coveralls.io/r/zenodo/zenodo-accessrequests .. image:: https://img.shields.io/github/tag/zenodo/zenodo-accessrequests.svg :target: https://github.com/zenodo/zenodo-accessrequests/releases .. image:: https://img.shields.io/pypi/dm/zenodo-accessrequests.svg :target: https://pypi.python.org/pypi/zenodo-accessrequests .. image:: https://img.shields.io/github/license/zenodo/zenodo-accessrequests.svg :target: https://github.com/zenodo/zenodo-accessrequests/blob/master/LICENSE Zenodo module for providing access request feature. *This is an experimental developer preview release.* * Free software: GPLv2 license * Documentation: https://pythonhosted.org/zenodo-accessrequests/
zenodo-accessrequests
/zenodo-accessrequests-1.0.0a3.tar.gz/zenodo-accessrequests-1.0.0a3/README.rst
README.rst
Contributing ============ Contributions are welcome, and they are greatly appreciated! Every little bit helps, and credit will always be given. Types of Contributions ---------------------- Report Bugs ~~~~~~~~~~~ Report bugs at https://github.com/zenodo/zenodo-accessrequests/issues. If you are reporting a bug, please include: * Your operating system name and version. * Any details about your local setup that might be helpful in troubleshooting. * Detailed steps to reproduce the bug. Fix Bugs ~~~~~~~~ Look through the GitHub issues for bugs. Anything tagged with "bug" is open to whoever wants to implement it. Implement Features ~~~~~~~~~~~~~~~~~~ Look through the GitHub issues for features. Anything tagged with "feature" is open to whoever wants to implement it. Write Documentation ~~~~~~~~~~~~~~~~~~~ Zenodo-AccessRequests could always use more documentation, whether as part of the official Zenodo-AccessRequests docs, in docstrings, or even on the web in blog posts, articles, and such. Submit Feedback ~~~~~~~~~~~~~~~ The best way to send feedback is to file an issue at https://github.com/zenodo/zenodo-accessrequests/issues. If you are proposing a feature: * Explain in detail how it would work. * Keep the scope as narrow as possible, to make it easier to implement. * Remember that this is a volunteer-driven project, and that contributions are welcome :) Get Started! ------------ Ready to contribute? Here's how to set up `zenodo-accessrequests` for local development. 1. Fork the `zenodo-accessrequests` repo on GitHub. 2. Clone your fork locally: .. code-block:: console $ git clone [email protected]:your_name_here/zenodo-accessrequests.git 3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development: .. code-block:: console $ mkvirtualenv zenodo-accessrequests $ cd zenodo-accessrequests/ $ pip install -e .[all] 4. Create a branch for local development: .. code-block:: console $ git checkout -b name-of-your-bugfix-or-feature Now you can make your changes locally. 5. When you're done making changes, check that your changes pass tests: .. code-block:: console $ ./run-tests.sh The tests will provide you with test coverage and also check PEP8 (code style), PEP257 (documentation), flake8 as well as build the Sphinx documentation and run doctests. 6. Commit your changes and push your branch to GitHub: .. code-block:: console $ git add . $ git commit -s -m "Your detailed description of your changes." $ git push origin name-of-your-bugfix-or-feature 7. Submit a pull request through the GitHub website. Pull Request Guidelines ----------------------- Before you submit a pull request, check that it meets these guidelines: 1. The pull request should include tests and must not decrease test coverage. 2. If the pull request adds functionality, the docs should be updated. Put your new functionality into a function with a docstring. 3. The pull request should work for Python 2.7, 3.3, 3.4 and 3.5. Check https://travis-ci.com/zenodo/zenodo-accessrequests/pull_requests and make sure that the tests pass for all supported Python versions.
zenodo-accessrequests
/zenodo-accessrequests-1.0.0a3.tar.gz/zenodo-accessrequests-1.0.0a3/CONTRIBUTING.rst
CONTRIBUTING.rst
.. This file is part of Zenodo. Copyright (C) 2015 CERN. Zenodo is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Zenodo is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Zenodo; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. In applying this license, CERN does not waive the privileges and immunities granted to it by virtue of its status as an Intergovernmental Organization or submit itself to any jurisdiction. Authors ======= Zenodo module for providing access request feature. - Adrian Baran Pawel <[email protected]> - Lars Holm Nielsen <[email protected]> - Leonardo Rossi <[email protected]>
zenodo-accessrequests
/zenodo-accessrequests-1.0.0a3.tar.gz/zenodo-accessrequests-1.0.0a3/AUTHORS.rst
AUTHORS.rst
================================ Zenodo-AccessRequests v1.0.0a2 ================================ Zenodo-AccessRequests v1.0.0a2 was released on November 24, 2016. About ----- Zenodo module for providing access request feature. What's new ---------- - Initial public release. Installation ------------ $ pip install zenodo-accessrequests==1.0.0a2 Documentation ------------- http://pythonhosted.org/zenodo-accessrequests/ Happy hacking and thanks for flying Zenodo-AccessRequests. | Invenio Development Team | Email: [email protected] | IRC: #invenio on irc.freenode.net | Twitter: http://twitter.com/inveniosoftware | GitHub: https://github.com/zenodo/zenodo-accessrequests | URL: http://invenio-software.org
zenodo-accessrequests
/zenodo-accessrequests-1.0.0a3.tar.gz/zenodo-accessrequests-1.0.0a3/RELEASE-NOTES.rst
RELEASE-NOTES.rst
.. This file is part of Zenodo. Copyright (C) 2015 CERN. Zenodo is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Zenodo is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Zenodo; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. In applying this license, CERN does not waive the privileges and immunities granted to it by virtue of its status as an Intergovernmental Organization or submit itself to any jurisdiction. .. include:: ../CONTRIBUTING.rst
zenodo-accessrequests
/zenodo-accessrequests-1.0.0a3.tar.gz/zenodo-accessrequests-1.0.0a3/docs/contributing.rst
contributing.rst
.. This file is part of Zenodo. Copyright (C) 2015 CERN. Zenodo is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Zenodo is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Zenodo; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. In applying this license, CERN does not waive the privileges and immunities granted to it by virtue of its status as an Intergovernmental Organization or submit itself to any jurisdiction. .. include:: ../CHANGES.rst
zenodo-accessrequests
/zenodo-accessrequests-1.0.0a3.tar.gz/zenodo-accessrequests-1.0.0a3/docs/changes.rst
changes.rst
.. This file is part of Zenodo. Copyright (C) 2015 CERN. Zenodo is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Zenodo is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Zenodo; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. In applying this license, CERN does not waive the privileges and immunities granted to it by virtue of its status as an Intergovernmental Organization or submit itself to any jurisdiction. .. include:: ../AUTHORS.rst
zenodo-accessrequests
/zenodo-accessrequests-1.0.0a3.tar.gz/zenodo-accessrequests-1.0.0a3/docs/authors.rst
authors.rst
.. This file is part of Zenodo. Copyright (C) 2015 CERN. Zenodo is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Zenodo is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Zenodo; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. In applying this license, CERN does not waive the privileges and immunities granted to it by virtue of its status as an Intergovernmental Organization or submit itself to any jurisdiction. ======= Usage ======= .. automodule:: zenodo_accessrequests
zenodo-accessrequests
/zenodo-accessrequests-1.0.0a3.tar.gz/zenodo-accessrequests-1.0.0a3/docs/usage.rst
usage.rst
.. This file is part of Zenodo. Copyright (C) 2015 CERN. Zenodo is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Zenodo is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Zenodo; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. In applying this license, CERN does not waive the privileges and immunities granted to it by virtue of its status as an Intergovernmental Organization or submit itself to any jurisdiction. .. include:: ../README.rst User's Guide ------------ This part of the documentation will show you how to get started in using Invenio-Base. .. toctree:: :maxdepth: 2 installation usage API Reference ------------- If you are looking for information on a specific function, class or method, this part of the documentation is for you. .. toctree:: :maxdepth: 2 api Additional Notes ---------------- Notes on how to contribute, legal information and changes are here for the interested. .. toctree:: :maxdepth: 1 contributing changes license authors
zenodo-accessrequests
/zenodo-accessrequests-1.0.0a3.tar.gz/zenodo-accessrequests-1.0.0a3/docs/index.rst
index.rst
.. This file is part of Zenodo. Copyright (C) 2015 CERN. Zenodo is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Zenodo is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Zenodo; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. In applying this license, CERN does not waive the privileges and immunities granted to it by virtue of its status as an Intergovernmental Organization or submit itself to any jurisdiction. .. include:: ../INSTALL.rst
zenodo-accessrequests
/zenodo-accessrequests-1.0.0a3.tar.gz/zenodo-accessrequests-1.0.0a3/docs/installation.rst
installation.rst
.. This file is part of Zenodo. Copyright (C) 2015 CERN. Zenodo is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Zenodo is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Zenodo; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. In applying this license, CERN does not waive the privileges and immunities granted to it by virtue of its status as an Intergovernmental Organization or submit itself to any jurisdiction. API Docs ======== zenodo_accessrequests ---------------------
zenodo-accessrequests
/zenodo-accessrequests-1.0.0a3.tar.gz/zenodo-accessrequests-1.0.0a3/docs/api.rst
api.rst
License ======= Invenio is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Invenio is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Invenio; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. In applying this license, CERN does not waive the privileges and immunities granted to it by virtue of its status as an Intergovernmental Organization or submit itself to any jurisdiction.
zenodo-accessrequests
/zenodo-accessrequests-1.0.0a3.tar.gz/zenodo-accessrequests-1.0.0a3/docs/license.rst
license.rst
from __future__ import absolute_import, print_function import os from time import sleep from flask import Flask from flask_menu import Menu as FlaskMenu from invenio_access import InvenioAccess from invenio_accounts import InvenioAccounts from invenio_accounts.testutils import create_test_user from invenio_accounts.views import blueprint as blueprint_user from invenio_admin import InvenioAdmin from invenio_db import InvenioDB, db from invenio_i18n import InvenioI18N from invenio_indexer import InvenioIndexer from invenio_indexer.api import RecordIndexer from invenio_mail import InvenioMail as Mail from invenio_pidstore import InvenioPIDStore from invenio_records import InvenioRecords from invenio_records_ui import InvenioRecordsUI from invenio_search import InvenioSearch from invenio_userprofiles import InvenioUserProfiles from invenio_userprofiles.views import \ blueprint_ui_init as userprofiles_blueprint_ui_init from zenodo_accessrequests import ZenodoAccessRequests from zenodo_accessrequests.views.requests import blueprint as request_blueprint from zenodo_accessrequests.views.settings import \ blueprint as settings_blueprint # Create Flask application app = Flask(__name__) app.config.update( # DEBUG=True, CELERY_ALWAYS_EAGER=True, CELERY_CACHE_BACKEND="memory", CELERY_EAGER_PROPAGATES_EXCEPTIONS=True, CELERY_RESULT_BACKEND="cache", MAIL_SUPPRESS_SEND=True, TESTING=True, SECRET_KEY='TEST', SQLALCHEMY_DATABASE_URI=os.environ.get( 'SQLALCHEMY_DATABASE_URI', 'sqlite:///app.db' ), SECURITY_PASSWORD_SALT='security-password-salt', RECORDS_UI_ENDPOINTS=dict( recid=dict( pid_type='recid', route='/records/<pid_value>', template='invenio_records_ui/detail.html', ), recid_access_request=dict( pid_type='recid', route='/records/<pid_value>/accessrequest', template='zenodo_accessrequests/access_request.html', view_imp='zenodo_accessrequests.views.requests.access_request', methods=['GET', 'POST'], ), recid_access_request_email_confirm=dict( pid_type='recid', route='/records/<pid_value>/accessrequest/<token>/confirm', # template='invenio_records_ui/detail.html', view_imp='zenodo_accessrequests.views.requests.confirm', ), ) ) InvenioDB(app) InvenioAccounts(app) InvenioUserProfiles(app) InvenioRecords(app) InvenioI18N(app) FlaskMenu(app) Mail(app) InvenioRecordsUI(app) ZenodoAccessRequests(app) InvenioPIDStore(app) InvenioIndexer(app) InvenioSearch(app) InvenioAccess(app) InvenioAdmin(app, permission_factory=lambda x: x, view_class_factory=lambda x: x) app.register_blueprint(request_blueprint) app.register_blueprint(settings_blueprint) app.register_blueprint(blueprint_user) app.register_blueprint(userprofiles_blueprint_ui_init) @app.cli.group() def fixtures(): """Command for working with test data.""" @fixtures.command() def records(): """Load test data fixture.""" import uuid from invenio_records.api import Record from invenio_pidstore.models import PersistentIdentifier, PIDStatus create_test_user() indexer = RecordIndexer() # Record 1 - Live record with db.session.begin_nested(): rec_uuid = uuid.uuid4() pid1 = PersistentIdentifier.create( 'recid', '1', object_type='rec', object_uuid=rec_uuid, status=PIDStatus.REGISTERED) Record.create({ 'title': 'Registered', 'description': 'This is an awesome description', 'control_number': '1', 'access_right': 'restricted', 'access_conditions': 'fuu', 'owners': [1, 2], 'recid': 1 }, id_=rec_uuid) indexer.index_by_id(pid1.object_uuid) db.session.commit() sleep(3)
zenodo-accessrequests
/zenodo-accessrequests-1.0.0a3.tar.gz/zenodo-accessrequests-1.0.0a3/examples/app.py
app.py
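Since the example app above registers a `fixtures records` CLI command, a hypothetical driver can exercise it through Flask's built-in CLI test runner. This sketch assumes the file is saved as app.py next to the driver, that the Invenio dependencies (and a search backend for the indexer) are available, and that the default sqlite:///app.db database is acceptable.

from app import app, db

with app.app_context():
    db.create_all()  # create tables for the default sqlite:///app.db

runner = app.test_cli_runner()  # Flask >= 1.0
result = runner.invoke(args=["fixtures", "records"])
print(result.output)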
from __future__ import absolute_import, print_function from sqlalchemy.sql.expression import desc class Ordering(object): """Helper class for column sorting.""" def __init__(self, options, selected): """Initialize ordering with the possible options and the selected option. :param options: List of column options. :param selected: Selected column. Prefix name with ``-`` to denote descending ordering. """ self.options = options if selected in options: self._selected = selected self.asc = True elif selected and selected[0] == '-' and selected[1:] in options: self._selected = selected[1:] self.asc = False else: self._selected = None self.asc = None def reverse(self, col): """Get reverse direction of ordering.""" if col in self.options: if self.is_selected(col): return col if not self.asc else '-{0}'.format(col) else: return col return None def dir(self, col, asc='asc', desc='desc'): """Get direction (ascending/descending) of ordering.""" if col == self._selected and self.asc is not None: return asc if self.asc else desc else: return None def is_selected(self, col): """Determine if column is being ordered by.""" return col == self._selected def selected(self): """Get column which is being ordered by.""" if self._selected: return self._selected if self.asc else \ "-{0}".format(self._selected) return None class QueryOrdering(Ordering): """Helper class for column sorting based on SQLAlchemy queries.""" def __init__(self, query, options, selected): """Initialize with SQLAlchemy query.""" super(QueryOrdering, self).__init__(options, selected) self.query = query def items(self): """Get query with correct ordering.""" if self.asc is not None: if self._selected and self.asc: return self.query.order_by(self._selected) elif self._selected and not self.asc: return self.query.order_by(desc(self._selected)) return self.query
zenodo-accessrequests
/zenodo-accessrequests-1.0.0a3.tar.gz/zenodo-accessrequests-1.0.0a3/zenodo_accessrequests/helpers.py
helpers.py
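A quick sketch of the Ordering helper above: a leading '-' on the selected option means descending, and reverse()/dir() provide what a sortable column header in a template needs. The column names here are made up for illustration; this only assumes the package is importable.

from zenodo_accessrequests.helpers import Ordering

ordering = Ordering(["title", "created"], "-created")  # '-' => descending

print(ordering.selected())              # '-created'
print(ordering.is_selected("created"))  # True
print(ordering.dir("created"))          # 'desc'
print(ordering.reverse("created"))      # 'created' (header link flips to ascending)
print(ordering.dir("title"))            # None (not the sorted column)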
from __future__ import absolute_import, print_function import binascii import os from base64 import urlsafe_b64encode from datetime import datetime from flask import current_app from itsdangerous import BadData, JSONWebSignatureSerializer, \ SignatureExpired, TimedJSONWebSignatureSerializer SUPPORTED_DIGEST_ALGORITHMS = ('HS256', 'HS512') class TokenMixin(object): """Mix-in class for token serializers.""" def create_token(self, obj_id, extra_data): """Create a token referencing the object id with extra data. Note random data is added to ensure that no two tokens are identical. """ return self.dumps( dict( id=obj_id, data=extra_data, rnd=binascii.hexlify(os.urandom(4)).decode('utf-8') ) ) def validate_token(self, token, expected_data=None): """Validate secret link token. :param token: Token value. :param expected_data: A dictionary of key/values that must be present in the data part of the token (i.e. included via ``extra_data`` in ``create_token``). """ try: # Load token and remove random data. data = self.load_token(token) # Compare expected data with data in token. if expected_data: for k in expected_data: if expected_data[k] != data["data"].get(k): return None return data except BadData: return None def load_token(self, token, force=False): """Load data in a token. :param token: Token to load. :param force: Load token data even if signature expired. Default: False. """ try: data = self.loads(token) except SignatureExpired as e: if not force: raise data = e.payload del data["rnd"] return data class EncryptedTokenMixIn(TokenMixin): """Mix-in class for token serializers that generate encrypted tokens.""" @property def engine(self): """Get cryptographic engine.""" if not hasattr(self, '_engine'): from cryptography.fernet import Fernet from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes digest = hashes.Hash(hashes.SHA256(), backend=default_backend()) digest.update(current_app.config['SECRET_KEY'].encode('utf8')) fernet_key = urlsafe_b64encode(digest.finalize()) self._engine = Fernet(fernet_key) return self._engine def create_token(self, obj_id, extra_data): """Create a token referencing the object id with extra data.""" return self.engine.encrypt( super(EncryptedTokenMixIn, self).create_token(obj_id, extra_data) ) def load_token(self, token, force=False): """Load data in a token. :param token: Token to load. :param force: Load token data even if signature expired. Default: False. """ return super(EncryptedTokenMixIn, self).load_token( self.engine.decrypt(token), force=force ) class EmailConfirmationSerializer(TimedJSONWebSignatureSerializer, TokenMixin): """Serializer for email confirmation link tokens. Depends upon the secrecy of ``SECRET_KEY``. Tokens expire after a specific time (defaults to ``ACCESSREQUESTS_CONFIRMLINK_EXPIRES_IN``). The access request id as well as the email address is stored in the token together with a random bit to ensure all tokens are unique. 
""" def __init__(self, expires_in=None, **kwargs): """Initialize underlying TimedJSONWebSignatureSerializer.""" dt = expires_in or \ current_app.config['ACCESSREQUESTS_CONFIRMLINK_EXPIRES_IN'] super(EmailConfirmationSerializer, self).__init__( current_app.config['SECRET_KEY'], expires_in=dt, salt='accessrequests-email', **kwargs ) @classmethod def compat_validate_token(cls, **kwargs): """Multiple algorithm-compatible token validation.""" for algorithm in SUPPORTED_DIGEST_ALGORITHMS: try: cls(algorithm_name=algorithm).validate_token(**kwargs) except BadData: # move to next algorithm continue class SecretLinkSerializer(JSONWebSignatureSerializer, TokenMixin): """Serializer for secret links.""" def __init__(self, **kwargs): """Initialize underlying JSONWebSignatureSerializer.""" super(SecretLinkSerializer, self).__init__( current_app.config['SECRET_KEY'], salt='accessrequests-link', **kwargs ) class TimedSecretLinkSerializer(TimedJSONWebSignatureSerializer, TokenMixin): """Serializer for expiring secret links.""" def __init__(self, expires_at=None, **kwargs): """Initialize underlying TimedJSONWebSignatureSerializer.""" assert isinstance(expires_at, datetime) or expires_at is None dt = expires_at - datetime.now() if expires_at else None super(TimedSecretLinkSerializer, self).__init__( current_app.config['SECRET_KEY'], expires_in=int(dt.total_seconds()) if dt else None, salt='accessrequests-timedlink', **kwargs ) class SecretLinkFactory(object): """Functions for creating and validating any secret link tokens.""" @classmethod def create_token(cls, obj_id, data, expires_at=None): """Create the secret link token.""" if expires_at: s = TimedSecretLinkSerializer(expires_at=expires_at) else: s = SecretLinkSerializer() return s.create_token(obj_id, data) @classmethod def validate_token(cls, token, expected_data=None): """Validate a secret link token (non-expiring + expiring).""" for algorithm in SUPPORTED_DIGEST_ALGORITHMS: s = SecretLinkSerializer(algorithm_name=algorithm) st = TimedSecretLinkSerializer(algorithm_name=algorithm) try: for serializer in (s, st): data = serializer.validate_token( token, expected_data=expected_data) if data: return data except SignatureExpired: # move to next algorithm raise except BadData: continue # move to next serializer/algorithm @classmethod def load_token(cls, token, force=False): """Validate a secret link token (non-expiring + expiring).""" for algorithm in SUPPORTED_DIGEST_ALGORITHMS: s = SecretLinkSerializer(algorithm_name=algorithm) st = TimedSecretLinkSerializer(algorithm_name=algorithm) for serializer in (s, st): try: data = serializer.load_token(token, force=force) if data: return data except SignatureExpired: raise # signature was parsed and is expired except BadData: continue # move to next serializer/algorithm
zenodo-accessrequests
/zenodo-accessrequests-1.0.0a3.tar.gz/zenodo-accessrequests-1.0.0a3/zenodo_accessrequests/tokens.py
tokens.py
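A sketch of minting and checking a secret-link token with the serializers above. An application context is required because the serializers read SECRET_KEY from current_app; the key value and the recid payload are placeholders, and the itsdangerous version pinned by this package (one that still ships JSONWebSignatureSerializer) is assumed.

from flask import Flask
from zenodo_accessrequests.tokens import SecretLinkSerializer

app = Flask(__name__)
app.config["SECRET_KEY"] = "change-me"  # placeholder secret

with app.app_context():
    serializer = SecretLinkSerializer()
    token = serializer.create_token(42, {"recid": 1})

    # validate_token returns the payload only if the expected data matches.
    print(serializer.validate_token(token, expected_data={"recid": 1}))
    print(serializer.validate_token(token, expected_data={"recid": 2}))  # None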
from __future__ import absolute_import, print_function from copy import deepcopy from datetime import date, datetime from flask import current_app, url_for from flask_babelex import gettext as _ from invenio_accounts.models import User from invenio_db import db from sqlalchemy_utils.types import ChoiceType, EncryptedType from .errors import InvalidRequestStateError from .signals import link_created, link_revoked, request_accepted, \ request_confirmed, request_created, request_rejected from .tokens import SecretLinkFactory # TODO: UTC timestamps + localization. def secret_key(): """Return secret key as bytes.""" return current_app.config['SECRET_KEY'].encode('utf-8') class RequestStatus(object): """Access request status representation.""" EMAIL_VALIDATION = u'C' PENDING = u'P' ACCEPTED = u'A' REJECTED = u'R' class SecretLink(db.Model): """Represent a secret link to a record's restricted files.""" __tablename__ = 'accessrequests_link' id = db.Column(db.Integer, primary_key=True, autoincrement=True) """Secret link id.""" token = db.Column( EncryptedType(type_in=db.Text, key=secret_key), nullable=False ) """Secret token for link (should be stored encrypted).""" owner_user_id = db.Column( db.Integer, db.ForeignKey(User.id), nullable=False, default=None ) """Owner's user id.""" owner = db.relationship(User, foreign_keys=[owner_user_id]) """Relationship to user""" created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow, index=True) """Creation timestamp.""" expires_at = db.Column(db.DateTime, nullable=True) """Expiration date.""" revoked_at = db.Column(db.DateTime, nullable=True, index=True) """Revocation timestamp.""" title = db.Column(db.String(length=255), nullable=False, default='') """Title of link.""" description = db.Column(db.Text, nullable=False, default='') """Description of link.""" @classmethod def create(cls, title, owner, extra_data, description="", expires_at=None): """Create a new secret link.""" if isinstance(expires_at, date): expires_at = datetime.combine(expires_at, datetime.min.time()) with db.session.begin_nested(): obj = cls( owner=owner, title=title, description=description, expires_at=expires_at, token='', ) db.session.add(obj) with db.session.begin_nested(): # Create token (dependent on obj.id and recid) obj.token = SecretLinkFactory.create_token( obj.id, extra_data, expires_at=expires_at ).decode('utf8') link_created.send(obj) return obj @classmethod def validate_token(cls, token, expected_data): """Validate a secret link token. Only queries the database if token is valid to determine that the token has not been revoked. """ data = SecretLinkFactory.validate_token( token, expected_data=expected_data ) if data: link = cls.query.get(data['id']) if link and link.is_valid(): return True return False @classmethod def query_by_owner(cls, user): """Get secret links by user.""" return cls.query.filter_by( owner_user_id=user.id ) @property def extra_data(self): """Load token data stored in token (ignores expiry date of tokens).""" if self.token: return SecretLinkFactory.load_token(self.token, force=True)["data"] return None def get_absolute_url(self, endpoint): """Get absolute URL for secret link (using https scheme). The endpoint is passed to ``url_for`` with ``token`` and ``extra_data`` as keyword arguments.
E.g.:: >>> link.extra_data dict(recid=1) >>> link.get_absolute_url('record.metadata') translates into:: >>> url_for('record.metadata', token="...", recid=1, ) """ copy = deepcopy(self.extra_data) if 'recid' in copy: copy['pid_value'] = copy.pop('recid') return url_for( endpoint, token=self.token, _external=True, **(copy or {}) ) def revoke(self): """Revoken a secret link.""" if self.revoked_at is None: with db.session.begin_nested(): self.revoked_at = datetime.utcnow() link_revoked.send(self) return True return False def is_expired(self): """Determine if link is expired.""" if self.expires_at: return datetime.utcnow() > self.expires_at return False def is_revoked(self): """Determine if link is revoked.""" return self.revoked_at is not None def is_valid(self): """Determine if link is still valid.""" return not(self.is_expired() or self.is_revoked()) class AccessRequest(db.Model): """Represent an request for access to restricted files in a record.""" __tablename__ = 'accessrequests_request' STATUS_CODES = { RequestStatus.EMAIL_VALIDATION: _(u'Email validation'), RequestStatus.PENDING: _(u'Pending'), RequestStatus.ACCEPTED: _(u'Accepted'), RequestStatus.REJECTED: _(u'Rejected'), } id = db.Column(db.Integer, primary_key=True, autoincrement=True) """Access request ID.""" status = db.Column( ChoiceType(STATUS_CODES.items(), impl=db.CHAR(1)), nullable=False, index=True ) """Status of request.""" receiver_user_id = db.Column( db.Integer, db.ForeignKey(User.id), nullable=False, default=None ) """Receiver's user id.""" receiver = db.relationship(User, foreign_keys=[receiver_user_id]) """Relationship to user""" sender_user_id = db.Column( db.Integer, db.ForeignKey(User.id), nullable=True, default=None ) """Sender's user id (for authenticated users).""" sender = db.relationship(User, foreign_keys=[sender_user_id]) """Relationship to user for a sender""" sender_full_name = db.Column(db.String(length=255), nullable=False, default='') """Sender's full name.""" sender_email = db.Column(db.String(length=255), nullable=False, default='') """Sender's email address.""" recid = db.Column(db.Integer, nullable=False, index=True) """Record concerned for the request.""" created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow, index=True) """Creation timestamp.""" modified = db.Column(db.DateTime, nullable=False, default=datetime.utcnow, onupdate=datetime.utcnow) """Last modification timestamp.""" justification = db.Column(db.Text, default='', nullable=False) """Sender's justification for how they fulfill conditions.""" message = db.Column(db.Text, default='', nullable=False) """Receivers message to the sender.""" link_id = db.Column( db.Integer, db.ForeignKey(SecretLink.id), nullable=True, default=None ) """Relation to secret link if request was accepted.""" link = db.relationship(SecretLink, foreign_keys=[link_id]) """Relationship to secret link.""" @classmethod def create(cls, recid=None, receiver=None, sender_full_name=None, sender_email=None, justification=None, sender=None): """Create a new access request. :param recid: Record id (required). :param receiver: User object of receiver (required). :param sender_full_name: Full name of sender (required). :param sender_email: Email address of sender (required). :param justification: Justification message (required). :param sender: User object of sender (optional). 
""" sender_user_id = None if sender is None else sender.id assert recid assert receiver assert sender_full_name assert sender_email assert justification # Determine status status = RequestStatus.EMAIL_VALIDATION if sender and sender.confirmed_at: status = RequestStatus.PENDING with db.session.begin_nested(): # Create object obj = cls( status=status, recid=recid, receiver_user_id=receiver.id, sender_user_id=sender_user_id, sender_full_name=sender_full_name, sender_email=sender_email, justification=justification ) db.session.add(obj) # Send signal if obj.status == RequestStatus.EMAIL_VALIDATION: request_created.send(obj) else: request_confirmed.send(obj) return obj @classmethod def query_by_receiver(cls, user): """Get access requests for a specific receiver.""" return cls.query.filter_by( receiver_user_id=user.id ) @classmethod def get_by_receiver(cls, request_id, user): """Get access request for a specific receiver.""" return cls.query.filter_by( id=request_id, receiver_user_id=user.id ).first() def confirm_email(self): """Confirm that senders email is valid.""" with db.session.begin_nested(): if self.status != RequestStatus.EMAIL_VALIDATION: raise InvalidRequestStateError(RequestStatus.EMAIL_VALIDATION) self.status = RequestStatus.PENDING request_confirmed.send(self) def accept(self, message=None, expires_at=None): """Accept request.""" with db.session.begin_nested(): if self.status != RequestStatus.PENDING: raise InvalidRequestStateError(RequestStatus.PENDING) self.status = RequestStatus.ACCEPTED request_accepted.send(self, message=message, expires_at=expires_at) def reject(self, message=None): """Reject request.""" with db.session.begin_nested(): if self.status != RequestStatus.PENDING: raise InvalidRequestStateError(RequestStatus.PENDING) self.status = RequestStatus.REJECTED request_rejected.send(self, message=message) def create_secret_link(self, title, description=None, expires_at=None): """Create a secret link from request.""" self.link = SecretLink.create( title, self.receiver, extra_data=dict(recid=self.recid), description=description, expires_at=expires_at, ) return self.link
zenodo-accessrequests
/zenodo-accessrequests-1.0.0a3.tar.gz/zenodo-accessrequests-1.0.0a3/zenodo_accessrequests/models.py
models.py
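The request lifecycle is a small state machine (EMAIL_VALIDATION → PENDING → ACCEPTED/REJECTED). A hypothetical walk-through; it assumes an application context, an initialised database session and an existing `owner` User row, none of which are shown here:

```python
from invenio_db import db
from zenodo_accessrequests.models import AccessRequest, RequestStatus

req = AccessRequest.create(
    recid=42,
    receiver=owner,                 # record owner (hypothetical User)
    sender_full_name='Jane Doe',
    sender_email='jane@example.org',
    justification='Needed for my thesis.',
    sender=None,                    # anonymous sender => email validation first
)
assert req.status == RequestStatus.EMAIL_VALIDATION

req.confirm_email()                 # EMAIL_VALIDATION -> PENDING
req.accept(message='Granted.')      # PENDING -> ACCEPTED; signal receivers
                                    # create the secret link
db.session.commit()
```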
from __future__ import absolute_import, print_function

from datetime import datetime, timedelta

from flask_babelex import gettext as _
from flask_security.forms import email_required, email_validator
from flask_wtf import Form
from wtforms import DateField, HiddenField, StringField, SubmitField, \
    TextAreaField, validators

from .widgets import Button


def validate_expires_at(form, field):
    """Validate that date is in the future and within one year."""
    if form.accept.data:
        if not field.data or datetime.utcnow().date() >= field.data:
            raise validators.StopValidation(_(
                "Please provide a future date."
            ))
        if not field.data or \
                datetime.utcnow().date() + timedelta(days=365) < field.data:
            raise validators.StopValidation(_(
                "Please provide a date no more than 1 year into the future."
            ))


class AccessRequestForm(Form):
    """Form for requesting access to a record."""

    full_name = StringField(
        label=_("Full name"),
        description=_("Required."),
        validators=[validators.DataRequired()]
    )

    email = StringField(
        label=_("Email address"),
        description=_(
            "Required. Please carefully check your email address. If the owner"
            " grants access, a secret link will be sent to this email address."
        ),
        validators=[email_required, email_validator]
    )

    justification = TextAreaField(
        label=_("Justification"),
        description=_(
            "Required. Please thoroughly justify how you fulfil the "
            "conditions listed above."),
        validators=[validators.DataRequired()],
    )


class ApprovalForm(Form):
    """Form used to approve/reject requests."""

    request = HiddenField()

    message = TextAreaField(
        label=_("Message to requester"),
        description=_(
            "Required if you reject the request. Optional if you accept the"
            " request."),
    )

    expires_at = DateField(
        label=_('Expires'),
        description=_(
            'Format: YYYY-MM-DD. Required if you accept the request. The '
            'access will automatically be revoked on this date. Date must be '
            'within the next year.'
        ),
        default=lambda: datetime.utcnow().date() + timedelta(days=31),
        validators=[validate_expires_at, validators.Optional()],
    )

    accept = SubmitField(_("Accept"), widget=Button(icon="fa fa-check"))
    reject = SubmitField(_("Reject"), widget=Button(icon="fa fa-times"))

    def validate_accept(form, field):
        """Validate that accept and reject are not both set."""
        if field.data and form.reject.data:
            raise validators.ValidationError(
                _("Both reject and accept cannot be set at the same time.")
            )

    def validate_reject(form, field):
        """Validate that accept and reject are not both set."""
        if field.data and form.accept.data:
            raise validators.ValidationError(
                _("Both reject and accept cannot be set at the same time.")
            )

    def validate_message(form, field):
        """Validate message."""
        if form.reject.data and not field.data.strip():
            raise validators.ValidationError(
                _("You are required to provide a message to the requester"
                  " when you reject a request.")
            )


class DeleteForm(Form):
    """Form used for delete buttons."""

    link = HiddenField()

    delete = SubmitField(_("Revoke"), widget=Button(icon="fa fa-trash-o"))
zenodo-accessrequests
/zenodo-accessrequests-1.0.0a3.tar.gz/zenodo-accessrequests-1.0.0a3/zenodo_accessrequests/forms.py
forms.py
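The acceptance window enforced by `validate_expires_at` — a valid expiry date is strictly after today and at most one year ahead — can be checked without Flask. A small standalone sketch of that rule:

```python
from datetime import datetime, timedelta

today = datetime.utcnow().date()

def in_window(expiry):
    """Mirror of the form rule: a future date, at most 365 days ahead."""
    return today < expiry <= today + timedelta(days=365)

assert in_window(today + timedelta(days=31))       # the form's default
assert not in_window(today)                        # not in the future
assert not in_window(today + timedelta(days=400))  # more than a year ahead
```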
from __future__ import absolute_import, print_function from datetime import timedelta from flask import current_app, render_template, url_for from flask_babelex import gettext as _ from flask_mail import Message from invenio_mail.tasks import send_email from .errors import RecordNotFound from .signals import request_accepted, request_confirmed, request_created, \ request_rejected from .tokens import EmailConfirmationSerializer from .utils import get_record def connect_receivers(): """Connect receivers to signals.""" request_created.connect(send_email_validation) request_confirmed.connect(send_confirmed_notifications) request_rejected.connect(send_reject_notification) # Order is important: request_accepted.connect(create_secret_link) request_accepted.connect(send_accept_notification) def create_secret_link(request, message=None, expires_at=None): """Receiver for request-accepted signal.""" pid, record = get_record(request.recid) if not record: raise RecordNotFound(request.recid) description = render_template( "zenodo_accessrequests/link_description.tpl", request=request, record=record, pid=pid, expires_at=expires_at, message=message, ) request.create_secret_link( record["title"], description=description, expires_at=expires_at ) def send_accept_notification(request, message=None, expires_at=None): """Receiver for request-accepted signal to send email notification.""" pid, record = get_record(request.recid) _send_notification( request.sender_email, _("Access request accepted"), "zenodo_accessrequests/emails/accepted.tpl", request=request, record=record, pid=pid, record_link=request.link.get_absolute_url('invenio_records_ui.recid'), message=message, expires_at=expires_at, ) def send_confirmed_notifications(request): """Receiver for request-confirmed signal to send email notification.""" pid, record = get_record(request.recid) if record is None: current_app.logger.error("Cannot retrieve record %s. 
Emails not sent" % request.recid) return title = _("Access request: %(record)s", record=record["title"]) _send_notification( request.receiver.email, title, "zenodo_accessrequests/emails/new_request.tpl", request=request, record=record, pid=pid, ) _send_notification( request.sender_email, title, "zenodo_accessrequests/emails/confirmation.tpl", request=request, record=record, pid=pid, ) def send_email_validation(request): """Receiver for request-created signal to send email notification.""" token = EmailConfirmationSerializer().create_token( request.id, dict(email=request.sender_email) ) pid, record = get_record(request.recid) _send_notification( request.sender_email, _("Access request verification"), "zenodo_accessrequests/emails/validate_email.tpl", request=request, record=record, pid=pid, days=timedelta( seconds=current_app.config["ACCESSREQUESTS_CONFIRMLINK_EXPIRES_IN"] ).days, confirm_link=url_for( "invenio_records_ui.recid_access_request_email_confirm", pid_value=request.recid, token=token, _external=True, ) ) def send_reject_notification(request, message=None): """Receiver for request-rejected signal to send email notification.""" pid, record = get_record(request.recid) _send_notification( request.sender_email, _("Access request rejected"), "zenodo_accessrequests/emails/rejected.tpl", request=request, record=record, pid=pid, message=message, ) def _send_notification(to, subject, template, **ctx): """Render a template and send as email.""" msg = Message( subject, sender=current_app.config.get('SUPPORT_EMAIL'), recipients=[to] ) msg.body = render_template(template, **ctx) send_email.delay(msg.__dict__)
zenodo-accessrequests
/zenodo-accessrequests-1.0.0a3.tar.gz/zenodo-accessrequests-1.0.0a3/zenodo_accessrequests/receivers.py
receivers.py
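The "Order is important" comment in `connect_receivers` matters because `send_accept_notification` reads `request.link`, which `create_secret_link` sets. A standalone sketch with blinker (the signal library underlying these signals) illustrating connection order; treat in-order dispatch as an assumption that holds for recent blinker releases:

```python
from blinker import Namespace

_signals = Namespace()
accepted = _signals.signal('request-accepted')

calls = []
# Connect two receivers in the order the package relies on.
accepted.connect(lambda sender, **kw: calls.append('create_link'), weak=False)
accepted.connect(lambda sender, **kw: calls.append('notify'), weak=False)

accepted.send('request', message=None, expires_at=None)
print(calls)  # the receivers rely on this being ['create_link', 'notify']
```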
from __future__ import absolute_import, print_function import re from flask import Blueprint, abort, flash, redirect, render_template, \ request, url_for from flask_babelex import gettext as _ from flask_breadcrumbs import register_breadcrumb from flask_login import current_user, login_required from flask_menu import register_menu from invenio_db import db from jinja2 import Markup, escape, evalcontextfilter from ..forms import ApprovalForm, DeleteForm from ..helpers import QueryOrdering from ..models import AccessRequest, RequestStatus, SecretLink from ..utils import get_record blueprint = Blueprint( 'zenodo_accessrequests_settings', __name__, url_prefix="/account/settings/sharedlinks", static_folder="../static", template_folder="../templates", ) _paragraph_re = re.compile(r'(?:\r\n|\r|\n){2,}') @blueprint.app_template_filter() @evalcontextfilter def nl2br(eval_ctx, value): """Template filter to convert newlines to <br>-tags.""" result = u'\n\n'.join(u'<p>%s</p>' % p.replace('\n', '<br>\n') for p in _paragraph_re.split(escape(value))) if eval_ctx.autoescape: result = Markup(result) return result @blueprint.route("/", methods=['GET', 'POST']) @login_required @register_menu( blueprint, 'settings.sharedlinks', _('%(icon)s Shared links', icon='<i class="fa fa-share fa-fw"></i>'), order=9, active_when=lambda: request.endpoint.startswith( "zenodo_accessrequests_settings.") ) @register_breadcrumb( blueprint, 'breadcrumbs.settings.sharedlinks', _('Shared links') ) def index(): """List pending access requests and shared links.""" query = request.args.get('query', '') order = request.args.get('sort', '-created') try: page = int(request.args.get('page', 1)) per_page = int(request.args.get('per_page', 20)) except (TypeError, ValueError): abort(404) # Delete form form = DeleteForm(request.form) if form.validate_on_submit(): link = SecretLink.query_by_owner(current_user).filter_by( id=form.link.data).first() if link.revoke(): flash(_("Shared link revoked."), category='success') db.session.commit() # Links links = SecretLink.query_by_owner(current_user).filter( SecretLink.revoked_at.is_(None) ) # Querying if query: lquery = "%{0}%".format(query) links = links.filter( SecretLink.title.like(lquery) | SecretLink.description.like(lquery) ) # Ordering ordering = QueryOrdering(links, ['title', 'created', 'expires_at'], order) links = ordering.items() # Pending access requests requests = AccessRequest.query_by_receiver(current_user).filter_by( status=RequestStatus.PENDING).order_by('created') return render_template( "zenodo_accessrequests/settings/index.html", links_pagination=links.paginate(page, per_page=per_page), requests=requests, query=query, order=ordering, get_record=get_record, form=DeleteForm(), ) @blueprint.route("/accessrequest/<int:request_id>/", methods=['GET', 'POST']) @login_required @register_breadcrumb( blueprint, 'breadcrumbs.settings.sharedlinks.accessrequest', _('Access request') ) def accessrequest(request_id): """Accept/reject access request.""" r = AccessRequest.get_by_receiver(request_id, current_user) if not r or r.status != RequestStatus.PENDING: abort(404) form = ApprovalForm(request.form) if form.validate_on_submit(): if form.accept.data: r.accept(message=form.data['message'], expires_at=form.expires_at.data) db.session.commit() flash(_("Request accepted.")) return redirect(url_for(".index")) elif form.reject.data: r.reject(message=form.data['message']) db.session.commit() flash(_("Request rejected.")) return redirect(url_for(".index")) pid, record = get_record(r.recid) return 
render_template( "zenodo_accessrequests/settings/request.html", accessrequest=r, record=record, form=form, )
zenodo-accessrequests
/zenodo-accessrequests-1.0.0a3.tar.gz/zenodo-accessrequests-1.0.0a3/zenodo_accessrequests/views/settings.py
settings.py
from __future__ import absolute_import, print_function from datetime import datetime from flask import Blueprint, abort, current_app, flash, redirect, \ render_template, request, url_for from flask_babelex import gettext as _ from flask_login import current_user from invenio_db import db from werkzeug.local import LocalProxy from ..forms import AccessRequestForm from ..models import AccessRequest, RequestStatus from ..tokens import EmailConfirmationSerializer blueprint = Blueprint( 'zenodo_accessrequests', __name__, url_prefix="/record", static_folder="../static", template_folder="../templates", ) # # Template filters # @blueprint.app_template_filter(name="is_restricted") def is_restricted(record): """Template filter to check if a record is restricted.""" return record.get('access_right') == 'restricted' and \ record.get('access_conditions') and \ record.get('owners', []) @blueprint.app_template_filter() def is_embargoed(record): """Template filter to check if a record is embargoed.""" return record.get('access_right') == 'embargoed' and \ record.get('embargo_date') and \ record.get('embargo_date') > datetime.utcnow().date() @blueprint.app_template_filter() def is_removed(record): """Template filter to check if a record is removed.""" return {'primary': 'SPAM'} in record.get('collections', []) # # Views # def access_request(pid, record, template, **kwargs): """Create an access request.""" recid = int(pid.pid_value) datastore = LocalProxy( lambda: current_app.extensions['security'].datastore) # Record must be in restricted access mode. if record.get('access_right') != 'restricted' or \ not record.get('access_conditions'): abort(404) # Record must have an owner and owner must still exists. owners = record.get('owners', []) record_owners = [datastore.find_user(id=owner_id) for owner_id in owners] if not record_owners: abort(404) sender = None initialdata = dict() # Prepare initial form data if current_user.is_authenticated: sender = current_user initialdata['email'] = current_user.email if current_user.profile: initialdata['full_name'] = current_user.profile.full_name # Normal form validation form = AccessRequestForm(formdata=request.form, **initialdata) if form.validate_on_submit(): accreq = AccessRequest.create( recid=recid, receiver=record_owners[0], sender_full_name=form.data['full_name'], sender_email=form.data['email'], justification=form.data['justification'], sender=sender ) db.session.commit() if accreq.status == RequestStatus.EMAIL_VALIDATION: flash(_( "Email confirmation needed: We have sent you an email to " "verify your address. Please check the email and follow the " "instructions to complete the access request."), category='info') else: flash(_("Access request submitted."), category='info') return redirect(url_for('invenio_records_ui.recid', pid_value=recid)) return render_template( template, pid=pid, record=record, form=form, owners=record_owners, ) def confirm(pid, record, template, **kwargs): """Confirm email address.""" recid = int(pid.pid_value) token = request.view_args['token'] # Validate token data = EmailConfirmationSerializer.compat_validate_token(token) if data is None: flash(_("Invalid confirmation link."), category='danger') return redirect(url_for("invenio_records_ui.recid", pid_value=recid)) # Validate request exists. r = AccessRequest.query.get(data['id']) if not r: abort(404) # Confirm email address. 
if r.status != RequestStatus.EMAIL_VALIDATION: abort(404) r.confirm_email() db.session.commit() flash(_("Email validated and access request submitted."), category='info') return redirect(url_for("invenio_records_ui.recid", pid_value=recid))
zenodo-accessrequests
/zenodo-accessrequests-1.0.0a3.tar.gz/zenodo-accessrequests-1.0.0a3/zenodo_accessrequests/views/requests.py
requests.py
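The confirmation view boils down to a token round trip: a token minted at request-creation time is later validated across all supported digest algorithms via `compat_validate_token`. A hedged sketch (not from the package); it assumes an app context with `SECRET_KEY` and `ACCESSREQUESTS_CONFIRMLINK_EXPIRES_IN` configured, and the payload layout is an assumption:

```python
from flask import Flask
from zenodo_accessrequests.tokens import EmailConfirmationSerializer

app = Flask(__name__)
app.config.update(SECRET_KEY='change-me',
                  ACCESSREQUESTS_CONFIRMLINK_EXPIRES_IN=86400)

with app.app_context():
    token = EmailConfirmationSerializer().create_token(
        1, dict(email='jane@example.org'))
    # Tries every supported digest algorithm; returns None on bad tokens.
    data = EmailConfirmationSerializer.compat_validate_token(token)
    assert data['data']['email'] == 'jane@example.org'  # assumed layout
```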
# Zenodo API Client

## Introduction

The Zenodo API Client is a simple wrapper around the [Zenodo REST API](https://developers.zenodo.org/). It supports creation, file upload, metadata annotation, deletion and publication of depositions.

## Limitations

Currently it is limited to depositions of type `Dataset`.

## Installation

```bash
pip install zenodo-api-client
```

## Example

```python
from zenodo_client import *

client = ZenodoClient(
    host='sandbox.zenodo.org',  # for real: zenodo.org
    access_token=access_token   # personal access token from Zenodo
)

# create a deposition on zenodo
depo = client.new_deposition()

# add metadata
metadata = MetaData(
    title='some title',
    description='some description',
    notes='some notes',
    creators=[Creator(name='some creator', affiliation='some affiliation')],
    license='CC-BY-4.0'  # one of the identifiers from https://spdx.org/licenses/
)
client.set_metadata(deposition_id=depo['id'], metadata=metadata)

# upload a file
client.file_upload(deposition_id=depo['id'], path=Path('some/file'))

# publish the deposition
client.publish(deposition_id=depo['id'])
```
zenodo-api-client
/zenodo-api-client-0.0.1.tar.gz/zenodo-api-client-0.0.1/README.md
README.md
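Because the client raises a dynamically named exception class per HTTP status (see `fail()` in the client source), callers catch errors via the generic `Exception` and inspect the class name. A hedged sketch; the deposition id below is made up:

```python
from zenodo_client import ZenodoClient

client = ZenodoClient(host='sandbox.zenodo.org', access_token='...')

try:
    client.publish(deposition_id=123456)  # hypothetical id
except Exception as err:
    # e.g. prints "E404" followed by the JSON error payload from Zenodo
    print(type(err).__name__, err)
```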
from dataclasses import dataclass, asdict from enum import Enum, auto import json from typing import List class ContributorType(Enum): ContactPerson =auto() DataCollector =auto() DataCurator =auto() DataManager =auto() Distributor =auto() Editor =auto() HostingInstitution =auto() Other =auto() Producer =auto() ProjectLeader =auto() ProjectManager =auto() ProjectMember =auto() RegistrationAgency =auto() RegistrationAuthority =auto() RelatedPerson =auto() ResearchGroup =auto() Researcher =auto() RightsHolder =auto() Sponsor =auto() Supervisor =auto() WorkPackageLeader =auto() @property def label(self): return self.name class Relation(Enum): isCitedBy =auto() cites =auto() isSupplementTo =auto() isSupplementedBy =auto() references =auto() isReferencedBy =auto() isPublishedIn =auto() isNewVersionOf =auto() isPreviousVersionOf =auto() isContinuedBy =auto() continues =auto() isDescribedBy =auto() describes =auto() isPartOf =auto() hasPart =auto() isReviewedBy =auto() reviews =auto() isDocumentedBy =auto() documents =auto() compiles =auto() isCompiledBy =auto() isDerivedFrom =auto() isSourceOf =auto() requires =auto() isRequiredBy =auto() isObsoletedBy =auto() obsoletes =auto() isIdenticalTo =auto() isAlternateIdentifier =auto() @property def label(self): return self.name class ResourceType(Enum): publication =auto() publication_annotationcollection =auto() publication_book =auto() publication_section =auto() publication_conferencepaper =auto() publication_datamanagementplan =auto() publication_article =auto() publication_other =auto() publication_patent =auto() publication_preprint =auto() publication_deliverable =auto() publication_milestone =auto() publication_proposal =auto() publication_report =auto() publication_softwaredocumentation =auto() publication_taxonomictreatment =auto() publication_technicalnote =auto() publication_thesis =auto() publication_workingpaper =auto() dataset =auto() image =auto() image_diagram =auto() image_drawing =auto() image_figure =auto() image_other =auto() image_photo =auto() image_plot =auto() lesson =auto() other =auto() physicalobject =auto() poster =auto() presentation =auto() software =auto() video =auto() workflow =auto() @property def label(self): return self.name.replace('_', '-') class UploadType(Enum): publication =auto() dataset =auto() image =auto() lesson =auto() other =auto() physicalobject =auto() poster =auto() presentation =auto() software =auto() video =auto() workflow =auto() @property def label(self): return self.name @dataclass class Contributor(): name :str affiliation :str orcid :str =None type :ContributorType =ContributorType.DataCollector @dataclass class Creator(): name :str affiliation :str @dataclass class Identifier(): identifier :str relation :Relation resource_type :ResourceType =None scheme :str =None # set by Zenodo @dataclass class Subject(): term :str identifier :str # e.g., https://some.url/id scheme :str =None # set by Zenodo @dataclass class MetaData(): title :str description :str upload_type :UploadType =UploadType.dataset # fixed access_right :str =None contributors :List[Contributor] =None creators :List[Creator] =None doi :str =None keywords :List[str] =None language :str =None license :str =None notes :str =None publication_date :str =None related_identifiers :List[Identifier] =None subjects :List[Subject] =None version :str =None def to_json(self): sparse_dict = { x: [{ xx:yy.label if isinstance(yy, Enum) else yy for xx,yy in i.items() if yy is not None } if isinstance(i, dict) else i for i in y ] if isinstance(y, 
list) else y.label if isinstance(y, Enum) else y for x,y in asdict(self).items() if y is not None } return json.dumps({"metadata": sparse_dict})
zenodo-api-client
/zenodo-api-client-0.0.1.tar.gz/zenodo-api-client-0.0.1/src/zenodo_client/model.py
model.py
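A quick check of the serializer: `to_json` drops `None`-valued fields and renders enum fields via their `.label`. A minimal sketch, assuming the module is importable as `zenodo_client.model` (matching the package path above):

```python
from zenodo_client.model import Creator, MetaData

metadata = MetaData(
    title='some title',
    description='some description',
    creators=[Creator(name='Doe, Jane', affiliation='Example University')],
    license='CC-BY-4.0',
)
# Expected to include "upload_type": "dataset" (the default enum's label)
# and to omit every field left as None.
print(metadata.to_json())
```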
from pathlib import Path

import requests

from .model import *


def fail(status_code: int) -> type:
    class _(Exception):
        pass
    _.__name__ = f'E{status_code}'
    return _


class ZenodoClient:
    def __init__(self, host: str, access_token: str):
        self.access_token = access_token
        self.host = host

    def _evaluate(self, r: requests.Response):
        if 200 > r.status_code or r.status_code > 299:
            raise fail(r.status_code)(r.json())
        return r.json()

    def list_depositions(self) -> list:
        """Lists all depositions from the user, including draft depositions.

        Returns
        -------
        list of dict
            Deposition data for each deposition
        """
        r = requests.get(
            f'https://{self.host}/api/deposit/depositions',
            params={'access_token': self.access_token}
        )
        return self._evaluate(r)

    def new_deposition(self) -> dict:
        """Creates a draft deposition.

        Returns
        -------
        dict
            Deposition data
        """
        r = requests.post(
            f'https://{self.host}/api/deposit/depositions',
            params={'access_token': self.access_token},
            json={}
        )
        return self._evaluate(r)

    def file_upload(self, deposition_id: int, path: Path) -> dict:
        """Uploads a file to a draft deposition.

        Parameters
        ----------
        deposition_id : int
            Zenodo deposition id
        path : Path
            local path to the file to be uploaded

        Returns
        -------
        dict
            Deposition data
        """
        bucket_url = self.get(deposition_id)['links']['bucket']
        with path.open('rb') as fp:
            r = requests.put(
                f'{bucket_url}/{path.name}',
                params={'access_token': self.access_token},
                data=fp
            )
        return self._evaluate(r)

    def file_delete(self, deposition_id: int, filename: str):
        """Deletes all uploaded files with the given name from a deposition.

        Parameters
        ----------
        deposition_id : int
            Zenodo deposition id
        filename : str
            File name
        """
        # Iterate over the deposition's file list (not the deposition dict
        # itself) to collect every file matching the given name.
        urls = [file['links']['self']
                for file in self.get(deposition_id)['files']
                if file['filename'] == filename]
        for url in urls:
            r = requests.delete(
                url,
                params={'access_token': self.access_token}
            )
            if r.status_code != 204:
                raise fail(r.status_code)(
                    f"failed to delete {url} ({filename}): {r.json()}")

    def set_metadata(self, deposition_id: int, metadata: MetaData) -> dict:
        """Sets metadata on the deposition draft. All at once.

        Parameters
        ----------
        deposition_id : int
            Zenodo deposition id
        metadata : MetaData
            Complete meta data. All current values will be deleted.

        Returns
        -------
        dict
            Updated deposition data
        """
        r = requests.put(
            f"https://{self.host}/api/deposit/depositions/{deposition_id}",
            params={'access_token': self.access_token},
            data=metadata.to_json(),
            headers={"Content-Type": "application/json"})
        return self._evaluate(r)

    def delete(self, deposition_id: int) -> dict:
        """Deletes a draft deposition.

        Parameters
        ----------
        deposition_id : int
            Zenodo deposition id

        Returns
        -------
        dict
            Deposition data
        """
        depo_url = f'https://{self.host}/api/deposit/depositions/{deposition_id}'
        r = requests.delete(
            depo_url,
            params={'access_token': self.access_token}
        )
        if r.status_code != 204:
            raise fail(r.status_code)(f"failed to delete {depo_url}: {r.json()}")

    def publish(self, deposition_id: int) -> dict:
        """Publishes a draft deposition.

        After calling publish it cannot be altered or deleted anymore.

        Parameters
        ----------
        deposition_id : int
            Zenodo deposition id

        Returns
        -------
        dict
            Deposition data
        """
        r = requests.post(
            f"https://{self.host}/api/deposit/depositions/{deposition_id}/actions/publish",
            params={'access_token': self.access_token},
            headers={"Content-Type": "application/json"}
        )
        return self._evaluate(r)

    def discard(self, deposition_id: int) -> dict:
        """Discards a published deposition.

        After calling discard the deposition has the status 'discarded' and
        is hidden from Zenodo users. To be confirmed!

        Parameters
        ----------
        deposition_id : int
            Zenodo deposition id

        Returns
        -------
        dict
            Deposition data
        """
        r = requests.post(
            f"https://{self.host}/api/deposit/depositions/{deposition_id}/actions/discard",
            params={'access_token': self.access_token},
            headers={"Content-Type": "application/json"}
        )
        return self._evaluate(r)

    def get(self, deposition_id: int) -> dict:
        """Zenodo deposition data.

        Parameters
        ----------
        deposition_id : int
            Zenodo deposition id

        Returns
        -------
        dict
            Deposition data
        """
        r = requests.get(
            f"https://{self.host}/api/deposit/depositions/{deposition_id}",
            params={'access_token': self.access_token},
            headers={"Content-Type": "application/json"}
        )
        return self._evaluate(r)
zenodo-api-client
/zenodo-api-client-0.0.1.tar.gz/zenodo-api-client-0.0.1/src/zenodo_client/__init__.py
__init__.py
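The `fail()` helper manufactures a distinct `Exception` subclass per HTTP status, so tracebacks show e.g. `E404` directly. A standalone re-creation of that pattern, runnable without the package:

```python
def fail(status_code: int) -> type:
    """Create an Exception subclass named after the HTTP status code."""
    class _(Exception):
        pass
    _.__name__ = f'E{status_code}'
    return _

try:
    raise fail(404)({'message': 'Deposition not found'})
except Exception as err:
    assert type(err).__name__ == 'E404'
```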
# zenodo_backpack

ZenodoBackpack provides a robust, standardised and repeatable approach to distributing and using backend databases that bioinformatic tools rely on. These databases are usually tool-specific and are often large enough in size that they cannot be uploaded as data to software repositories (e.g. PyPI imposes a limit of ~50MB).

ZenodoBackpack uploads/downloads data to/from [Zenodo](https://zenodo.org), which means that each dataset is associated with a DOI. Additionally, it encapsulates the uploaded data in a Zenodo Backpack format, which is really just a `CONTENTS.json` file, and compresses the data in `.tar.gz` format before upload. The `CONTENTS.json` file includes md5sum values for each included file for robust verification.

It contains two main methods, which can be accessed through the `zenodo_backpack` script or accessed as a software library:

**create**: turns a target directory into a zenodo_backpack-formatted .tar.gz archive with relevant checksum and version information, ready to be uploaded to Zenodo. It is necessary to provide a data version when doing so - furthermore, when uploading this backpack to zenodo.org, the version specified on the website **must** match that provided when the ZenodoBackpack was created. This allows version tracking and version validation of the data contained within the ZenodoBackpack.

**download_and_extract**: takes a DOI string to download, extract and verify a zenodo_backpack archive from Zenodo.org to a target directory. This returns a ZenodoBackpack object that can be queried for information.

# Usage

## Command line

You can run zenodo_backpack as a stand-alone program, or import its classes and use them in source code.

In command line, zenodo_backpack can create an archive to be uploaded to Zenodo:

```
zenodo_backpack create --input_directory <./INPUT_DIRECTORY> --data_version <VERSION> --output_file <./ARCHIVE.tar.gz>
```

**NOTE**: it is important that when entering metadata on Zenodo, the version specified **MUST** match that supplied with --data_version

An uploaded existing zenodo_backpack can be downloaded (--bar if a graphical progress bar is desired) and unpacked as follows:

```
zenodo_backpack download --doi <MY.DOI/111> --output_directory <OUTPUT_DIRECTORY> --bar
```

## API Usage

You can also import zenodo_backpack as a module:

```
import zenodo_backpack
```

Backpacks can be created, downloaded and acquired from a local store:

### Create a backpack

Create a new backpack in `.tar.gz` format containing the payload data folder:

```
creator = zenodo_backpack.ZenodoBackpackCreator()
creator.create("/path/to/payload_directory", "path/to/archive.tar.gz", "0.1")
```

### Download a backpack

Download a backpack from Zenodo, defined by the DOI:

```
backpack_downloader = zenodo_backpack.ZenodoBackpackDownloader()
backpack = backpack_downloader.download_and_extract('/path/to/download_directory', 'MY.DOI/111111')
```

### Read a backpack that is already downloaded

Defined by a path

```
backpack = zenodo_backpack.acquire(path='/path/to/zenodobackpack/', md5sum=True)
```

or by environment variable

```
backpack = zenodo_backpack.acquire(env_var_name='MY_PROGRAM_DB', version="1.5.2")
```

### Working with a backpack

The `ZenodoBackpack` object returned by `acquire` and `download_and_extract` has instance methods to get at the downloaded data.
For example, it can return the path to the payload directory within the `ZenodoBackpack` containing all the payload data: ``` useful_data_path = zenodo_backpack.acquire(env_var_name='MyZenodoBackpack', version="1.5.2").payload_directory_string() ``` # Installation The easiest way to install is using conda: ``` conda install -c bioconda zenodo_backpack ``` Alternatively, you can git clone the repository and either run the bin/zenodo_backpack executable or install it with setup tools using ``` python setup.py install ``` zenodo_backpack relies on **requests** and **tqdm** to display an optional graphical progress bar.
zenodo-backpack
/zenodo_backpack-0.2.0.tar.gz/zenodo_backpack-0.2.0/README.md
README.md
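For orientation, this is an illustrative (not normative) shape of the `CONTENTS.json` manifest a backpack carries; the key names follow the constants defined in the package source, and the example path and checksum are made up:

```python
# Sketch of a backpack manifest as a Python dict.
contents = {
    "zenodo_backpack_version": 1,          # format version (ZB_VERSION)
    "data_version": "0.1",                 # must match the Zenodo metadata
    "payload_directory": "payload_directory",
    "md5sums": {
        # one entry per payload file, rooted at /payload_directory
        "/payload_directory/db/data.tsv": "6f5902ac237024bdd0c176cb93063dc4",
    },
}
```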
import hashlib
import requests
import shutil
import os
import logging
import json
from tqdm import tqdm
import tarfile
import tempfile
import sys

from .version import __version__


class ZenodoBackpackMalformedException(Exception):
    pass  # No implementation needed


class ZenodoBackpackVersionException(Exception):
    pass


class ZenodoConnectionException(Exception):
    pass


class BrokenSymlinkException(Exception):
    pass


CURRENT_ZENODO_BACKPACK_VERSION = 1

PAYLOAD_DIRECTORY_KEY = 'payload_directory'
PAYLOAD_DIRECTORY = 'payload_directory'

DATA_VERSION = 'data_version'
ZB_VERSION = 'zenodo_backpack_version'


class ZenodoBackpack:
    def __init__(self, base_directory):
        self.base_directory = base_directory

        try:
            with open(os.path.join(self.base_directory, 'CONTENTS.json')) as jsonfile:
                self.contents = json.load(jsonfile)
        except Exception:
            raise ZenodoBackpackMalformedException('Failed to load CONTENTS.json')

        # self.zenodo_backpack_version = self.contents[ZB_VERSION]
        # self.data_version = self.contents[DATA_VERSION]

    def payload_directory_string(self, enter_single_payload_directory=False):
        '''Returns the payload directory string.

        Parameters
        ----------
        enter_single_payload_directory: bool
            If True, the payload directory contains a single directory.
            Return that directory instead of the payload directory itself.
        '''
        payload_dir = os.path.join(self.base_directory, self.contents[PAYLOAD_DIRECTORY_KEY])
        if enter_single_payload_directory:
            files = os.listdir(payload_dir)
            if len(files) != 1:
                raise ZenodoBackpackMalformedException(
                    'Payload directory does not contain exactly one entry, but enter_single_payload_directory was set to True.')
            payload_dir = os.path.join(payload_dir, files[0])
            if not os.path.isdir(payload_dir):
                raise ZenodoBackpackMalformedException(
                    'Payload directory contains a file, not a directory, but enter_single_payload_directory was set to True.')
            return payload_dir
        else:
            return payload_dir

    def data_version_string(self):
        return self.contents[DATA_VERSION]

    def zenodo_backpack_version_string(self):
        return self.contents[ZB_VERSION]


def acquire(path=None, env_var_name=None, md5sum=False, version=None):
    '''
    Look for folder corresponding to a path or environmental variable and
    return it.

    Parameters
    ----------
    path: str
        Path to the backpack. Cannot be used with env_var_name.
    env_var_name: str
        Name of an environment variable that contains a path to a backpack
    md5sum: bool
        If True, use the contents.json file to verify files.
    version: str
        Expected version of the backpack. If not provided, the version in
        the CONTENTS.json file is checked.

    Raises
    ------
    ZenodoBackpackMalformedException:
        If the environment variable does not point to a valid ZenodoBackpack
        i.e. a directory with a CONTENTS.json in it.
    ZenodoBackpackVersionException:
        If not expected Backpack version
    '''
    if path:
        logging_description = "Path {}".format(path)
        basefolder = path
    elif env_var_name:
        logging_description = f"Environment variable {env_var_name}"
        if env_var_name not in os.environ:
            raise ZenodoBackpackMalformedException(
                f'Environment variable {env_var_name} was undefined, when it should define the path to the ZenodoBackpack data.')
        else:
            basefolder = os.environ[env_var_name]
    else:
        raise ZenodoBackpackMalformedException(
            'Either a path or an environment variable name must be provided.')

    if os.path.isdir(basefolder):
        if 'CONTENTS.json' in os.listdir(basefolder):
            logging.info('Retrieval successful. Location of backpack is: {}'.format(basefolder))
            zb = ZenodoBackpack(basefolder)
            if version:
                if version != zb.data_version_string():
                    raise ZenodoBackpackMalformedException(
                        f'Version in CONTENTS.json: {zb.data_version_string()} does not match version provided: {version}')
            if md5sum:
                ZenodoBackpackDownloader().verify(basefolder)
            return zb
        else:
            raise ZenodoBackpackMalformedException(
                f"{logging_description} does not contain a CONTENTS.json file, so is not a valid ZenodoBackpack")
    else:
        raise ZenodoBackpackMalformedException(
            f"{logging_description} is not a directory so cannot hold a ZenodoBackpack")


class ZenodoBackpackDownloader:

    def download_and_extract(self, directory, doi, check_version=True,
                             progress_bar=False, download_retries=3):
        """Actually do the download, to a given path. Also extract the
        archive, and then call verify on it.

        Parameters
        ----------
        directory: str
            Where to download to
        doi: str
            DOI of the Zenodo series
        progress_bar: bool
            If True, display graphical progress bar while downloading from Zenodo
        check_version: bool
            If True, check Zenodo metadata verifies
        download_retries: int
            Number of download attempts

        Returns a ZenodoBackpack object containing the downloaded files
        """
        self._make_sure_path_exists(directory)

        # get record via DOI, then read in json metadata from records_url
        if doi is not None:
            recordID = self._retrieve_record_ID(doi)
            metadata, files = self._retrieve_record_metadata(recordID)

            # create md5sums file for download
            with open(os.path.join(directory, 'md5sums.txt'), 'wt') as md5file:
                for file in files:
                    fname = str(file['key']).split('/')[-1]
                    checksum = str(file['checksum']).split(':')[-1]
                    md5file.write('{},{}\n'.format(checksum, fname))

            for f in files:
                link = f['links']['self']
                filename = f['key'].split('/')[-1]
                checksum = f['checksum']

                # Retry the download up to download_retries times; log the
                # error and try again rather than aborting on the first
                # failure.
                for _ in range(download_retries):
                    try:
                        self._download_file(link, os.path.join(directory, filename), progress_bar)
                    except Exception as e:
                        logging.error('Error during download: {}'.format(e))
                        continue
                    else:
                        break
                else:
                    raise ZenodoConnectionException('Too many unsuccessful retries. Download is aborted')

                # self.verify(acquire(directory), metadata=metadata)
                if self._check_hash(os.path.join(directory, filename), checksum):
                    logging.debug('Correct checksum for downloaded file.')
                else:
                    raise ZenodoBackpackMalformedException(
                        f"Checksum is incorrect for downloaded file '{filename}'. Please download again.")
            else:
                logging.debug('All files have been downloaded.')
        else:
            raise ZenodoConnectionException('Record could not be accessed.')

        # unzip
        # use md5sums.txt file created from metadata to get files
        downloaded_files = [[str(i) for i in line.strip().split(',')]
                            for line in open(os.path.join(directory, 'md5sums.txt'), 'r').readlines()]
        zipped_files = [item for sublist in downloaded_files for item in sublist if '.tar.gz' in item]

        logging.info('Extracting files from archive...')
        for f in zipped_files:
            filepath = (os.path.join(directory, f))
            logging.debug('Extracting {}'.format(filepath))
            tf = tarfile.open(filepath)
            zb_folder = os.path.commonprefix(tf.getnames())
            tf.extractall(directory)
            os.remove(filepath)

        os.remove(os.path.join(directory, 'md5sums.txt'))

        zb_folder = os.path.abspath(os.path.join(directory, zb_folder))
        with open(os.path.join(zb_folder, 'CONTENTS.json')) as json_file:
            contents = json.load(json_file)

        zb = ZenodoBackpack(zb_folder)

        if not check_version:
            self.verify(zb)
        else:
            self.verify(zb, metadata=metadata)

        return zb

    def verify(self, zenodo_backpack, metadata=None, passed_version=None):
        """Verify that a downloaded directory is in working order.

        If metadata downloaded from Zenodo is provided, it will be checked
        as well.

        Reads CONTENTS.json within the backpack directory, which contains
        md5 sums for the files in a single extracted payload folder with an
        arbitrary name.

        Parameters
        ----------
        zenodo_backpack: ZenodoBackpack
            Downloaded and extracted backpack to verify
        metadata: json dict
            Downloaded metadata from Zenodo containing version information
        passed_version: str
            Passed specific version to verify

        Returns nothing if verification works, otherwise raises
        ZenodoBackpackMalformedException or ZenodoBackpackVersionException
        """
        # extract identifying keys for version and zenodo_backpack_version
        version = zenodo_backpack.data_version_string()
        zenodo_backpack_version = zenodo_backpack.zenodo_backpack_version_string()
        payload_folder = zenodo_backpack.payload_directory_string()

        if metadata:
            logging.info('Verifying version and checksums...')
            metadata_ver = str(metadata['metadata']['version']).strip()
            if str(version).strip() != metadata_ver:
                raise ZenodoBackpackMalformedException(
                    f'Version in CONTENTS.json: {version} does not match version in Zenodo metadata: {metadata_ver}')
        elif passed_version:
            logging.info('Verifying version and checksums...')
            if str(version).strip() != str(passed_version).strip():
                raise ZenodoBackpackMalformedException(
                    f'Version in CONTENTS.json: {version} does not match version provided: {passed_version}')
        else:
            logging.warning('Not using version verification.')
            logging.info('Verifying checksums...')

        if zenodo_backpack_version != CURRENT_ZENODO_BACKPACK_VERSION:
            raise ZenodoBackpackVersionException('Incorrect ZENODO Backpack version: {} Expected: {}'
                                                 .format(zenodo_backpack_version, CURRENT_ZENODO_BACKPACK_VERSION))

        # The rest of contents should only be files with md5 sums.
for payload_file in zenodo_backpack.contents['md5sums'].keys(): filepath = os.path.join(os.path.split(payload_folder)[0], payload_file[1:]) # remove slash to enable os.path.join if not self._check_hash(filepath, zenodo_backpack.contents['md5sums'][payload_file], metadata=False): raise ZenodoBackpackMalformedException('Extracted file md5 sum does not match that in JSON file.') logging.info('Verification success.') def _retrieve_record_ID(self, doi): """Parses provided DOI retrieve associated Zenodo URL which also contains record ID Arguments: DOI (str): published DOI associated with file uploaded to Zenodo Returns: recordID (str): last part of Zenodo url associated with DOI """ if not doi.startswith('http'): doi = 'https://doi.org/' + doi try: logging.debug(f"Retrieving URL {doi}") r = requests.get(doi, timeout=15.) except Exception as e: raise ZenodoConnectionException('Connection error: {}'.format(e)) if not r.ok: raise ZenodoConnectionException('DOI could not be resolved. Check your DOI is correct.') recordID = r.url.split('/')[-1].strip() return recordID def _retrieve_record_metadata(self, recordID): """Parses provided recordID to access Zenodo API records and download metadata json Arguments: recordID (str): Zenodo record number Returns: js (json object): json metadata file retrieved from Zenodo API. js['files'] (list): list of files associated with recordID in question """ records_url = 'https://zenodo.org/api/records/' try: r = requests.get(records_url + recordID, timeout=15.) except Exception as e: raise ZenodoConnectionException('Error during metadata retrieval: {}'.format(e)) if r.ok: js = json.loads(r.text) return js, js['files'] def _check_hash(self, filename, checksum, metadata=True): """Compares MD5 sum of file to checksum Arguments: filename (str): Path of file to md5sum checkmsum: (str): md5 checksum returns True if checksum is correct """ if metadata: algorithm, value = checksum.split(':') else: algorithm = 'md5' value = checksum if not os.path.exists(filename): raise FileNotFoundError(filename) h = hashlib.new(algorithm) with open(filename, 'rb') as f: while True: data = f.read(4096) if not data: break h.update(data) digest = h.hexdigest() return value == digest def _download_file(self, file_url, out_file, progress_bar=False): """Download a file to disk Streams a file from URL to disk. 
Can optionally use tqdm for a visual download bar Arguments: file_url (str): URL of file to download out_file (str): Target file path progress_bar (bool): Display graphical progresss bar """ if progress_bar: logging.info('Downloading {} to {}.'.format(file_url, out_file)) response = requests.get(file_url, stream=True) total_size_in_bytes = int(response.headers.get('content-length', 0)) block_size = 1024 progress_bar = tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True) with open(out_file, 'wb') as file: for data in response.iter_content(block_size): progress_bar.update(len(data)) file.write(data) progress_bar.close() else: with requests.get(file_url, stream=True) as r: with open(out_file, 'wb') as f: shutil.copyfileobj(r.raw, f) def _extract_all(self, archive, extract_path): for filename in archive: shutil.unpack_archive(filename, extract_path) def _make_sure_path_exists(self, path): """Create directory if it does not exist.""" if not path: return if not os.path.exists(path): try: os.makedirs(path) except Exception as e: logging.error('Specified path does not exist: ' + path + '\n') raise e class ZenodoBackpackCreator: def create(self, input_directory, output_file, data_version, force=False): """Creates Zenodo backpack Parameters: ---------- input_directory: str Files to be packaged output_file: str Archive .tar.gz to be created. Automatically appends '.tar.gz' if needed force: True or False If True, overwrite an existing output_file if required. If False, don't overwrite. data_version: Passes the data version of the file to archive NOTE!! Same version must be specified in Zenodo metadata when file is uploaded, else error. Returns nothing, unless input_directory is not a directory or output_file exists, which raises Exceptions """ if not str(output_file).endswith('.tar.gz'): output_file = os.path.join('{}.zb.tar.gz'.format(str(output_file))) if os.path.isfile(output_file) and force is False: raise FileExistsError('File exists. Please use --force to overwrite existing archives.') elif os.path.isfile(output_file) and force is True: os.remove(output_file) if not os.path.isdir(input_directory): raise NotADirectoryError('Only the archiving of directories is currently supported.') if os.path.isdir(output_file): raise IsADirectoryError('Cannot specify existing directory as output. 
Output must be named *.tar.gz file.') logging.info('Reading files and calculating checksums.') # recursively get a list of files in the input_directory and md5 sum for each file try: _, filenames = self._scandir(input_directory) except Exception as e: logging.error(e) raise e # Generate md5 sums & make JSON relative to input_directory folder parent_dir = str(os.path.abspath(os.path.join(input_directory, os.pardir))) base_folder = os.path.basename(os.path.normpath(input_directory)) contents = {} contents['md5sums'] = {str(file).replace(parent_dir, "").replace(base_folder, PAYLOAD_DIRECTORY): self._md5sum_file(file) for file in filenames} # add metadata to contents: contents[ZB_VERSION] = CURRENT_ZENODO_BACKPACK_VERSION contents[DATA_VERSION] = data_version contents[PAYLOAD_DIRECTORY_KEY] = PAYLOAD_DIRECTORY # write json to /tmp tmpdir = tempfile.TemporaryDirectory() contents_json = os.path.join(tmpdir.name, 'CONTENTS.json') with open(contents_json, 'w') as c: json.dump(contents, c) logging.info('Creating archive at: {}'.format(output_file)) archive = tarfile.open(os.path.join(output_file), "w|gz", dereference=True) root_folder_name = f'{base_folder}.zb' archive.add(contents_json, os.path.join(root_folder_name, 'CONTENTS.json')) archive.add(input_directory, arcname=os.path.join(root_folder_name, PAYLOAD_DIRECTORY)) archive.close() tmpdir.cleanup() logging.info('ZenodoBackpack created successfully!') def _md5sum_file(self, file): """Computes MD5 sum of file. Arguments: file (str): Path of file to md5sum Returns: str: md5sum """ block_size = 4096 m = hashlib.md5() with open(file, 'rb') as f: while True: data = f.read(block_size) if not data: return m.hexdigest() m.update(data) def _scandir(self, dir): """Recursively scans directory Arguments: dir (str): Path of directory to scan Returns: subfolders: (list) list of all subfolders. Primarily used for recursion. files: (list) list of the paths of all files in directory """ subfolders, files = [], [] for f in os.scandir(dir): if f.is_dir(): subfolders.append(f.path) if os.path.islink(f.path): #make sure symlink is not broken if not os.path.exists(os.path.abspath(f.path)): raise BrokenSymlinkException else: files.append(f.path) if f.is_file(): files.append(os.path.abspath(f.path)) for dir in list(subfolders): sf, f = self._scandir(dir) subfolders.extend(sf) files.extend(f) return subfolders, files
zenodo-backpack
/zenodo_backpack-0.2.0.tar.gz/zenodo_backpack-0.2.0/zenodo_backpack/__init__.py
__init__.py
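A minimal end-to-end sketch tying the creator and `acquire` together, with no Zenodo round-trip; all paths below are placeholders for your own, and the intermediate extraction of the archive (which yields a `<name>.zb` folder) is assumed to have happened:

```python
import logging
from zenodo_backpack import ZenodoBackpackCreator, acquire

logging.basicConfig(level=logging.INFO)

# Package a data directory; the version string must later match the
# version entered in the Zenodo metadata.
ZenodoBackpackCreator().create('./my_database', './my_database.zb.tar.gz', '0.1')

# ...after extracting the archive somewhere, e.g. ./extracted/my_database.zb:
backpack = acquire(path='./extracted/my_database.zb', md5sum=True)
print(backpack.data_version_string())       # '0.1'
print(backpack.payload_directory_string())  # where the payload data lives
```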
<!-- <p align="center"> <img src="docs/source/logo.png" height="150"> </p> --> <h1 align="center"> Zenodo Client </h1> <p align="center"> <a href="https://github.com/cthoyt/zenodo-client/actions?query=workflow%3ATests"> <img alt="Tests" src="https://github.com/cthoyt/zenodo-client/workflows/Tests/badge.svg" /> </a> <a href="https://github.com/cthoyt/cookiecutter-python-package"> <img alt="Cookiecutter template from @cthoyt" src="https://img.shields.io/badge/Cookiecutter-snekpack-blue" /> </a> <a href="https://pypi.org/project/zenodo_client"> <img alt="PyPI" src="https://img.shields.io/pypi/v/zenodo_client" /> </a> <a href="https://pypi.org/project/zenodo_client"> <img alt="PyPI - Python Version" src="https://img.shields.io/pypi/pyversions/zenodo_client" /> </a> <a href="https://github.com/cthoyt/zenodo-client/blob/main/LICENSE"> <img alt="PyPI - License" src="https://img.shields.io/pypi/l/zenodo_client" /> </a> <a href='https://zenodo_client.readthedocs.io/en/latest/?badge=latest'> <img src='https://readthedocs.org/projects/zenodo_client/badge/?version=latest' alt='Documentation Status' /> </a> <a href="https://zenodo.org/badge/latestdoi/343513445"> <img src="https://zenodo.org/badge/343513445.svg" alt="DOI"> </a> </p> A wrapper for the Zenodo API. ## 💪 Getting Started The first example shows how you can set some configuration then never worry about whether it's been uploaded already or not - all baked in with [`pystow`](https://github.com/cthoyt/pystow). On the first time this script is run, the new deposition is made, published, and the identifier is stored with the given key in your `~/.config/zenodo.ini`. Next time it's run, the deposition will be looked up, and the data will be uploaded. Versioning is given automatically by date, and if multiple versions are uploaded on one day, then a dash and the revision are appended. ```python from zenodo_client import Creator, Metadata, ensure_zenodo # Define the metadata that will be used on initial upload data = Metadata( title='Test Upload 3', upload_type='dataset', description='test description', creators=[ Creator( name='Hoyt, Charles Tapley', affiliation='Harvard Medical School', orcid='0000-0003-4423-4370', ), ], ) res = ensure_zenodo( key='test3', # this is a unique key you pick that will be used to store # the numeric deposition ID on your local system's cache data=data, paths=[ '/Users/cthoyt/Desktop/test1.png', ], sandbox=True, # remove this when you're ready to upload to real Zenodo ) from pprint import pprint pprint(res.json()) ``` A real-world example can be found here: https://github.com/cthoyt/nsockg. The following example shows how to use the Zenodo uploader if you already know what your deposition identifier is. ```python from zenodo_client import update_zenodo # The ID from your deposition SANDBOX_DEP_ID = '724868' # Paths to local files. Good to use in combination with resources that are always # dumped to the same place by a given script paths = [ # os.path.join(DATABASE_DIRECTORY, 'alts_sample.tsv') '/Users/cthoyt/Desktop/alts_sample.tsv', ] # Don't forget to set the ZENODO_API_TOKEN environment variable or # any valid way to get zenodo/api_token from PyStow. update_zenodo(SANDBOX_DEP_ID, paths) ``` The following example shows how to look up the latest version of a record. 
```python from zenodo_client import Zenodo zenodo = Zenodo() OOH_NA_NA_RECORD = '4020486' new_record = zenodo.get_latest_record(OOH_NA_NA_RECORD) ``` Even further, the latest version of `names.tsv.gz` can be automatically downloaded to the `~/.data/zenodo/<conceptrecid>/<version>/<path>` via `pystow` with: ```python from zenodo_client import Zenodo zenodo = Zenodo() OOH_NA_NA_RECORD = '4020486' new_record = zenodo.download_latest(OOH_NA_NA_RECORD, 'names.tsv.gz') ``` A real-world example can be found [here](https://github.com/pyobo/pyobo/blob/master/src/pyobo/resource_utils.py) where the latest build of the [Ooh Na Na](https://cthoyt.com/2020/04/18/ooh-na-na.html) nomenclature database is automatically downloaded from Zenodo, even though the PyOBO package only hardcodes the first deposition ID. ### Command Line Interface The zenodo_client command line tool is automatically installed. It can be used from the shell with the `--help` flag to show all subcommands: ```shell $ zenodo_client --help ``` It can be run with `zenodo_client <deposition ID> <path 1> ... <path N>` ## ⬇️ Installation The most recent release can be installed from [PyPI](https://pypi.org/project/zenodo_client/) with: ```bash $ pip install zenodo_client ``` The most recent code and data can be installed directly from GitHub with: ```bash $ pip install git+https://github.com/cthoyt/zenodo-client.git ``` To install in development mode, use the following: ```bash $ git clone git+https://github.com/cthoyt/zenodo-client.git $ cd zenodo-client $ pip install -e . ``` ## ⚖️ License The code in this package is licensed under the MIT License. ## 🙏 Contributing Contributions, whether filing an issue, making a pull request, or forking, are appreciated. See [CONTRIBUTING.rst](https://github.com/cthoyt/zenodo-client/blob/master/CONTRIBUTING.rst) for more information on getting involved. ## 🍪 Cookiecutter Acknowledgement This package was created with [@audreyr](https://github.com/audreyr)'s [cookiecutter](https://github.com/cookiecutter/cookiecutter) package using [@cthoyt](https://github.com/cthoyt)'s [cookiecutter-python-package](https://github.com/cthoyt/cookiecutter-python-package) template. ## 🛠️ Development The final section of the README is for if you want to get involved by making a code contribution. ### ❓ Testing After cloning the repository and installing `tox` with `pip install tox`, the unit tests in the `tests/` folder can be run reproducibly with: ```shell $ tox ``` Additionally, these tests are automatically re-run with each commit in a [GitHub Action](https://github.com/cthoyt/zenodo-client/actions?query=workflow%3ATests). ### 📦 Making a Release After installing the package in development mode and installing `tox` with `pip install tox`, the commands for making a new release are contained within the `finish` environment in `tox.ini`. Run the following from the shell: ```shell $ tox -e finish ``` This script does the following: 1. Uses BumpVersion to switch the version number in the `setup.cfg` and `src/zenodo_client/version.py` to not have the `-dev` suffix 2. Packages the code in both a tar archive and a wheel 3. Uploads to PyPI using `twine`. Be sure to have a `.pypirc` file configured to avoid the need for manual input at this step 4. Push to GitHub. You'll need to make a release going with the commit where the version was bumped. 5. Bump the version to the next patch. If you made big changes and want to bump the version by minor, you can use `tox -e bumpversion minor` after.
zenodo-client
/zenodo_client-0.3.2.tar.gz/zenodo_client-0.3.2/README.md
README.md
import datetime from typing import Optional, Sequence from pydantic import BaseModel, Field from typing_extensions import Literal __all__ = [ "Creator", "Metadata", ] # https://developers.zenodo.org/#rest-api class Creator(BaseModel): """A creator, see https://developers.zenodo.org/#representation.""" name: str = Field( ..., description="Name of the creator in the format Family name, given names", example="Hoyt, Charles Tapley" ) affiliation: Optional[str] = Field(description="affiliation of the creator", example="Harvard Medical School") orcid: Optional[str] = Field(description="ORCID identifier of the creator", example="0000-0003-4423-4370") gnd: Optional[str] = Field( description="German National Library identifier of the creator. " "See also https://www.wikidata.org/wiki/Property:P227." ) @property def orcid_url(self) -> Optional[str]: """Get the ORCID identifier as a URL.""" return f"https://orcid.org/{self.orcid}" if self.orcid else None @property def gnd_url(self) -> Optional[str]: """Get the GND identifier as a URL.""" return f"https://d-nb.info/gnd/{self.gnd}" def __post_init__(self): # noqa:D105 if "," not in self.name: raise ValueError("name should be in format Family name, given names") UploadType = Literal[ "publication", "poster", "presentation", "dataset", "image", "video", "software", "lesson", "physicalobject", "other", ] PublicationType = Literal[ "annotationcollection", "book", "section", "conferencepaper", "datamanagementplan", "article", "patent", "preprint", "deliverable", "milestone", "proposal", "report", "softwaredocumentation", "taxonomictreatment", "technicalnote", "thesis", "workingpaper", "other", ] ImageType = Literal[ "figure", "plot", "drawing", "diagram", "photo", "other", ] AccessRight = Literal[ "open", "embargoed", "restricted", "closed", ] def _today_str() -> str: return datetime.datetime.today().strftime("%Y-%m-%d") class Metadata(BaseModel): """Metadata for the Zenodo deposition API.""" title: str upload_type: UploadType description: str creators: Sequence[Creator] access_right: AccessRight = "open" language: Optional[str] = "eng" version: Optional[str] = Field(default_factory=_today_str) license: Optional[str] = "CC0-1.0" publication_type: Optional[PublicationType] = None image_type: Optional[ImageType] = None def __post_init__(self): # noqa:D105 if self.upload_type == "publication": if self.publication_type is None: raise ValueError("missing publication_type") elif self.upload_type == "image": if self.image_type is None: raise ValueError("missing image_type") if self.access_right in {"open", "embargoed"}: if self.license is None: raise ValueError(f"need a license for access_right={self.access_right}")
zenodo-client
/zenodo_client-0.3.2.tar.gz/zenodo_client-0.3.2/src/zenodo_client/struct.py
struct.py
import datetime
import logging
import os
import time
from pathlib import Path
from typing import Any, Callable, Iterable, List, Mapping, Optional, Sequence, Union

import pystow
import requests

from .struct import Metadata

__all__ = [
    "ensure_zenodo",
    "update_zenodo",
    "create_zenodo",
    "download_zenodo",
    "download_zenodo_latest",
    "Zenodo",
]

logger = logging.getLogger(__name__)

Data = Union[Mapping[str, Any], Metadata]
PartsFunc = Callable[[str, str, str], Sequence[str]]
PartsHint = Union[None, Sequence[str], PartsFunc]
Paths = Union[str, Path, Iterable[str], Iterable[Path]]


def ensure_zenodo(key: str, data: Data, paths: Paths, **kwargs) -> requests.Response:
    """Create a Zenodo record if it doesn't exist, or update one that does."""
    return Zenodo(**kwargs).ensure(key=key, data=data, paths=paths)


def create_zenodo(data: Data, paths: Paths, **kwargs) -> requests.Response:
    """Create a Zenodo record."""
    return Zenodo(**kwargs).create(data, paths)


def update_zenodo(deposition_id: str, paths: Paths, **kwargs) -> requests.Response:
    """Update a Zenodo record."""
    return Zenodo(**kwargs).update(deposition_id, paths)


def download_zenodo(deposition_id: str, name: str, force: bool = False, **kwargs) -> Path:
    """Download a Zenodo record."""
    return Zenodo(**kwargs).download(deposition_id, name=name, force=force)


def download_zenodo_latest(deposition_id: str, path: str, force: bool = False, **kwargs) -> Path:
    """Download the latest Zenodo record."""
    return Zenodo(**kwargs).download_latest(deposition_id, name=path, force=force)


class Zenodo:
    """A wrapper around parts of the Zenodo API."""

    def __init__(self, access_token: Optional[str] = None, sandbox: bool = False):
        """Initialize the Zenodo class.

        :param access_token: The Zenodo API token. Read with :mod:`pystow` from zenodo/api_token,
            or zenodo/sandbox_api_token if in sandbox mode.
        :param sandbox: If true, run in the Zenodo sandbox.
        """
        self.sandbox = sandbox
        if self.sandbox:
            self.base = "https://sandbox.zenodo.org"
            # Use subsection support introduced in PyStow in
            # https://github.com/cthoyt/pystow/pull/59
            self.module = "zenodo:sandbox"
            self.access_token = pystow.get_config(self.module, "api_token", passthrough=access_token)
            if self.access_token is None:
                # old-style fallback
                self.access_token = pystow.get_config("zenodo", "sandbox_api_token", raise_on_missing=True)
        else:
            self.base = "https://zenodo.org"
            self.module = "zenodo"
            self.access_token = pystow.get_config(
                self.module, "api_token", passthrough=access_token, raise_on_missing=True
            )

        # Base URL for the API
        self.api_base = self.base + "/api"
        logger.debug("using Zenodo API at %s", self.api_base)

        # Base URL for depositions, relative to the API base
        self.depositions_base = self.api_base + "/deposit/depositions"

    def ensure(self, key: str, data: Data, paths: Paths) -> requests.Response:
        """Create a Zenodo record if it doesn't exist, or update one that does."""
        deposition_id = pystow.get_config(self.module, key)
        if deposition_id is not None:
            logger.info("mapped local key %s to deposition %s", key, deposition_id)
            return self.update(deposition_id=deposition_id, paths=paths)

        res = self.create(data=data, paths=paths)
        # Write the ID to the key in the local configuration
        # so it doesn't need to be created from scratch next time
        pystow.write_config(self.module, key, str(res.json()["id"]))
        return res

    def create(self, data: Data, paths: Paths) -> requests.Response:
        """Create a record.

        :param data: The JSON data to send to the new deposition
        :param paths: Paths to local files to upload
        :return: The response JSON from the Zenodo API
        :raises ValueError: if the response is missing a "bucket"
        """
        if isinstance(data, Metadata):
            logger.debug("serializing metadata")
            data = {
                "metadata": {key: value for key, value in data.dict(exclude_none=True).items() if value},
            }

        res = requests.post(
            self.depositions_base,
            json=data,
            params={"access_token": self.access_token},
        )
        res.raise_for_status()

        res_json = res.json()
        bucket = res_json.get("links", {}).get("bucket")
        if bucket is None:
            raise ValueError(f"No bucket in response. Got: {res_json}")

        logger.info("uploading files to bucket %s", bucket)
        self._upload_files(bucket=bucket, paths=paths)

        deposition_id = res_json["id"]
        logger.info("publishing files to deposition %s", deposition_id)
        return self.publish(deposition_id)

    def publish(self, deposition_id: str, sleep: bool = True) -> requests.Response:
        """Publish a record that's in edit mode.

        :param deposition_id: The identifier of the deposition on Zenodo. It should be in edit mode.
        :param sleep: Sleep for one second just in case of race conditions. If you're feeling lucky and
            rushed, you might be able to get away with disabling this.
        :return: The response JSON from the Zenodo API
        """
        if sleep:
            time.sleep(1)
        res = requests.post(
            f"{self.depositions_base}/{deposition_id}/actions/publish",
            params={"access_token": self.access_token},
        )
        res.raise_for_status()
        return res

    def update(self, deposition_id: str, paths: Paths) -> requests.Response:
        """Create a new version of the given record with the given files."""
        # Prepare a new version based on the old version
        # see: https://developers.zenodo.org/#new-version
        res = requests.post(
            f"{self.depositions_base}/{deposition_id}/actions/newversion",
            params={"access_token": self.access_token},
        )
        res.raise_for_status()

        # Parse out the new version (@zenodo please give this as its own field!)
        new_deposition_id = res.json()["links"]["latest_draft"].split("/")[-1]

        # Get all metadata associated with the new version (this has updated DOIs, etc.)
        # see: https://developers.zenodo.org/#retrieve
        res = requests.get(
            f"{self.depositions_base}/{new_deposition_id}",
            params={"access_token": self.access_token},
        )
        res.raise_for_status()
        new_deposition_data = res.json()

        # Update the version
        new_deposition_data["metadata"]["version"] = _prepare_new_version(
            new_deposition_data["metadata"]["version"]
        )
        new_deposition_data["metadata"]["publication_date"] = datetime.datetime.today().strftime("%Y-%m-%d")

        # Update the deposition for the new version
        # see: https://developers.zenodo.org/#update
        res = requests.put(
            f"{self.depositions_base}/{new_deposition_id}",
            json=new_deposition_data,
            params={"access_token": self.access_token},
        )
        res.raise_for_status()

        bucket = new_deposition_data["links"]["bucket"]

        # Upload new files. Zenodo calculates the hash on all of these, and if
        # no files have changed, there will be no update.
        self._upload_files(bucket=bucket, paths=paths)

        # Send the publish command
        return self.publish(new_deposition_id)

    def _upload_files(self, *, bucket: str, paths: Paths) -> List[requests.Response]:
        _paths = [paths] if isinstance(paths, (str, Path)) else paths
        rv = []
        # see https://developers.zenodo.org/#quickstart-upload
        for path in _paths:
            with open(path, "rb") as file:
                res = requests.put(
                    f"{bucket}/{os.path.basename(path)}",
                    data=file,
                    params={"access_token": self.access_token},
                )
                res.raise_for_status()
                rv.append(res)
        return rv

    def get_record(self, record_id: Union[int, str]) -> requests.Response:
        """Get the metadata for a given record."""
        res = requests.get(
            f"{self.api_base}/records/{record_id}",
            params={"access_token": self.access_token},
        )
        res.raise_for_status()
        return res

    def get_latest_record(self, record_id: Union[int, str]) -> str:
        """Get the latest record related to the given record."""
        res_json = self.get_record(record_id).json()
        # Still works even in the case that the given record ID is the latest.
        latest = res_json["links"]["latest"].split("/")[-1]
        logger.debug("latest for zenodo.record:%s is zenodo.record:%s", record_id, latest)
        return latest

    def download(self, record_id: Union[int, str], name: str, *, force: bool = False, parts: PartsHint = None) -> Path:
        """Download the file for the given record.

        :param record_id: The Zenodo record id
        :param name: The name of the file in the Zenodo record
        :param parts: Optional arguments on where to store with :func:`pystow.ensure`. If none given,
            goes in ``<PYSTOW_HOME>/zenodo/<CONCEPT_RECORD_ID>/<RECORD>/<PATH>``, where
            ``CONCEPT_RECORD_ID`` is the consistent concept record ID for all versions of the same record.
            If a function is given, the function should take 3 positional arguments: concept record id,
            record id, and version, then return a sequence for PyStow. The name of the file is
            automatically appended to the end of the sequence.
        :param force: Should the file be re-downloaded if it already is cached? Defaults to false.
        :returns: the path to the downloaded file.
        :raises FileNotFoundError: If the Zenodo record doesn't have a file with the given name

        For example, to download the most recent version of NSoC-KG, you can use the following command:

        >>> path = Zenodo().download('4574555', 'triples.tsv')

        Even as new versions of the data are uploaded, this command will always be able to check if a new
        version is available, download it if it is, and return the local file path. If the most recent
        version is already downloaded, then it returns the local file path to the cached file.

        The file path uses :mod:`pystow` under the ``zenodo`` module and uses the "concept record ID" as
        a submodule since that is the consistent identifier between different records that are versions
        of the same data.
        """
        res_json = self.get_record(record_id).json()
        # conceptrecid is the consistent record ID for all versions of the same record
        concept_record_id = res_json["conceptrecid"]
        # FIXME send error report to zenodo about this - shouldn't version be required?
        version = res_json["metadata"].get("version", "v1")
        logger.debug("version for zenodo.record:%s is %s", record_id, version)

        for file in res_json["files"]:
            if file["key"] == name:
                url = file["links"]["self"]
                break
        else:
            raise FileNotFoundError(f"zenodo.record:{record_id} does not have a file with key {name}")

        if parts is None:
            parts = [self.module.replace(":", "-"), concept_record_id, version]
        elif callable(parts):
            parts = parts(concept_record_id, str(record_id), version)
        return pystow.ensure(*parts, name=name, url=url, force=force)

    def download_latest(
        self,
        record_id: Union[int, str],
        name: str,
        *,
        force: bool = False,
        parts: PartsHint = None,
    ) -> Path:
        """Download the latest version of the file."""
        latest_record_id = self.get_latest_record(record_id)
        return self.download(latest_record_id, name=name, force=force, parts=parts)


def _prepare_new_version(old_version: str) -> str:
    new_version = datetime.datetime.today().strftime("%Y-%m-%d")
    if old_version == new_version:
        new_version += "-1"
    elif old_version.startswith(new_version) and old_version[-2] == "-" and old_version[-1].isnumeric():
        new_version += "-" + str(1 + int(old_version[-1]))  # please don't do this more than 10 times a day
    return new_version
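# Usage sketch (not part of the original module): the ``parts`` hook of
# ``Zenodo.download`` documented above. The record and file names come from the
# docstring example; the "custom" directory name is hypothetical.
if __name__ == "__main__":
    zenodo = Zenodo()
    # Store under <PYSTOW_HOME>/custom/<concept record id>/<version>/ instead of
    # the default "zenodo" module location.
    path = zenodo.download(
        "4574555",
        "triples.tsv",
        parts=lambda concept_id, record_id, version: ["custom", concept_id, version],
    )
    print(path)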
zenodo-client
/zenodo_client-0.3.2.tar.gz/zenodo_client-0.3.2/src/zenodo_client/api.py
api.py
Usage
=====

This example from PyOBO shows how to update a given deposition (the Zenodo word for a record):

.. code-block:: python

    from zenodo_client import Zenodo

    # The ID from your deposition
    SANDBOX_DEP_ID = '724868'

    # Don't forget to set the ZENODO_API_TOKEN environment variable or
    # any valid way to get zenodo/api_token from PyStow.
    zenodo = Zenodo()

    # Paths to local files. Good to use in combination with resources that are always
    # dumped to the same place by a given script
    paths = [
        # os.path.join(DATABASE_DIRECTORY, 'alts_sample.tsv')
        '/Users/cthoyt/Desktop/alts_sample.tsv'
    ]

    # Magically upload data to this record
    zenodo.update(SANDBOX_DEP_ID, paths)
zenodo-client
/zenodo_client-0.3.2.tar.gz/zenodo_client-0.3.2/docs/source/usage.rst
usage.rst
Installation
============

The most recent release can be installed from `PyPI <https://pypi.org/project/zenodo_client>`_ with:

.. code-block:: shell

    $ pip install zenodo_client

The most recent code and data can be installed directly from GitHub with:

.. code-block:: shell

    $ pip install git+https://github.com/cthoyt/zenodo-client.git

To install in development mode, use the following:

.. code-block:: shell

    $ git clone https://github.com/cthoyt/zenodo-client.git
    $ cd zenodo-client
    $ pip install -e .
zenodo-client
/zenodo_client-0.3.2.tar.gz/zenodo_client-0.3.2/docs/source/installation.rst
installation.rst
zenodo_get: a downloader for Zenodo records
===========================================

Travis:[![Build Status](https://travis-ci.org/dvolgyes/zenodo_get.svg?branch=master)](https://travis-ci.org/dvolgyes/zenodo_get)
CircleCI:[![Build status](https://circleci.com/gh/dvolgyes/zenodo_get.svg?style=svg)](https://circleci.com/gh/dvolgyes/zenodo_get)
SemaphoreCI:[![Build Status](https://semaphoreci.com/api/v1/dvolgyes/zenodo_get/branches/master/badge.svg)](https://semaphoreci.com/dvolgyes/zenodo_get)
AppVeyor:[![Build status](https://ci.appveyor.com/api/projects/status/f6hw96rhdl104ch9?svg=true)](https://ci.appveyor.com/project/dvolgyes/zenodo-get)
GitlabCI:[![pipeline status](https://gitlab.com/dvolgyes/zenodo_get/badges/master/pipeline.svg)](https://gitlab.com/dvolgyes/zenodo_get/commits/master)

Coveralls:[![Coverage Status](https://img.shields.io/coveralls/github/dvolgyes/zenodo_get/master)](https://coveralls.io/github/dvolgyes/zenodo_get?branch=master)
Codecov:[![codecov](https://codecov.io/gh/dvolgyes/zenodo_get/branch/master/graph/badge.svg)](https://codecov.io/gh/dvolgyes/zenodo_get)
Snyk:[![Known Vulnerabilities](https://snyk.io/test/github/dvolgyes/zenodo_get/badge.svg)](https://snyk.io/test/github/dvolgyes/zenodo_get)

This is a Python 3 tool which can mass-download files from Zenodo records.

[![pyversion](https://img.shields.io/pypi/pyversions/zenodo_get.svg)](https://pypi.org/project/zenodo-get/)
[![PyPI - License](https://img.shields.io/pypi/l/zenodo_get.svg)](https://gitlab.com/dvolgyes/zenodo_get/raw/master/LICENSE.txt)
[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.1261812.svg)](https://doi.org/10.5281/zenodo.1261812)

Source code
-----------

The code is hosted on GitHub and on GitLab with identical content,
except for temporary differences until synchronization.

Install
-------

From PyPI:

```
pip3 install zenodo_get
```

Or from GitLab/GitHub:

```
pip3 install git+https://gitlab.com/dvolgyes/zenodo_get
pip3 install git+https://github.com/dvolgyes/zenodo_get
```

Afterwards, you can query the command line options:

```
zenodo_get -h
```

but the default settings should work for most use cases:

```
zenodo_get RECORD_ID_OR_DOI
```

Documentation
-------------

The tool itself is simple, and the help message is reasonable:

```
zenodo_get -h
```

but if you need more, open a GitHub ticket and explain what is missing.

Basic usage:

```
zenodo_get RECORD_ID_OR_DOI
```

Special parameters:

- `-m` : generate `md5sums.txt` for verification. Beware: if an `md5sums.txt` file is present
  in the dataset, the downloaded copy will overwrite this generated file.
  Verification example: `md5sum -c md5sums.txt`
- `-w FILE` : instead of downloading the record files, generate a FILE which contains direct
  links to the Zenodo site. These links can be downloaded with any download manager,
  e.g. with wget: `wget -i urls.txt`
- `-e` : continue on error. Files with errors are skipped, but the tool still tries to
  download the rest of the files.
- `-k` : keep files: keep files with an invalid md5 checksum. The main purpose is debugging.
- `-R N` : retry on error N times.
- `-p N` : waiting time in seconds before a retry attempt. Default: 0.5 sec.
- `-n` : do not continue. The default behaviour is to download only the files which are not
  yet downloaded or whose checksum does not match the file. This flag disables that feature,
  forcing the download of existing files and assigning a new name to the downloaded files
  (e.g. `file(1).ext`).

Remark for batch processing: the program always exits with a non-zero exit code if any error
has happened, for instance a checksum mismatch, download error, time-out, etc. Only perfectly
correct downloads end with a 0 exit code.

Citation
--------

You don't really need to cite this software, except if you use it for another academic
publication. E.g. if you download something from Zenodo with zenodo-get: no need to cite
anything. If you download a lot from Zenodo, and you publish about Zenodo, and this tool is
an integral part of the methodology, then you could cite it. You can always ask the tool to
print the most up-to-date reference; it produces plain-text and BibTeX references too:

```
zenodo_get --cite
```
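As a worked example of the flags above, a cautious batch download that retries twice with a
one-second pause and verifies checksums afterwards might look like this (record 1261812 is
the tool's own DOI record, per the badge above):

```
zenodo_get -m -R 2 -p 1 1261812
md5sum -c md5sums.txt
```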
zenodo-get
/zenodo_get-1.4.0.tar.gz/zenodo_get-1.4.0/README.md
README.md
from time import time
import json
from functools import lru_cache as cache
import logging
from stat import S_IFREG, S_IFDIR
import sys

try:
    import requests
    from box import SBox
    from fuse import FUSE, Operations, LoggingMixIn
except ImportError as e:
    logging.getLogger().critical(e)
    logging.getLogger().critical('You need to install python-box, requests and fusepy.')
    sys.exit(1)


class WebFile:
    def __init__(self, url, size, chunksize=64, largefile=1024):
        self.url = url
        self.r = None
        self.content = None
        if url is not None and size < (largefile * 1024):
            with requests.get(url, stream=False) as r:
                self.content = r.content
        self.chunksize = chunksize
        self.last_page = bytearray()
        self.last_offset = 0
        self.iterator = None

    def reset(self):
        self.last_offset = 0
        self.r = requests.get(self.url, stream=True)
        self.iterator = self.r.iter_content(chunk_size=self.chunksize * 1024)

    def close(self):
        self.last_offset = 0
        self.last_page = bytearray()
        self.r = None
        self.iterator = None

    def read_next(self):
        try:
            if self.iterator is None:
                self.reset()
            self.last_offset += len(self.last_page)
            self.last_page = next(self.iterator)
            return self.last_page
        except StopIteration:
            return bytearray()

    def __getitem__(self, domain):
        if self.content is not None:
            return self.content[domain]
        if domain.start > domain.stop:
            return bytes()
        response = bytes()
        if self.last_offset > domain.start:
            self.reset()
        if self.last_offset <= domain.start + len(self.last_page):
            while self.last_offset + len(self.last_page) <= domain.start:
                chunk = self.read_next()
                if len(chunk) == 0:
                    break
        L = self.last_offset + len(self.last_page)
        end = max(min(domain.stop, L) - self.last_offset, 0)
        if end <= 0:
            return bytes()
        start = domain.start - self.last_offset
        response = response + self.last_page[start:end]
        N = max(end - start, 0)
        if domain.start + N < domain.stop:
            response = response + self[domain.start + N:domain.stop]
        return response


class ZenodoFS(LoggingMixIn, Operations):
    def __init__(self, recordIDs, sandbox_recordIDs, chunksize=64, largefile=1024):
        self.records = {'sandbox': [], 'zenodo': [], }
        self.attr_cache = SBox(default_box=True)
        self.dir_cache = SBox(default_box=True)
        self.open_files = {}
        self.content = {}
        self.chunksize = chunksize
        self.largefile = largefile
        self.logger = logging.getLogger()
        for rid in recordIDs:
            self.get_metadata(rid, sandbox=False)
        for rid in sandbox_recordIDs:
            self.get_metadata(rid, sandbox=True)

    @cache(maxsize=1024)
    def get_metadata(self, recordID, sandbox=False, exceptions=True, timeout=15):
        if not sandbox:
            url = 'https://zenodo.org/api/records/'
        else:
            url = 'https://sandbox.zenodo.org/api/records/'
        try:
            r = requests.get(url + recordID, timeout=timeout)
        except requests.exceptions.ConnectTimeout:
            self.logger.critical('Connection timeout during metadata reading.')
            raise
        except Exception:
            self.logger.critical('Connection error during metadata reading.')
            raise
        js = {}
        if r.ok:
            js = json.loads(r.text)['files']
            for f in json.loads(r.text)['files']:
                path = 'zenodo' if not sandbox else 'sandbox'
                self.attr_cache[f'/{path}/{recordID}/{f["key"]}'] = SBox(f, default_box=True)
            self.content[f"/{path}/{recordID}.json"] = (SBox(metadata=js).to_json() + "\n").encode()
            self.content[f"/{path}/{recordID}.yaml"] = SBox(metadata=js).to_yaml().encode()
        return js

    def readdir(self, path, fh):
        level = len(path.split('/'))
        content = [name for name in self.attr_cache.keys() if name.startswith(path)]
        if path == '/':
            return ['.', 'sandbox', 'zenodo']
        elif path in ('/sandbox', '/zenodo'):
            content = [name for name in self.attr_cache.keys() if name.startswith(path)]
        else:
            parts = path.split('/')
            if len(parts) >= 3:
                recordID = parts[2]
                self.get_metadata(recordID)
                content = [name for name in self.attr_cache.keys() if name.startswith(path)]
        N = len(path) + 1
        content = list({name[N:].split('/')[0]
                        for name in content if len(name) > N and name[N - 1] == '/'})
        if level == 2:
            content = content \
                + [f"{name}.yaml" for name in content if name.find('.') == -1] \
                + [f"{name}.json" for name in content if name.find('.') == -1]
        return list(set(content))

    def getattr(self, path, fh=None):
        parts = path.split('/')
        level = len(parts)
        st = {}
        if path in ['/', '/sandbox', '/zenodo']:
            st['st_mode'] = (S_IFDIR | 0o755)
            st['st_nlink'] = 2
        elif level == 3:
            if path.find('.') > -1:
                size = len(self.content[path])
                st = {'st_mode': (S_IFREG | 0o444), 'st_size': size}
            else:
                st['st_mode'] = (S_IFDIR | 0o755)
                st['st_nlink'] = 2
        else:
            size = 0
            st = {'st_mode': (S_IFREG | 0o444), 'st_size': size}
            if level >= 3:
                recordID = parts[2]
                self.get_metadata(recordID)
            if level == 4:
                fn = self.attr_cache[path]
                if 'size' in fn:
                    st['st_size'] = fn['size']
        st['st_ctime'] = st['st_mtime'] = st['st_atime'] = time()
        return st

    def open(self, path, mode):
        if path not in self.open_files:
            url = self.attr_cache[path]['links'].get('self')
            size = self.attr_cache[path].get('size', 0)
            self.open_files[path] = WebFile(url, size, self.chunksize, self.largefile)
        return 0

    def read(self, path, size, offset, fh):
        if path in self.content:
            return self.content[path][offset:offset + size]
        return self.open_files[path][offset:offset + size]

    def release(self, path, fh):
        if path in self.open_files:
            wf = self.open_files.pop(path)
            wf.close()


if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("mountpoint", type=str, help="mount point")
    parser.add_argument("-r", "--record", nargs='+', action='extend', default=[],
                        help='record ID(s)')
    parser.add_argument("-s", "--sandbox", nargs='+', action='extend', default=[],
                        help='sandbox record ID(s)')
    parser.add_argument("-c", "--chunk_size", type=int, default=64,
                        help='chunk size [KB] for network download (default: 64)')
    parser.add_argument("-l", "--large_file_limit", type=int, default=256,
                        help='file size [KB] which is downloaded without splitting into chunks (default: 256)')
    parser.add_argument("-L", "--log_level", default='error', const='error', nargs='?',
                        choices=['critical', 'error', 'warning', 'info', 'debug'],
                        help='log level (default: error)')
    parser.add_argument("-f", "--foreground", action="store_true", default=False)
    parser.add_argument("-d", "--debug", action="store_true", default=False)
    args = parser.parse_args()

    level = {'critical': logging.CRITICAL,
             'error': logging.ERROR,
             'warning': logging.WARNING,
             'info': logging.INFO,
             'debug': logging.DEBUG,
             }[args.log_level]
    logging.basicConfig(level=level)

    fuse = FUSE(
        ZenodoFS(args.record, args.sandbox, chunksize=args.chunk_size, largefile=args.large_file_limit),
        args.mountpoint,
        foreground=args.foreground,
        nothreads=True,
        debug=args.debug,
    )
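# Usage sketch (not part of the original module): mounting programmatically,
# equivalent to `python3 zenodofs.py ./mnt -r 1261812 -f`. Assumes fusepy and a
# working FUSE installation; the record ID and mount point are examples.
def _example_mount():
    logging.basicConfig(level=logging.ERROR)
    fs = ZenodoFS(['1261812'], [], chunksize=64, largefile=256)
    # Files then appear under ./mnt/zenodo/1261812/, with the record metadata
    # exposed as 1261812.json and 1261812.yaml alongside the directory.
    FUSE(fs, './mnt', foreground=True, nothreads=True)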
zenodo-get
/zenodo_get-1.4.0.tar.gz/zenodo_get-1.4.0/zenodofs.py
zenodofs.py
import zenodo_get as zget
import requests
import hashlib
import sys
import os
from optparse import OptionParser
import wget
import time
import signal
from pathlib import Path
from contextlib import contextmanager
from urllib.parse import unquote


# see https://stackoverflow.com/questions/431684/how-do-i-change-the-working-directory-in-python/24176022#24176022
@contextmanager
def cd(newdir):
    prevdir = os.getcwd()
    os.chdir(os.path.expanduser(newdir))
    try:
        yield
    finally:
        os.chdir(prevdir)


def eprint(*args, **kwargs):
    print(*args, file=sys.stderr, **kwargs)


def ctrl_c(func):
    signal.signal(signal.SIGINT, func)
    return func


abort_signal = False
abort_counter = 0
exceptions = False


@ctrl_c
def handle_ctrl_c(*args, **kwargs):
    global abort_signal
    global abort_counter
    global exceptions

    abort_signal = True
    abort_counter += 1

    if abort_counter >= 2:
        eprint()
        eprint('Immediate abort. There might be unfinished files.')
        if exceptions:
            raise Exception('Immediate abort')
        else:
            sys.exit(1)


def check_hash(filename, checksum):
    algorithm, value = checksum.split(':')
    if not os.path.exists(filename):
        return value, 'invalid'
    h = hashlib.new(algorithm)
    with open(filename, 'rb') as f:
        while True:
            data = f.read(4096)
            if not data:
                break
            h.update(data)
    digest = h.hexdigest()
    return value, digest


def zenodo_get(argv=None):
    global exceptions

    if argv is None:
        argv = sys.argv[1:]
        exceptions = False
    else:
        exceptions = True

    parser = OptionParser(
        usage='%prog [options] RECORD_OR_DOI',
        version=f'%prog {zget.__version__}'
    )
    parser.add_option(
        '-c', '--cite', dest='cite', action='store_true', default=False,
        help='print citation information',
    )
    parser.add_option(
        '-r', '--record', action='store', type='string', dest='record',
        help='Zenodo record ID', default=None,
    )
    parser.add_option(
        '-d', '--doi', action='store', type='string', dest='doi',
        help='Zenodo DOI', default=None,
    )
    parser.add_option(
        '-m', '--md5', action='store_true',  # ~type=bool,
        dest='md5', help='Create md5sums.txt for verification.', default=False,
    )
    parser.add_option(
        '-w', '--wget', action='store', type='string', dest='wget',
        help='Create URL list for download managers. (Files will not be downloaded.)',
        default=None,
    )
    parser.add_option(
        '-e', '--continue-on-error', action='store_true', dest='error',
        help='Continue with next file if error happens.', default=False,
    )
    parser.add_option(
        '-k', '--keep', action='store_true', dest='keep',
        help='Keep files with invalid checksum. (Default: delete them.)', default=False,
    )
    parser.add_option(
        '-n', '--do-not-continue', action='store_false', dest='cont',
        help='Do not continue previous download attempt. (Default: continue.)',
        default=True,
    )
    parser.add_option(
        '-R', '--retry', action='store', type=int, dest='retry',
        help='Retry on error N more times.', default=0,
    )
    parser.add_option(
        '-p', '--pause', action='store', type=float, dest='pause',
        help='Wait N second before retry attempt, e.g. 0.5', default=0.5,
    )
    parser.add_option(
        '-t', '--time-out', action='store', type=float, dest='timeout',
        help='Set connection time-out. Default: 15 [sec].', default=15.,
    )
    parser.add_option(
        '-o', '--output-dir', action='store', type=str, dest='outdir', default='.',
        help='Output directory, created if necessary. Default: current directory.',
    )
    parser.add_option(
        '-s', '--sandbox', action='store_true', dest='sandbox',
        help='Use Zenodo Sandbox URL.', default=False,
    )
    parser.add_option(
        '-a', '--access-token', action='store', type=str, dest='access_token',
        default=None, help='Optional access token for the requests query.',
    )

    (options, args) = parser.parse_args(argv)

    if options.cite:
        print('Reference for this software:')
        print(zget.__reference__)
        print()
        print('Bibtex format:')
        print(zget.__bibtex__)
        if exceptions:
            return
        else:
            sys.exit(0)

    # create directory, if necessary, then change to it
    options.outdir = Path(options.outdir)
    options.outdir.mkdir(parents=True, exist_ok=True)
    with cd(options.outdir):
        if len(args) > 0:
            try:
                options.record = str(int(args[0]))
            except ValueError:
                options.doi = args[0]
        elif options.doi is None and options.record is None:
            parser.print_help()
            if exceptions:
                return
            else:
                sys.exit(0)

        if options.doi is not None:
            url = options.doi
            if not url.startswith('http'):
                url = 'https://doi.org/' + url
            try:
                r = requests.get(url, timeout=options.timeout)
            except requests.exceptions.ConnectTimeout:
                eprint('Connection timeout.')
                if exceptions:
                    raise
                else:
                    sys.exit(1)
            except Exception:
                eprint('Connection error.')
                if exceptions:
                    raise
                else:
                    sys.exit(1)
            if not r.ok:
                eprint('DOI could not be resolved. Try again, or use record ID.')
                if exceptions:
                    raise ValueError('DOI', options.doi)
                else:
                    sys.exit(1)
            recordID = r.url.split('/')[-1]
        else:
            recordID = options.record
        recordID = recordID.strip()

        if not options.sandbox:
            url = 'https://zenodo.org/api/records/'
        else:
            url = 'https://sandbox.zenodo.org/api/records/'

        params = {}
        if options.access_token:
            params['access_token'] = options.access_token

        try:
            r = requests.get(url + recordID, params=params, timeout=options.timeout)
        except requests.exceptions.ConnectTimeout:
            eprint('Connection timeout during metadata reading.')
            if exceptions:
                raise
            else:
                sys.exit(1)
        except Exception:
            eprint('Connection error during metadata reading.')
            if exceptions:
                raise
            else:
                sys.exit(1)

        if r.ok:
            js = r.json()
            files = js['files']
            total_size = sum(f['size'] for f in files)

            if options.md5:
                with open('md5sums.txt', 'wt') as md5file:
                    for f in files:
                        fname = f['key']
                        checksum = f['checksum'].split(':')[-1]
                        md5file.write(f'{checksum} {fname}\n')

            if options.wget is not None:
                if options.wget == '-':
                    for f in files:
                        link = f['links']['self']
                        print(link)
                else:
                    with open(options.wget, 'wt') as wgetfile:
                        for f in files:
                            fname = f['key']
                            link = 'https://zenodo.org/record/{}/files/{}'.format(recordID, fname)
                            wgetfile.write(link + '\n')
            else:
                eprint('Title: {}'.format(js['metadata']['title']))
                eprint('Keywords: ' + (', '.join(js['metadata'].get('keywords', []))))
                eprint('Publication date: ' + js['metadata']['publication_date'])
                eprint('DOI: ' + js['metadata']['doi'])
                eprint('Total size: {:.1f} MB'.format(total_size / 2 ** 20))

                for f in files:
                    if abort_signal:
                        eprint('Download aborted with CTRL+C.')
                        eprint('Already successfully downloaded files are kept.')
                        break
                    link = f['links']['self']
                    size = f['size'] / 2 ** 20
                    eprint()
                    eprint(f'Link: {link}   size: {size:.1f} MB')
                    fname = f['key']
                    checksum = f['checksum']

                    remote_hash, local_hash = check_hash(fname, checksum)

                    if remote_hash == local_hash and options.cont:
                        eprint(f'{fname} is already downloaded correctly.')
                        continue

                    for _ in range(options.retry + 1):
                        try:
                            link = url = unquote(link)
                            filename = wget.download(f"{link}?access_token={options.access_token}")
                        except Exception:
                            eprint(f'  Download error. Original link: {link}')
                            time.sleep(options.pause)
                        else:
                            break
                    else:
                        eprint('  Too many errors.')
                        if not options.error:
                            eprint('  Download is aborted.')
                            if exceptions:
                                raise Exception('too many errors')
                            else:
                                sys.exit(1)
                        eprint('  Download continues with the next file.')
                        continue

                    eprint()
                    h1, h2 = check_hash(filename, checksum)
                    if h1 == h2:
                        eprint(f'Checksum is correct. ({h1})')
                    else:
                        eprint(f'Checksum is INCORRECT! (expected {h1}, got {h2})')
                        if not options.keep:
                            eprint('  File is deleted.')
                            os.remove(filename)
                        else:
                            eprint('  File is NOT deleted!')
                        if not options.error:
                            sys.exit(1)
                else:
                    eprint('All files have been downloaded.')
        else:
            eprint('Record could not be accessed.')
            if exceptions:
                raise Exception('Record could not be accessed.')
            else:
                sys.exit(1)
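# Usage sketch (not part of the original module): calling the downloader
# programmatically. When an argv list is passed explicitly, errors raise
# exceptions instead of calling sys.exit(). The record ID and output
# directory are examples.
if __name__ == "__main__":
    zenodo_get(["-m", "-o", "downloads", "1261812"])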
zenodo-get
/zenodo_get-1.4.0.tar.gz/zenodo_get-1.4.0/zenodo_get/zget.py
zget.py
from decopatch import function_decorator, DECORATED
from makefun import wraps
from pathlib import Path
import openai
import os
import sys
import pickle
from typing import Union

exclusion_list = (
    "pytest",
    "git",
    "python3",
    "fix",
    "fix3",
    "fix4",
    "geany",
    "sphinx",
    "sphinx-build",
    "firefox",
)


def fix_call(
    model: str,
    tool_name: str,
    help_msg: str,
    cli_used: str,
    error_message: str,
) -> str:
    """
    Fix a command line tool invocation by analyzing the tool's user manual,
    the invoked command line, and the resulting error message.

    The tool uses either the gpt-3.5-turbo or the gpt-4 model.

    :param model: Name of the GPT model to use.
    :type model: str
    :param tool_name: Name of the command line tool.
    :type tool_name: str
    :param help_msg: Help message of the command line tool.
    :type help_msg: str
    :param cli_used: The invoked command line.
    :type cli_used: str
    :param error_message: The resulting error message.
    :type error_message: str
    :return: Fixed command to be executed.
    :rtype: str
    """
    openai.organization = os.getenv("OPENAI_ORG_ID") or openai.organization
    openai.api_key = os.getenv("OPENAI_API_KEY") or openai.api_key

    lines = help_msg.split("\n")
    if len(lines) > 20:
        help_msg = (
            "\n".join(lines[:10])
            + "\n\n<<redacted help message>>\n\n"
            + "\n".join(lines[-10:])
        )

    content = [
        {
            "role": "system",
            "content": f"""
You are an IT expert and you help with command line tools.
You read CLI tool documentation and you help the users to solve their problem.
Your answer is always very brief, concise and succinct.
You do not explain basic shell commands and how they work, you only try to
analyze the problem, e.g. wrong argument or wrong file name.
If an error message is provided, you try to take this message into account to
figure out the reason of the problem. If not provided, then you analyze the
command line options and the tool description to find the problem.
If fixing the parameters is possible, then you first provide a very brief
explanation, then you end your response with the fixed code, no explanation
after the code. If a fix alone is not possible, then you recommend a fixed
command line AND you highlight what to check, but this recommendation is
brief, concise and succinct.

Important notation: all references to incorrect values should be marked in the
format of [bold red]`INCORRECT`[/]. All references to proposed, fixed values
should be marked with the format: [bold green]`PROPOSED`[/]
After the end of each sentence, insert a new line character.
Longer code blocks inside "```" should not be marked.

Here is the description of the tool you work with:
Tool name: {tool_name}
{help_msg}
""",
        },
        {
            "role": "assistant",
            "content": "I understand. An example: ```ln -symboliclink file1 file2.``` "
            "Using [bold red]`-symboliclink`[/bold red] is incorrect, instead you can use [bold green]`-s`[/bold green].\n"
            "Try this:\n[green]```ln -s file1 file2```[/green]",
        },
        {
            "role": "user",
            "content": f"""
This is the failed command line:
```bash
{tool_name} {cli_used}
```
""",
        },
    ]

    if error_message:
        if len(error_message) > 500:
            lines = error_message.split("\n")
            error_message = (
                "\n".join(lines[:10])
                + "<redacted error message>\n "
                + "\n".join(lines[-10:])
            )
        content.append(
            {
                "role": "user",
                "content": f"""
The following error message was shown:
```text
{error_message}
```
""",
            }
        )

    try:
        completion = openai.ChatCompletion.create(
            model=model,
            messages=content,
            max_tokens=512,
            top_p=0.8,
            temperature=0.7,
            user="test_user_gpt4cli",
        )
        return completion["choices"][0]["message"]["content"]
    except Exception as e:
        return f"Unfortunately, an exception happened in my processing, can't help at the moment.\n{e}"


@function_decorator
def gpt4click(
    name: str = "", model: str = "gpt-3.5-turbo-16k", f=DECORATED
) -> callable:
    """
    Decorator over a `click` command line interface. If the click command ends
    with an error, it tries to analyze the reason with a GPT-3.5 or GPT-4.0 model.

    :param name: name of the command line tool. By default it will look up sys.argv[0]
    :type name: str
    :param model: name of the GPT model
    :type model: str
    :param f: Decorated function
    :type f: callable
    :return: New function with improved error handling
    :rtype: callable
    """
    if not name:
        name = Path(sys.argv[0]).name

    @wraps(f)
    def new_f(*args, **kwargs) -> None:
        try:
            ctx = f.make_context(name, sys.argv[1:])
            with ctx:
                f.invoke(ctx)
        except Exception as error_msg:
            error_message = repr(error_msg)
            import click

            help_msg = click.Context(f).get_help()
            cli_args = " ".join(sys.argv[1:])
            fixed_cli = fix_call(model, name, help_msg, cli_args, error_message)
            print(f"Here's the corrected command ({model=}):")
            print(fixed_cli)

    return new_f


def parse_code_snippet(txt: str) -> tuple[str, str]:
    import parse

    parsed = parse.parse("{text}```bash\n{code}```", txt)
    if parsed is None:
        return txt, ""
    text = parsed["text"].strip()
    code = parsed["code"].strip()
    text = text.replace(". ", ".\n")
    return text, code


def send_text_to_terminal(text: str) -> None:
    import subprocess
    from rich.console import Console

    console = Console()
    console.print(
        "You can try the [dark_sea_green4]fixed version[/] by just pressing [red]ENTER[/red]:"
    )
    subprocess.run(
        ["xdotool", "type", "--clearmodifiers", text],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )


if __name__ == "__main__":
    from subprocess import run

    p = run(sys.argv[1:])
    if p.returncode:
        model = "gpt-3.5-turbo-16k"
        model = "gpt-3.5-turbo-0613"
        tool_name = Path(sys.argv[1]).name
        cli_args = " ".join(sys.argv[2:])

        from rich.console import Console
        from rich.text import Text

        console = Console()
        if tool_name in exclusion_list:
            console.print(
                Text.from_markup(
                    f"Unfortunately, I cannot help with this tool ([orange1]{tool_name}[/])"
                )
            )
        else:
            console.print(
                Text.from_markup(
                    f"\n\nAn error has been detected, it is under analysis with [bright_magenta]{model}[/].\n"
                )
            )
            console.print(Text.from_markup("Your original command line was:\n"))
            console.print(Text.from_markup(f"[orange1]{tool_name} {cli_args}[/]\n"))

            p1 = run([sys.argv[1], "--help"], capture_output=True)
            help_message = (p1.stdout.decode() + p1.stderr.decode()).strip()
            p2 = run(sys.argv[1:], capture_output=True)
            error_msg = p2.stderr.decode().strip()

            cache_file = Path("~/.gpt4cache.pickle").expanduser()
            cache = {}
            if cache_file.exists():
                content = cache_file.read_bytes()
                if len(content) > 0:
                    cache = pickle.loads(content)

            args = (model, tool_name, help_message, cli_args, error_msg)
            if args in cache:
                result = cache[args]
            else:
                result = fix_call(model, tool_name, help_message, cli_args, error_msg)
                cache[args] = result
                cache_file.write_bytes(
                    pickle.dumps(cache, protocol=pickle.HIGHEST_PROTOCOL)
                )

            text, code = parse_code_snippet(result)
            console.print(Text.from_markup(text, justify="left"))
            console.print(
                Text.from_markup(f"[bold dark_sea_green4]{code}[/]", justify="left")
            )
            if code:
                if len(code.strip().split("\n")) == 1:
                    if os.getenv("GPT4SHELL", "default").lower() == "autofill":
                        send_text_to_terminal(f"{code}")
zenodo-get
/zenodo_get-1.4.0.tar.gz/zenodo_get-1.4.0/zenodo_get/gpt4click.py
gpt4click.py
import os
from typing import Optional

import click
from requests import Response

from zenodo_rest.entities import Deposition, Metadata
from zenodo_rest.entities.bucket_file import BucketFile
from zenodo_rest.exceptions import NoDraftFound

from . import actions


@click.group()
def depositions():
    pass


@depositions.command()
@click.option("--metadata", help="Optional json of metadata for the deposition.")
@click.option(
    "--metadata_file",
    type=click.Path(exists=True, file_okay=True, dir_okay=False),
    help="Optional json file of metadata for the deposition.",
)
@click.option(
    "--prereserve-doi",
    is_flag=True,
    help="Prereserve a DOI (not pushed to Datacite until deposition is published).",
)
@click.option(
    "--dest",
    type=click.Path(),
    default=None,
    help="A file to write the resulting deposition json representation to.",
)
def create(
    metadata: Optional[str] = None,
    metadata_file: Optional[str] = None,
    prereserve_doi: Optional[bool] = None,
    dest: Optional[str] = None,
):
    metadata_parsed: Metadata = Metadata()
    if isinstance(metadata, str):
        metadata_parsed = Metadata.parse_raw(metadata)
    elif isinstance(metadata, Metadata):
        metadata_parsed = metadata
    if metadata_file is not None:
        metadata_parsed = Metadata.parse_file(metadata_file)

    deposition: Deposition = actions.create(metadata_parsed, prereserve_doi)
    json_response = deposition.json(exclude_none=True, indent=4)
    click.echo(json_response)
    if dest is None:
        return
    if len(os.path.dirname(dest)) > 0:
        os.makedirs(os.path.dirname(dest), exist_ok=True)
    with open(dest, "w", encoding="utf-8") as f:
        f.write(json_response)


@depositions.command()
@click.argument("deposition-id", type=click.INT)
@click.option(
    "--dest",
    type=click.Path(),
    default=None,
    help="A file to write the resulting deposition json representation to.",
)
def retrieve(deposition_id: int, dest: Optional[str] = None):
    """Retrieve deposition by ID from server.

    DEPOSITION-ID is the id of the deposition to be fetched
    """
    deposition: Deposition = actions.retrieve(deposition_id)
    json_response = deposition.json(exclude_none=True, indent=4)
    click.echo(json_response)
    if dest is None:
        return
    if len(os.path.dirname(dest)) > 0:
        os.makedirs(os.path.dirname(dest), exist_ok=True)
    with open(dest, "w", encoding="utf-8") as f:
        f.write(json_response)


@depositions.command("list")
@click.option(
    "--query", "-q", help="Search query (using Elasticsearch query string syntax)."
)
@click.option(
    "--status", help="Filter result based on deposit status (either draft or published)"
)
@click.option(
    "--sort",
    help=(
        "Sort order (bestmatch or mostrecent). "
        "Prefix with minus to change from ascending to descending (e.g. -mostrecent)."
    ),
)
@click.option("--page", help="Page number for pagination")
@click.option("--size", help="Number of results to return per page.")
@click.option(
    "--all-versions",
    help="Show (true or 1) or hide (false or 0) all versions of deposits.",
)
def search_depositions(
    query: Optional[str] = None,
    status: Optional[str] = None,
    sort: Optional[str] = None,
    page: Optional[str] = None,
    size: Optional[int] = None,
    all_versions: bool = None,
):
    result: list[Deposition]
    result = actions.search(query, status, sort, page, size, all_versions)
    for x in result:
        click.echo(x.json(exclude_none=True, indent=2))


@depositions.command()
@click.argument(
    "deposition-json",
    type=click.Path(exists=True, file_okay=True, dir_okay=False),
)
@click.argument(
    "metadata_file",
    type=click.Path(exists=True, file_okay=True, dir_okay=False),
)
def update(
    deposition_json: str,
    metadata_file: str,
):
    """Update metadata for a not yet published deposition

    DEPOSITION_JSON the file with a json representation of the deposition to be updated

    METADATA_FILE the path to a metadata json file to be used as input
    """
    deposition: Deposition = Deposition.parse_file(deposition_json)
    deposition = deposition.get_latest_draft()
    metadata = Metadata.parse_file(metadata_file)
    deposition = actions.update_metadata(deposition.id, metadata)
    json_response = deposition.json(exclude_none=True, indent=4)
    click.echo(json_response)


@depositions.command()
@click.argument(
    "deposition-json",
    type=click.Path(exists=True, file_okay=True, dir_okay=False),
)
def delete(
    deposition_json: str,
):
    """Delete a not yet published deposition

    DEPOSITION_JSON json representation of the deposition to be deleted
    """
    deposition: Deposition = Deposition.parse_file(deposition_json)
    deposition = deposition.get_latest_draft()
    response: Response = actions.delete_remote(deposition.id)
    json_response = response.json(exclude_none=True, indent=4)
    click.echo(json_response)


@depositions.command()
@click.argument(
    "deposition-json",
    type=click.Path(exists=True, file_okay=True, dir_okay=False),
)
@click.argument(
    "file",
    type=click.Path(exists=True, file_okay=True, dir_okay=True),
)
def upload_file(
    deposition_json: str,
    file: str,
):
    """Upload a file to the bucket of a not yet published deposition

    DEPOSITION_JSON json representation of the deposition to be uploaded to.

    FILE the path to a file to be uploaded
    """
    deposition: Deposition = Deposition.parse_file(deposition_json)
    deposition = deposition.get_latest_draft()
    bucket_file: BucketFile = actions.upload_file(deposition.id, file)
    json_response = bucket_file.json(exclude_none=True, indent=4)
    click.echo(json_response)


@depositions.command()
@click.argument(
    "deposition-json",
    type=click.Path(exists=True, file_okay=True, dir_okay=False),
)
def delete_files(
    deposition_json: str,
):
    """Delete files from the bucket of a not yet published deposition

    DEPOSITION_JSON json representation of the deposition to be uploaded to.
    """
    deposition: Deposition = Deposition.parse_file(deposition_json)
    deposition = deposition.get_latest_draft()
    responses = deposition.delete_files()
    click.echo(responses)


@depositions.command()
@click.argument(
    "deposition-json",
    type=click.Path(exists=True, file_okay=True, dir_okay=False),
)
@click.option(
    "--dest",
    type=click.Path(),
    default=None,
    help="A file to write the resulting deposition json representation to.",
)
def publish(
    deposition_json: str,
    dest: Optional[str] = None,
):
    """Publish a pending deposition

    DEPOSITION_JSON json representation of the deposition to be published
    """
    deposition: Deposition = Deposition.parse_file(deposition_json)
    deposition = deposition.get_latest_draft()
    deposition = actions.publish(deposition.id)
    json_response = deposition.json(exclude_none=True, indent=4)
    click.echo(json_response)
    if dest is None:
        return
    if len(os.path.dirname(dest)) > 0:
        os.makedirs(os.path.dirname(dest), exist_ok=True)
    with open(dest, "w", encoding="utf-8") as f:
        f.write(json_response)


@depositions.command()
@click.argument(
    "deposition-json",
    type=click.Path(exists=True, file_okay=True, dir_okay=False),
)
@click.option(
    "--dest",
    type=click.Path(),
    default=None,
    help="A file to write the resulting deposition json representation to.",
)
def new_version(deposition_json: str, dest: Optional[str] = None):
    """Create a new version of a published deposition

    DEPOSITION_JSON json representation of the deposition to be published
    """
    deposition: Deposition = Deposition.parse_file(deposition_json)
    deposition = deposition.refresh()
    deposition = deposition.get_latest()
    deposition = actions.new_version(deposition.id)
    json_response = deposition.json(exclude_none=True, indent=4)
    click.echo(json_response)
    if dest is None:
        return
    if len(os.path.dirname(dest)) > 0:
        os.makedirs(os.path.dirname(dest), exist_ok=True)
    with open(dest, "w", encoding="utf-8") as f:
        f.write(json_response)


@depositions.group()
def doi():
    """Get DOIs related to depositions"""
    pass


@doi.command()
@click.argument(
    "deposition-json",
    type=click.Path(exists=True, file_okay=True, dir_okay=False),
)
@click.option(
    "--full-url",
    "-f",
    is_flag=True,
    help="Return the full url of the latest draft's DOI",
)
def latest(
    deposition_json: str,
    full_url: bool,
):
    """Print the doi of the latest published version of the given deposition.

    DEPOSITION_JSON json representation of the deposition
    """
    deposition: Deposition = Deposition.parse_file(deposition_json)
    deposition = deposition.get_latest()
    if full_url:
        click.echo(deposition.doi_url)
    else:
        click.echo(deposition.doi)


@doi.command()
@click.argument(
    "deposition-json",
    type=click.Path(exists=True, file_okay=True, dir_okay=False),
)
@click.option(
    "--full-url",
    "-f",
    is_flag=True,
    help="Return the full url of the latest draft's DOI",
)
def latest_draft(
    deposition_json: str,
    full_url: bool,
):
    """Print the DOI of the latest related deposition draft

    DEPOSITION_JSON json representation of the deposition
    """
    deposition: Deposition = Deposition.parse_file(deposition_json)
    draft: Deposition = deposition.get_latest_draft()
    if draft is None:
        raise NoDraftFound(deposition)
    if full_url:
        click.echo(draft.doi_url)
    else:
        click.echo(draft.doi)
zenodo-rest
/zenodo-rest-0.0.0b9.tar.gz/zenodo-rest-0.0.0b9/zenodo_rest/depositions/depositions.py
depositions.py
import os
import tempfile
from pathlib import Path
from shutil import make_archive
from typing import Optional

import requests

from zenodo_rest.entities.bucket_file import BucketFile
from zenodo_rest.entities.deposition import Deposition
from zenodo_rest.entities.metadata import Metadata


def create(
    metadata: Metadata = Metadata(),
    prereserve_doi: Optional[bool] = None,
    token: Optional[str] = None,
    base_url: Optional[str] = None,
) -> Deposition:
    """
    Create a deposition on the server, but do not publish it.
    """
    if token is None:
        token = os.getenv("ZENODO_TOKEN")
    if base_url is None:
        base_url = os.getenv("ZENODO_URL")
    if prereserve_doi is True:
        metadata.prereserve_doi = True

    header = {"Authorization": f"Bearer {token}"}
    response = requests.post(
        f"{base_url}/api/deposit/depositions",
        json={"metadata": metadata.dict(exclude_none=True)},
        headers=header,
    )

    response.raise_for_status()
    return Deposition.parse_obj(response.json())


def retrieve(
    deposition_id: str, token: Optional[str] = None, base_url: Optional[str] = None
) -> Deposition:
    return Deposition.retrieve(deposition_id, token, base_url)


def upload_file(
    deposition_id: str, path_or_file: str, token: Optional[str] = None
) -> BucketFile:
    """
    :param deposition_id:
    :param path_or_file: pass a path to zip and upload, or a file path to upload
    :param token:
    :return:
    """
    deposition: Deposition = retrieve(deposition_id)
    bucket_url = deposition.get_bucket()
    if token is None:
        token = os.getenv("ZENODO_TOKEN")

    path = Path(path_or_file)
    tempdir = None
    if path.is_dir():
        tempdir = tempfile.TemporaryDirectory()
        zip_file = os.path.join(tempdir.name, path.stem)
        make_archive(zip_file, "zip", root_dir=path.absolute())
        path = Path(f"{zip_file}.zip")

    header = {"Authorization": f"Bearer {token}"}
    with open(path.absolute(), "rb") as fp:
        r = requests.put(
            f"{bucket_url}/{path.name}",
            data=fp,
            headers=header,
        )
    if tempdir is not None:
        tempdir.cleanup()
    r.raise_for_status()
    return BucketFile.parse_obj(r.json())


def update_metadata(
    deposition_id: str,
    metadata: Metadata,
    token: Optional[str] = None,
    base_url: Optional[str] = None,
) -> Deposition:
    if token is None:
        token = os.getenv("ZENODO_TOKEN")
    if base_url is None:
        base_url = os.getenv("ZENODO_URL")
    header = {"Authorization": f"Bearer {token}", "Accept": "application/json"}

    response = requests.put(
        f"{base_url}/api/deposit/depositions/{deposition_id}",
        json={"metadata": metadata.dict(exclude_none=True)},
        headers=header,
    )
    response.raise_for_status()
    return Deposition.parse_obj(response.json())


def delete_remote(
    deposition_id: str, token: Optional[str] = None, base_url: Optional[str] = None
) -> requests.Response:
    if token is None:
        token = os.getenv("ZENODO_TOKEN")
    if base_url is None:
        base_url = os.getenv("ZENODO_URL")
    header = {
        "Authorization": f"Bearer {token}",
    }

    response = requests.delete(
        f"{base_url}/api/deposit/depositions/{deposition_id}",
        headers=header,
    )
    response.raise_for_status()
    return response


def publish(
    deposition_id: str, token: Optional[str] = None, base_url: Optional[str] = None
) -> Deposition:
    if token is None:
        token = os.getenv("ZENODO_TOKEN")
    if base_url is None:
        base_url = os.getenv("ZENODO_URL")
    header = {
        "Authorization": f"Bearer {token}",
    }

    response = requests.post(
        f"{base_url}/api/deposit/depositions/{deposition_id}/actions/publish",
        headers=header,
    )
    response.raise_for_status()
    return Deposition.parse_obj(response.json())


def new_version(
    deposition_id: str, token: Optional[str] = None, base_url: Optional[str] = None
) -> Deposition:
    if token is None:
        token = os.getenv("ZENODO_TOKEN", token)
    if base_url is None:
        base_url = os.getenv("ZENODO_URL")
    header = {
        "Authorization": f"Bearer {token}",
    }

    response = requests.post(
        f"{base_url}/api/deposit/depositions/{deposition_id}/actions/newversion",
        headers=header,
    )
    response.raise_for_status()
    deposition: Deposition = Deposition.parse_obj(response.json())
    return deposition


def search(
    query: Optional[str] = None,
    status: Optional[str] = None,
    sort: Optional[str] = None,
    page: Optional[str] = None,
    size: Optional[int] = None,
    all_versions: Optional[bool] = None,
    token: Optional[str] = None,
) -> list[Deposition]:
    if token is None:
        token = os.getenv("ZENODO_TOKEN")
    base_url = os.getenv("ZENODO_URL")
    header = {"Authorization": f"Bearer {token}"}

    params: dict = {}
    if query is not None:
        params["q"] = query
    if status is not None:
        params["status"] = status
    if sort is not None:
        params["sort"] = sort
    if page is not None:
        params["page"] = page
    if size is not None:
        params["size"] = size
    if all_versions:
        params["all_versions"] = "true"

    response = requests.get(
        f"{base_url}/api/deposit/depositions", headers=header, params=params
    )
    response.raise_for_status()
    return [Deposition.parse_obj(x) for x in response.json()]
zenodo-rest
/zenodo-rest-0.0.0b9.tar.gz/zenodo-rest-0.0.0b9/zenodo_rest/depositions/actions.py
actions.py
from datetime import date
from enum import Enum
from typing import Optional, Union

from pydantic import BaseModel

from zenodo_rest.entities.community import Community
from zenodo_rest.entities.creator import Contributor, Creator
from zenodo_rest.entities.date_interval import DateInterval
from zenodo_rest.entities.doi import Doi
from zenodo_rest.entities.grant import Grant
from zenodo_rest.entities.location import Location
from zenodo_rest.entities.subject import Subject


class UploadType(str, Enum):
    publication = "publication"
    poster = "poster"
    presentation = "presentation"
    dataset = "dataset"
    image = "image"
    video_audio = "video"
    software = "software"
    lesson = "lesson"
    physical_object = "physicalobject"
    other = "other"


class PublicationType(str, Enum):
    annotation_collection = "annotationcollection"
    book = "book"
    section = "section"
    conference_paper = "conferencepaper"
    data_management_plan = "datamanagementplan"
    article = "article"
    patent = "patent"
    preprint = "preprint"
    deliverable = "deliverable"
    milestone = "milestone"
    proposal = "proposal"
    report = "report"
    software_documentation = "softwaredocumentation"
    taxonomic_treatment = "taxonomictreatment"
    technical_note = "technicalnote"
    thesis = "thesis"
    working_paper = "workingpaper"
    other = "other"


class ImageType(str, Enum):
    figure = "figure"
    plot = "plot"
    drawing = "drawing"
    diagram = "diagram"
    photo = "photo"
    other = "other"


class AccessRight(str, Enum):
    open = "open"
    embargoed = "embargoed"
    restricted = "restricted"
    closed = "closed"


class Metadata(BaseModel):
    upload_type: UploadType = UploadType.other
    publication_type: Optional[PublicationType] = None
    image_type: Optional[ImageType] = None
    publication_date: str = date.today().isoformat()
    title: str = "Placeholder"
    creators: list[Creator] = [Creator()]
    description: str = "Placeholder"
    access_right: AccessRight = AccessRight.open
    license: Optional[str] = None
    embargo_date: Optional[date] = None
    access_conditions: Optional[str] = None
    doi: Optional[str] = None
    prereserve_doi: Optional[Union[Doi, bool]] = None
    keywords: Optional[list[str]] = None
    notes: Optional[str] = None
    related_identifiers: Optional[list[object]] = None
    contributors: Optional[list[Contributor]] = None
    references: Optional[list[str]] = None
    communities: Optional[list[Community]] = None
    grants: Optional[list[Grant]] = None
    journal_title: Optional[str] = None
    journal_volume: Optional[str] = None
    journal_issue: Optional[str] = None
    journal_pages: Optional[str] = None
    conference_title: Optional[str] = None
    conference_acronym: Optional[str] = None
    conference_dates: Optional[str] = None
    conference_place: Optional[str] = None
    conference_url: Optional[str] = None
    conference_session: Optional[str] = None
    conference_session_part: Optional[str] = None
    imprint_publisher: Optional[str] = None
    imprint_isbn: Optional[str] = None
    imprint_place: Optional[str] = None
    partof_title: Optional[str] = None
    partof_pages: Optional[str] = None
    thesis_supervisors: Optional[list[Creator]] = None
    thesis_university: Optional[str] = None
    subjects: Optional[list[Subject]] = None
    version: Optional[str] = None
    language: Optional[str] = None
    locations: Optional[list[Location]] = None
    dates: Optional[list[DateInterval]] = None
    method: Optional[str] = None
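# Usage sketch (not part of the original module): the enums subclass str, so
# they serialize as plain strings in the JSON payload. Field values below are
# hypothetical examples.
if __name__ == "__main__":
    meta = Metadata(
        title="Field measurements 2022",
        description="Raw sensor readings.",
        upload_type=UploadType.dataset,
        access_right=AccessRight.open,
        keywords=["sensors", "field data"],
    )
    print(meta.json(exclude_none=True))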
zenodo-rest
/zenodo-rest-0.0.0b9.tar.gz/zenodo-rest-0.0.0b9/zenodo_rest/entities/metadata.py
metadata.py
import os
from typing import Optional, TypeVar

import requests
from pydantic import BaseModel

from zenodo_rest.entities.deposition_file import DepositionFile
from zenodo_rest.entities.metadata import Metadata
from zenodo_rest import exceptions

T = TypeVar("Deposition")


class Deposition(BaseModel):
    created: str
    doi: Optional[str]
    doi_url: Optional[str]
    files: Optional[list[DepositionFile]]
    id: str
    links: dict
    metadata: Metadata
    modified: str
    owner: int
    record_id: int
    record_url: Optional[str]
    state: str
    submitted: bool
    title: str

    @staticmethod
    def retrieve(
        deposition_id: str, token: Optional[str] = None, base_url: Optional[str] = None
    ) -> T:
        if token is None:
            token = os.getenv("ZENODO_TOKEN")
        if base_url is None:
            base_url = os.getenv("ZENODO_URL")
        header = {"Authorization": f"Bearer {token}", "Accept": "application/json"}

        response = requests.get(
            f"{base_url}/api/deposit/depositions/{deposition_id}",
            headers=header,
        )
        response.raise_for_status()
        return Deposition.parse_obj(response.json())

    def refresh(self, token: str = None) -> Optional[T]:
        return Deposition.retrieve(self.id, token)

    def get_latest(self, token: str = None) -> Optional[T]:
        deposition: Deposition = self.refresh(token)
        latest_url = deposition.links.get("latest", None)
        if latest_url is None:
            return deposition.refresh()
        latest_id = latest_url.rsplit("/", 1)[1]
        return Deposition.retrieve(latest_id)

    def get_latest_draft(self, token: str = None) -> Optional[T]:
        deposition: Deposition = self.refresh(token)
        latest_draft_url = deposition.links.get("latest_draft", None)
        if latest_draft_url is None:
            raise exceptions.NoDraftFound(deposition.id)
        if token is None:
            token = os.getenv("ZENODO_TOKEN")
        header = {"Authorization": f"Bearer {token}", "Accept": "application/json"}

        response = requests.get(
            latest_draft_url,
            headers=header,
        )
        response.raise_for_status()
        return Deposition.parse_obj(response.json())

    def get_bucket(self) -> str:
        return self.links.get("bucket")

    def delete_file(self, file_id: str, token: str = None, base_url: str = None) -> int:
        if token is None:
            token = os.getenv("ZENODO_TOKEN")
        if base_url is None:
            base_url = os.getenv("ZENODO_URL")
        header = {"Authorization": f"Bearer {token}", "Accept": "application/json"}

        response = requests.delete(
            f"{base_url}/api/deposit/depositions/{self.id}/files/{file_id}",
            headers=header,
        )
        response.raise_for_status()
        return response.status_code

    def delete_files(self, token: str = None, base_url: str = None) -> list[int]:
        return [self.delete_file(file.id, token, base_url) for file in self.files]
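# Usage sketch (not part of the original module): fetching a deposition and
# following its "latest" link. Assumes ZENODO_TOKEN/ZENODO_URL are set; the
# deposition ID is hypothetical.
if __name__ == "__main__":
    dep = Deposition.retrieve("1234567")
    latest = dep.get_latest()
    print(latest.doi, latest.get_bucket())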
zenodo-rest
/zenodo-rest-0.0.0b9.tar.gz/zenodo-rest-0.0.0b9/zenodo_rest/entities/deposition.py
deposition.py
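A hedged retrieval sketch for the class above; the deposition id and host are placeholders, and the environment variables are the ones the methods themselves fall back to:

```python
# Retrieve a deposition; token/base_url fall back to ZENODO_TOKEN/ZENODO_URL
# when omitted, exactly as in Deposition.retrieve above.
import os

from zenodo_rest.entities.deposition import Deposition

os.environ.setdefault("ZENODO_URL", "https://sandbox.zenodo.org")  # placeholder host
dep = Deposition.retrieve("1234567")  # placeholder deposition id
print(dep.state, dep.get_bucket())
```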
# zenodoclient

[![Build Status](https://github.com/dlce-eva/zenodoclient/workflows/tests/badge.svg)](https://github.com/dlce-eva/zenodoclient/actions?query=workflow%3Atests)
[![PyPI](https://img.shields.io/pypi/v/zenodoclient.svg)](https://pypi.org/project/zenodoclient)

Python package to access the Zenodo API ([REST](http://developers.zenodo.org/) and [OAI-PMH](http://developers.zenodo.org/#oai-pmh)) programmatically and from the command line.

# Install

To install from PyPI:

```shell
pip install zenodoclient
```

Instructions for a development installation can be found in [`CONTRIBUTING.md`](CONTRIBUTING.md).

# Curating deposits

To curate deposits on Zenodo, you need an [access token](https://zenodo.org/account/settings/applications/tokens/new/). Then you can use the CLI:

```
zenodo --access-token $YOURTOKEN ls
```

# Accessing OAI-PMH feeds

Zenodo disseminates the metadata for communities via OAI-PMH. This metadata can be accessed programmatically from Python as follows:

```python
>>> from zenodoclient.oai import Records
>>> recs = Records('dictionaria')
>>> len(recs)
18
```

We can list the latest version of each Dictionaria dictionary:

```python
>>> import itertools
>>> for d, records in itertools.groupby(sorted(recs, key=lambda r: (r.repos.repos, r.version), reverse=True), lambda r: r.repos.repos):
...     print(d, next(records).tag)
...
wersing v1.0
tseltal v1.0.1
teop v1.0
sidaama v1.0
sanzhi v1.0
palula v1.0
nen v1.1
medialengua v1.0
kalamang v1.0
hdi v1.1
guarayu v1.0
diidxaza v1.0
daakaka v1.1.1
```

and look at the metadata:

```python
>>> recs[0].doi
'10.5281/zenodo.3066952'
>>> recs[0].citation
'Henrik Liljegren. (2019). dictionaria/palula: Palula Dictionary (Version v1.0) [Data set]. Zenodo. http://doi.org/10.5281/zenodo.3066952'
```
zenodoclient
/zenodoclient-0.5.1.tar.gz/zenodoclient-0.5.1/README.md
README.md
# zenodopy

![Tests](https://github.com/lgloege/zenodopy/actions/workflows/tests.yaml/badge.svg)
[![codecov](https://codecov.io/gh/lgloege/zenodopy/branch/main/graph/badge.svg?token=FVCS71HPHC)](https://codecov.io/gh/lgloege/zenodopy)
[![pypi](https://badgen.net/pypi/v/zenodopy)](https://pypi.org/project/zenodopy)
[![License:MIT](https://img.shields.io/badge/License-MIT-lightgray.svg?style=flt-square)](https://opensource.org/licenses/MIT)
[![contributions welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/lgloege/zenodopy/issues)

### Project under active development, not production ready

A Python 3.6+ package to manage [Zenodo](https://zenodo.org/) repositories.

### Functions Implemented

- `.create_project()`: create a new project
- `.upload_file()`: upload a file to a project
- `.download_file()`: download a file from a project
- `.delete_file()`: permanently remove a file from a project

Installing
----------

### PyPi

```sh
pip install zenodopy==0.2.0
```

### GitHub

```sh
pip install -e git+https://github.com/lgloege/zenodopy.git#egg=zenodopy
```

Using the Package
----------

1. **Create a Zenodo access token** by first logging into your account and clicking on your username in the top right corner. Navigate to "Applications" and then "+new token" under "Personal access tokens". Keep this window open while you proceed to step 2 because **the token is only displayed once**.

2. **Store the token** in `~/.zenodo_token` using the following command

```sh
{ echo 'ACCESS_TOKEN: your_access_token_here' } > ~/.zenodo_token
```

3. **Start using the `zenodopy` package**

```python
import zenodopy

# always start by creating a Client object
zeno = zenodopy.Client()

# list projects
zeno.list_projects

# list files
zeno.list_files

# create a project
zeno.create_project(title="test_project", upload_type="other")
# your zeno object now points to this newly created project

# create a file to upload
with open("~/test_file.txt", "w+") as f:
    f.write("Hello from zenodopy")

# upload the file to zenodo
zeno.upload_file("~/test_file.txt")
```

Notes
----------

This project is under active development. Here is a list of things that need improvement:

- **more tests**: we need to test uploading and downloading files
- **documentation**: we need to set up a readthedocs site
- **download based on DOI**: right now you can only download files from your own projects; it would be nice to download any record from its DOI
- **asynchronous functions**: use `asyncio` and `aiohttp` to write async functions. This will speed up downloading multiple files.
zenodopy
/zenodopy-0.3.0.tar.gz/zenodopy-0.3.0/README.md
README.md
# zenoh-ros-type-python

This repository contains common classes for ROS 2 messages used by Zenoh. The messages come from:

* [common_interfaces](https://github.com/ros2/common_interfaces): commonly used ROS messages
* [rcl_interfaces](https://github.com/ros2/rcl_interfaces): common interfaces in RCL
* [autoware_auto_msgs](https://github.com/tier4/autoware_auto_msgs/tree/tier4/main): messages used in Autoware
* [tier4_autoware_msgs](https://github.com/tier4/tier4_autoware_msgs/tree/tier4/universe): messages used in Autoware

# Usage

You can install the package from [PyPI](https://pypi.org/project/zenoh-ros-type/).

```shell
python3 -m pip install zenoh-ros-type
```
zenoh-ros-type
/zenoh-ros-type-0.2.0.tar.gz/zenoh-ros-type-0.2.0/README.md
README.md
from dataclasses import dataclass from pycdr2 import IdlStruct, Enum from pycdr2.types import int8, uint8, uint16, uint32, float64, sequence, array from std_msgs import Header from geometry_msgs import Quaternion, Vector3 @dataclass class RegionOfInterest(IdlStruct, typename="RegionOfInterest"): x_offset: uint32 y_offset: uint32 height: uint32 width: uint32 do_rectify: bool @dataclass class CameraInfo(IdlStruct, typename="CameraInfo"): header: Header height: uint32 width: uint32 distortion_model: str d: sequence[float64] k: array[float64, 9] r: array[float64, 9] p: array[float64, 12] binning_x: uint32 binning_y: uint32 roi: RegionOfInterest @dataclass class Image(IdlStruct, typename="Image"): header: Header height: uint32 width: uint32 encoding: str is_bigendian: uint8 step: uint32 data: sequence[uint8] @dataclass class IMU(IdlStruct, typename="IMU"): header: Header orientation: Quaternion orientation_covariance: array[float64, 9] angular_velocity: Vector3 angular_velocity_covariance: array[float64, 9] linear_acceleration: Vector3 linear_acceleration_covariance: array[float64, 9] @dataclass class NavSatStatus(IdlStruct, typename="NavSatStatus"): class STATUS(Enum): NO_FIX = -1 # unable to fix position FIX = 0 # unaugmented fix SBAS_FIX = 1 # with satellite-based augmentation GBAS_FIX = 2 # with ground-based augmentation status: int8 class SERVICE(Enum): GPS = 1 GLONASS = 2 COMPASS = 4 # includes BeiDou GALILEO = 8 service: uint16 @dataclass class NavSatFix(IdlStruct, typename="NavSatFix"): header: Header status: NavSatStatus latitude: float64 longitude: float64 altitude: float64 position_covariance: array[float64, 9] class POSITION_COVARIANCE_TYPE(Enum): UNKNOWN = 0 APPROXIMATED = 1 DIAGONAL_KNOWN = 2 KNOWN = 3 position_covariance_type: uint8 @dataclass class PointField(IdlStruct, typename="PointField"): name: str offset: uint32 class DATA_TYPE(Enum): INT8 = 1 UINT8 = 2 INT16 = 3 UINT16 = 4 INT32 = 5 UINT32 = 6 FLOAT32 = 7 FLOAT64 = 8 datatype: uint8 count: uint32 @dataclass class PointCloud2(IdlStruct, typename="PointCloud2"): header: Header height: uint32 width: uint32 fields: sequence[PointField] is_bigendian: bool point_step: uint32 row_step: uint32 data: sequence[uint8] is_dense: bool
zenoh-ros-type
/zenoh-ros-type-0.2.0.tar.gz/zenoh-ros-type-0.2.0/zenoh_ros_type/common_interfaces/sensor_msgs.py
sensor_msgs.py
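The message classes above are plain `pycdr2` IdlStructs, so they can be round-tripped through CDR bytes. A small sketch, assuming `IdlStruct` exposes `serialize()`/`deserialize()` (the usual pycdr2 surface, treated as an assumption here):

```python
# Round-trip sketch for the CDR types above. serialize()/deserialize() are
# assumed from pycdr2's IdlStruct; RegionOfInterest needs no Header, which
# keeps the example self-contained.
from zenoh_ros_type.common_interfaces.sensor_msgs import RegionOfInterest

roi = RegionOfInterest(x_offset=0, y_offset=0, height=480, width=640, do_rectify=False)
wire = roi.serialize()                        # CDR-encoded bytes, e.g. a Zenoh payload
decoded = RegionOfInterest.deserialize(wire)  # back to the dataclass
assert decoded == roi
```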
[![Build Status](https://travis-ci.com/atolab/zenoh-python.svg?branch=master)](https://travis-ci.com/atolab/zenoh-python)
[![codecov](https://codecov.io/gh/atolab/zenoh-python/branch/master/graph/badge.svg)](https://codecov.io/gh/atolab/zenoh-python)
[![Documentation Status](https://readthedocs.org/projects/zenoh-python/badge/?version=latest)](https://zenoh-python.readthedocs.io/en/latest/?badge=latest)
[![Gitter](https://badges.gitter.im/atolab/zenoh.svg)](https://gitter.im/atolab/zenoh?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)

# Zenoh Python API

[Zenoh](http://zenoh.io) is an extremely efficient and fault-tolerant [Named Data Networking](http://named-data.net) (NDN) protocol that is able to scale down to extremely constrained devices and networks. The Python API is for pure clients; in other words, it does not support peer-to-peer communication. It can easily be tested against our demo instance available at **demo.zenoh.io**.

## Dependencies

The zenoh-python API depends on the [zenoh-c](https://github.com/atolab/zenoh-c) API. Thus the first thing to do is to ensure that **zenoh-c** is installed on your machine. To do so, please follow the instructions provided [here](https://github.com/atolab/zenoh-c/blob/master/README.md).

## Installing the Python API from Sources

To install the API you can do:

    $ python3 setup.py install

Notice that on some platforms, such as Linux, you will need to do this as *sudo*.

<!-- ## Installing the API from PyPi

You can also install the [zenoh](http://zenoh.io)'s python API from PyPi by simply doing:

    pip3 install zenoh -->

## Running the Examples

To run the bundled examples without installing any additional software you can use the **zenoh** demo instance available at **demo.zenoh.io**. To do so, simply run as follows:

    $ cd zenoh-python/example
    $ python3 sub.py -z demo.zenoh.io

From another terminal:

    $ cd zenoh-python/example
    $ python3 pub.py -z demo.zenoh.io
zenoh
/zenoh-0.3.0.tar.gz/zenoh-0.3.0/README.md
README.md
===========
Zen-O-Matic
===========

Shows the Zen of Python in your Applications

:Author: Chris Maillefaud
:Version: 1.0.0
:Python Version: 2.7, 3.5 and 3.6
:Date: October, 16, 2017

Install and Use
---------------

Install with pip:

`pip install zenomatic`

Use it in one of two ways:

1. Run from the command line.

   Options:

   - 'random': show a random quote
   - 'seed <number>': show the numbered quote
   - 'all': show all quotes

   Example: `zenomatic random`

2. Run inside an application::

      from zenomatic import get_quote

      ret, quote = get_quote(1)
      if ret:
          print(quote)

Available commands
------------------

`get_quote(number)`

Get a specific quote from 0 to 18

`get_quotes()`

Get all quotes
zenomatic
/zenomatic-1.0.1.tar.gz/zenomatic-1.0.1/README.rst
README.rst
MIT License Copyright (c) 2022 Ángel Alexander Cabrera Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
zenoml
/zenoml-0.6.4-py3-none-any.whl/zenoml-0.6.4.dist-info/LICENSE.md
LICENSE.md
import datetime import os import pickle import shutil import sys from contextlib import contextmanager from importlib import util from inspect import getmembers, isfunction from pathlib import Path from typing import Any, Callable, Dict, List, Union import pandas as pd import requests import tomli from zeno.api import ZenoParameters from zeno.classes.base import MetadataType, ZenoColumn VIEW_MAP_URL: str = "https://raw.githubusercontent.com/zeno-ml/instance-views/0.3/" VIEWS_MAP_JSON: str = "views.json" def read_pickle(file_name: str, cache_path: str, default): try: with open(os.path.join(cache_path, file_name), "rb") as f: return pickle.load(f) except FileNotFoundError: return default def get_metadata_type(col: pd.Series) -> MetadataType: try: datetime.datetime.fromisoformat(str(col[0])) return MetadataType.DATETIME except ValueError: pass if col.dtype == "bool" or col.dtype == "boolean": return MetadataType.BOOLEAN try: unique = col.unique().tolist() except TypeError: return MetadataType.OTHER if len(unique) < 21: return MetadataType.NOMINAL if col.dtype in [ "int64", "int32", "int16", "int8", "Int64", "Int32", "Int16", "Int8", "uint64", "uint32", "uint16", "uint8", "Uint64", "Uint32", "Uint16", "Uint8", "float64", "float32", "float16", "float8", "Float64", "Float32", "Float16", "Float8", ]: return MetadataType.CONTINUOUS return MetadataType.OTHER def load_series(df, col_name, save_path): try: series = pd.read_pickle(save_path) col_name.metadata_type = get_metadata_type(series) df.loc[:, str(col_name)] = series except FileNotFoundError: df.loc[:, str(col_name)] = pd.Series([pd.NA] * df.shape[0], index=df.index) except EOFError: df.loc[:, str(col_name)] = pd.Series([pd.NA] * df.shape[0], index=df.index) @contextmanager def add_to_path(p): old_path = sys.path sys.path = sys.path[:] sys.path.insert(0, p) try: yield finally: sys.path = old_path def read_config(args: Union[str, Dict, ZenoParameters]) -> ZenoParameters: params: ZenoParameters if isinstance(args, str): path = os.path.abspath(args) try: with open(path, "rb") as f: args_dict = tomli.load(f) except Exception: print("ERROR: Failed to read TOML configuration file.") sys.exit(1) params = ZenoParameters.parse_obj(args_dict) params.config_file = path # Change working directory to the directory of the config file. os.chdir(os.path.dirname(path)) elif isinstance(args, dict): params = ZenoParameters.parse_obj(args) elif isinstance(args, ZenoParameters): params = args else: sys.exit(1) if params.cache_path == "": params.cache_path = "./.zeno_cache/" else: params.cache_path = params.cache_path os.makedirs(params.cache_path, exist_ok=True) # Try to get view from GitHub List, if not try to read from path and copy it. if params.view != "": view_dest_path = Path(os.path.join(params.cache_path, "view.mjs")) view_path = Path(params.view) if view_path.is_file(): if view_dest_path.is_file(): os.remove(view_dest_path) shutil.copyfile(view_path, view_dest_path) else: try: views_res = requests.get(VIEW_MAP_URL + VIEWS_MAP_JSON) views = views_res.json() url = VIEW_MAP_URL + views[params.view] with open(view_dest_path, "wb") as out_file: content = requests.get(url, stream=True).content out_file.write(content) except KeyError: print( "ERROR: View not found in list or relative path." " See available views at ", "https://github.com/zeno-ml/instance-views/blob/main/views.json", ) sys.exit(1) if params.id_column == "": print( "WARNING: no id_column specified, using index as id_column. 
If you are", "using a data_column, suggest using it as id_column.", ) return params def read_metadata(meta: Union[str, pd.DataFrame]) -> pd.DataFrame: """Return DataFrame or try to read it from file""" if isinstance(meta, pd.DataFrame): return meta elif isinstance(meta, str): meta_path = Path(os.path.realpath(meta)) if meta_path.suffix == ".csv": return pd.read_csv(meta_path) elif meta_path.suffix == ".tsv": return pd.read_csv( meta_path, sep="\t", header=0, quoting=3, keep_default_na=False ) elif meta_path.suffix == ".parquet": return pd.read_parquet(meta_path) elif meta_path.suffix == ".jsonl": return pd.read_json(meta_path, lines=True) print( "ERROR: Failed to read metadata file " + meta + "\n Should be one of .csv, .jsonl, or .parquet" ) sys.exit(1) def load_function(test_file: Path) -> List[Callable[..., Any]]: # To allow relative imports in test files, # add their directory to path temporarily. with add_to_path(os.path.dirname(os.path.abspath(test_file))): spec = util.spec_from_file_location(str(test_file), test_file) test_module = util.module_from_spec(spec) # type: ignore spec.loader.exec_module(test_module) # type: ignore functions: List[Callable[..., Any]] = [] for _, func in getmembers(test_module): if isfunction(func): if ( hasattr(func, "predict_function") or hasattr(func, "distill_function") or hasattr(func, "metric_function") ): functions.append(func) return functions def read_functions(fns: Union[List[Callable], str]) -> List[Callable]: if isinstance(fns, list): return fns elif isinstance(fns, str): fn_path = Path(os.path.realpath(fns)) if os.path.isfile(fn_path): return load_function(fn_path) elif os.path.exists(fn_path): # Add directory with tests to path for relative imports. fns = [] for f in list(fn_path.rglob("*.py")): fns = fns + load_function(f) return fns return [] def is_notebook() -> bool: try: from IPython.core.getipython import get_ipython shell = get_ipython().__class__.__name__ if shell == "ZMQInteractiveShell": return True # Jupyter notebook or qtconsole elif shell == "TerminalInteractiveShell": return False # Terminal running IPython else: return False # Other type (?) except (NameError, ImportError): return False # Probably standard Python interpreter def generate_diff_cols( df: pd.DataFrame, diff_col_1: ZenoColumn, diff_col_2: ZenoColumn ) -> pd.DataFrame: """Generate a new column of differences based on the original dataframe and specified columns. Args: df (DataFrame): The original dataframe. diff_col_1 (ZenoColumn): The first column used to calculate the difference. diff_col_2 (ZenoColumn): The second column used to calculate the difference. Returns: DataFrame: Return the new dataframe containing the diff column. """ if ( diff_col_1.column_type != diff_col_2.column_type or diff_col_1.metadata_type != diff_col_2.metadata_type ): print("error: different column types!") return df # various metadata type difference if diff_col_1.metadata_type == MetadataType.CONTINUOUS: # force the column type being float if "diff" in df.columns: df["diff"] = df["diff"].astype(float) df.loc[:, "diff"] = df[str(diff_col_1)].astype(float) - df[ str(diff_col_2) ].astype(float) else: df.loc[:, "diff"] = df[str(diff_col_1)] != df[str(diff_col_2)] return df
zenoml
/zenoml-0.6.4-py3-none-any.whl/zeno/util.py
util.py
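`get_metadata_type` drives most of the column typing above, and its rules can be checked directly on small Series. A sketch, assuming the `zeno.util` module path from this listing:

```python
# Exercising the classification rules of get_metadata_type above:
# ISO-parseable first value -> DATETIME, bool dtype -> BOOLEAN,
# fewer than 21 unique values -> NOMINAL, numeric dtype -> CONTINUOUS.
import pandas as pd

from zeno.util import get_metadata_type

print(get_metadata_type(pd.Series(["2023-01-01", "2023-01-02"])))  # DATETIME
print(get_metadata_type(pd.Series([True, False, True])))           # BOOLEAN
print(get_metadata_type(pd.Series(["a", "b", "a", "c"])))          # NOMINAL
print(get_metadata_type(pd.Series(range(100))))                    # CONTINUOUS
```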
import sys from typing import Dict, Union import pkg_resources import uvicorn from multiprocess import Process # type: ignore from zeno.api import ZenoParameters from zeno.backend import ZenoBackend from zeno.server import get_server from zeno.setup import setup_zeno from zeno.util import is_notebook, read_config # Global variable to hold the Zeno server process. # This is used to kill the server when re-running in a notebook. ZENO_SERVER_PROCESS = None def command_line(): if len(sys.argv) == 1 or sys.argv[1] == "-h" or sys.argv[1] == "--help": print( "\n \033[1mZeno\033[0m", pkg_resources.get_distribution("zenoml").version, " - Machine learning evaluation framework.", "\n\n", "\033[1mUSAGE \033[0m \n\t", "zeno [-h] [-v] <config.toml>", "\n\n", "\033[1mARGUMENTS \033[0m \n\t", "<config.toml>\t\tZeno configuration file.\n\n" "\033[1m GLOBAL OPTIONS \033[0m \n\t", "-h (--help)\t\tDisplay this help message.\n" "\t -v (--version)\t\tDisplay this application version.\n", ) sys.exit(0) if len(sys.argv) != 2: print( "ERROR: Zeno take one argument, either a configuration TOML file" + " or the keyword 'init'. " + "{0} arguments were passed.", len(sys.argv), ) sys.exit(1) if sys.argv[1] == "-v" or sys.argv[1] == "--version": print(pkg_resources.get_distribution("zenoml").version) sys.exit(0) if sys.argv[1] == "init" or sys.argv[1] == "i": setup_zeno() else: zeno(sys.argv[1]) def run_zeno(params: ZenoParameters): zeno = ZenoBackend(params) app = get_server(zeno) zeno.start_processing() print( "\n\033[1mZeno\033[0m running on http://{}:{}\n".format( params.host, params.port ) ) uvicorn.run(app, host=params.host, port=params.port, log_level="error") def zeno(args: Union[str, ZenoParameters, Dict]): """Main entrypoint for Zeno. This is called directly by the user in a notebook or script, or called by the command_line function when run by CLI. Args: args (Union[str, ZenoParameters, Dict]): The configuration for Zeno. ZenoParameters or dict when called from Python, str if called from commandline. """ params = read_config(args) if params.serve: global ZENO_SERVER_PROCESS if ZENO_SERVER_PROCESS is not None: ZENO_SERVER_PROCESS.terminate() ZENO_SERVER_PROCESS = Process( target=run_zeno, args=(params,), ) ZENO_SERVER_PROCESS.start() if not is_notebook(): ZENO_SERVER_PROCESS.join() else: zeno = ZenoBackend(params) return zeno
zenoml
/zenoml-0.6.4-py3-none-any.whl/zeno/runner.py
runner.py
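`zeno()` accepts a dict as well as a config path, so a minimal launch from a script might look like the sketch below; the DataFrame and column names are placeholders, not part of the listing:

```python
# Minimal launch sketch: read_config turns the dict into ZenoParameters,
# then a server process is spawned because serve defaults to True.
import pandas as pd

from zeno.runner import zeno

metadata = pd.DataFrame({"id": ["a", "b"], "label": [0, 1]})  # placeholder table

zeno(
    {
        "metadata": metadata,
        "id_column": "id",
        "label_column": "label",
        "port": 8000,
        "host": "localhost",
    }
)
```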
import functools from typing import Any, Callable, Dict, List, Union from numpy.typing import NDArray from pandas import DataFrame, Series from pydantic import BaseModel class ZenoOptions(BaseModel): """Parameters passed to Zeno test functions. Args: id_column (str): Column in dataframe with unique identifiers. data_column (str): Column in dataframe with either raw data or path to data. label_column (str): Column in dataframe with either raw labels or path to labels. output_column (str): Column in dataframe with a given model's raw output or path to output data_path (str): Path to directory with data files. label_path (str): Path to directory with label files. output_path (str): Path to directory with a given model's output. distill_columns (map[str, str]): Map from distill function name to distill column. """ id_column: str data_column: str label_column: str output_column: str distill_columns: Dict[str, str] data_path: str label_path: str output_path: str class ZenoParameters(BaseModel): """Options passed to the backend processing pipeline.""" metadata: Union[DataFrame, str] # If run from command line, config_file will be a path to a config file. config_file: str = "" functions: Union[List[Callable], str] = [] view: str = "" models: List[str] = [] id_column: str = "" data_column: str = "" label_column: str = "" data_path: str = "" label_path: str = "" batch_size: int = 1 cache_path: str = "" calculate_histogram_metrics = True editable: bool = True multiprocessing: bool = True serve: bool = True samples: int = 30 port: int = 8000 host: str = "localhost" class Config: arbitrary_types_allowed = True class ModelReturn(BaseModel): """Return type for model functions. Args: model_output (Series | List): Model output for each sample. embedding (Series | List[List[float]] | List[NDArray] | NDArray | None): High-dimensional embedding for each sample. Optional. other_returns (Dict[str, Series | List] | None): Other returns from the model to be shown as metadata columns in the UI. """ model_output: Union[Series, List[Any]] embedding: Union[Series, List[List[float]], List[NDArray], NDArray, None] = None other_returns: Union[Dict[str, Union[Series, List[Any]]], None] = None class Config: arbitrary_types_allowed = True class DistillReturn(BaseModel): """Return type for distill functions Args: distill_output (Series | List): Distill outputs for each sample. """ distill_output: Union[Series, List[Any]] class Config: arbitrary_types_allowed = True class MetricReturn(BaseModel): """Return type for metric functions. Args: metric (float): Average metric over subset of data """ metric: float class Config: arbitrary_types_allowed = True def model(func: Callable[[str], Callable[[DataFrame, ZenoOptions], ModelReturn]]): """Decorator function for model functions. Args: func (Callable[[str], Callable[[DataFrame, ZenoOptions], ModelReturn]]): A function that that takes a model name and returns a model function, which itself returns a function that takes a DataFrame and ZenoOptions and returns a ModelReturn. """ @functools.wraps(func) def _wrapper(*args, **kwargs): return func(*args, **kwargs) _wrapper.predict_function = True # type: ignore return _wrapper def distill(func: Callable[[DataFrame, ZenoOptions], DistillReturn]): """Deocrator function for distill functions. Args: func (Callable[[DataFrame, ZenoOptions], DistillReturn]): A function that takes a DataFrame and ZenoOptions and returns a DistillReturn. 
""" @functools.wraps(func) def _wrapper(*args, **kwargs): return func(*args, **kwargs) _wrapper.distill_function = True # type: ignore return _wrapper def metric(func: Callable[[DataFrame, ZenoOptions], MetricReturn]): """Decorator function for metric functions. Args: func (Callable[[DataFrame, ZenoOptions], MetricReturn]): A metric function that takes a DataFrame and ZenoOptions and returns a MetricReturn with an average metric value and optional error rate. """ @functools.wraps(func) def _wrapper(*args, **kwargs): return func(*args, **kwargs) _wrapper.metric_function = True # type: ignore return _wrapper
zenoml
/zenoml-0.6.4-py3-none-any.whl/zeno/api.py
api.py
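Concretely, the three decorators above mark functions that the backend later discovers by attribute. A sketch of one of each kind; the column access via `ZenoOptions` is illustrative, and note that a distill function whose source never mentions `output_column` is treated as a predistill function:

```python
# One model, one (pre)distill, and one metric function, in the shapes the
# decorators above expect.
from pandas import DataFrame

from zeno.api import (
    DistillReturn,
    MetricReturn,
    ModelReturn,
    ZenoOptions,
    distill,
    metric,
    model,
)


@model
def load_model(name: str):
    def predict(df: DataFrame, ops: ZenoOptions) -> ModelReturn:
        # A trivial "model" that just echoes the data column.
        return ModelReturn(model_output=df[ops.data_column].tolist())

    return predict


@distill
def text_length(df: DataFrame, ops: ZenoOptions) -> DistillReturn:
    # No reference to the model output, so this runs before inference.
    return DistillReturn(distill_output=df[ops.data_column].str.len())


@metric
def accuracy(df: DataFrame, ops: ZenoOptions) -> MetricReturn:
    if len(df) == 0:
        return MetricReturn(metric=0.0)
    return MetricReturn(metric=float((df[ops.output_column] == df[ops.label_column]).mean()))
```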
import asyncio import os from typing import Dict, List, Union from fastapi import FastAPI, HTTPException, WebSocket from fastapi.routing import APIRoute from fastapi.staticfiles import StaticFiles from zeno.backend import ZenoBackend from zeno.classes.base import ZenoColumn from zeno.classes.classes import ( ColorsProjectRequest, EmbedProject2DRequest, EntryRequest, MetricRequest, PlotRequest, StatusResponse, TableRequest, ZenoSettings, ZenoVariables, ) from zeno.classes.metadata import HistogramBucket, HistogramRequest, StringFilterRequest from zeno.classes.projection import Points2D, PointsColors from zeno.classes.report import Report from zeno.classes.slice import GroupMetric, Slice from zeno.classes.slice_finder import SliceFinderRequest, SliceFinderReturn from zeno.classes.tag import Tag, TagMetricKey from zeno.processing.histogram_processing import ( filter_by_string, histogram_buckets, histogram_counts, histogram_metrics, ) from zeno.processing.projection_processing import ( check_embed_exists, project_into_2d, projection_colors, ) from zeno.processing.slice_finder import slice_finder from zeno.util import read_config def custom_generate_unique_id(route: APIRoute): return route.name def get_server(zeno: ZenoBackend): app = FastAPI(title="Frontend API") api_app = FastAPI( title="Backend API", generate_unique_id_function=custom_generate_unique_id ) if zeno.data_path != "" and os.path.exists(zeno.data_path): app.mount("/data", StaticFiles(directory=zeno.data_path), name="static") if zeno.label_path != "" and os.path.exists(zeno.label_path): app.mount("/labels", StaticFiles(directory=zeno.label_path), name="static") app.mount( "/cache", StaticFiles(directory=zeno.cache_path), name="cache", ) app.mount("/api", api_app) app.mount( "/", StaticFiles( directory=os.path.dirname(os.path.realpath(__file__)) + "/frontend", html=True, ), name="base", ) @api_app.get("/settings", response_model=ZenoSettings, tags=["zeno"]) def get_settings(): return ZenoSettings( view=zeno.view, id_column=zeno.id_column, label_column=zeno.label_column, data_column=zeno.data_column, calculate_histogram_metrics=zeno.calculate_histogram_metrics, samples=zeno.samples, total_size=zeno.df.shape[0], ) @api_app.get("/initialize", response_model=ZenoVariables, tags=["zeno"]) def get_initial_info(): return ZenoVariables( metrics=list(zeno.metric_functions.keys()), models=[str(n) for n in zeno.model_names], folders=zeno.folders, ) @api_app.get("/slices", response_model=Dict[str, Slice], tags=["zeno"]) def get_slices(): return zeno.slices @api_app.get("/tags", response_model=Dict[str, Tag], tags=["zeno"]) def get_tags(): return zeno.tags @api_app.get("/reports", response_model=List[Report], tags=["zeno"]) def get_reports(): return zeno.reports @api_app.post("/folders", tags=["zeno"]) def set_folders(folders: List[str]): zeno.set_folders(folders) @api_app.post("/reports", tags=["zeno"]) def update_reports(reqs: List[Report]): zeno.set_reports(reqs) @api_app.post("/filtered-ids", response_model=str, tags=["zeno"]) def get_filtered_ids(req: PlotRequest): return zeno.get_filtered_ids(req) @api_app.post("/filtered-table", response_model=str, tags=["zeno"]) def get_filtered_table(req: TableRequest): return zeno.get_filtered_table(req) @api_app.get("/refresh", tags=["zeno"]) def refresh_data(): if not zeno.editable: return if zeno.params.config_file: zeno.params = read_config(zeno.params.config_file) zeno.done_running_inference = False zeno.initial_setup() zeno.start_processing() @api_app.post( "/histograms", 
response_model=List[List[HistogramBucket]], tags=["zeno"] ) def get_histogram_buckets(req: List[ZenoColumn]): return histogram_buckets(zeno.df, req) @api_app.post("/histogram-counts", response_model=List[List[int]], tags=["zeno"]) def calculate_histogram_counts(req: HistogramRequest): return histogram_counts(zeno.df, req) @api_app.post( "/histogram-metrics", response_model=List[List[Union[float, None]]], tags=["zeno"], ) def calculate_histogram_metrics(req: HistogramRequest): return histogram_metrics(zeno.df, zeno.calculate_metric, req) @api_app.post("/tag", tags=["zeno"]) def create_new_tag(req: Tag): zeno.create_new_tag(req) @api_app.delete("/tag", tags=["zeno"]) def delete_tag(tag_name: List[str]): zeno.delete_tag(tag_name[0]) @api_app.post("/slice", tags=["zeno"]) def create_new_slice(req: Slice): zeno.create_new_slice(req) @api_app.delete("/slice", tags=["zeno"]) def delete_slice(slice_name: List[str]): zeno.delete_slice(slice_name[0]) @api_app.post("/string-filter", response_model=List[str], tags=["zeno"]) def filter_string_metadata(req: StringFilterRequest): filt_out = filter_by_string(zeno.df, req) return filt_out @api_app.post("/slice-metrics", response_model=List[GroupMetric], tags=["zeno"]) def get_metrics_for_slices(req: MetricRequest): return zeno.get_metrics_for_slices(req.metric_keys, req.filter_ids) @api_app.post("/slice-tag-metrics", response_model=List[GroupMetric], tags=["zeno"]) def get_metrics_for_slices_and_tags(req: MetricRequest): return zeno.get_metrics_for_slices_and_tags( req.metric_keys, req.tag_ids, req.filter_ids, req.tag_list ) @api_app.post("/tag-metrics", response_model=List[GroupMetric], tags=["zeno"]) def get_metrics_for_tags(req: List[TagMetricKey]): return zeno.get_metrics_for_tags(req) @api_app.get("/embed-exists/{model}", response_model=bool, tags=["zeno"]) def embed_exists(model: str): """Checks if embedding exists for a model. Returns the boolean True or False directly """ return check_embed_exists(zeno.df, model) @api_app.post("/embed-project", tags=["zeno"], response_model=Points2D) def project_embed_into_2d(req: EmbedProject2DRequest): return project_into_2d(zeno.df, zeno.id_column, req.model, req.column) @api_app.post("/slice-finder", tags=["zeno"], response_model=SliceFinderReturn) def run_slice_finder(req: SliceFinderRequest): return slice_finder(zeno.df, req) @api_app.post("/colors-project", tags=["zeno"], response_model=PointsColors) def get_projection_colors(req: ColorsProjectRequest): return projection_colors(zeno.df, req.column) @api_app.post("/entry", tags=["zeno"], response_model=str) def get_df_row_entry(req: EntryRequest): try: entry = zeno.df.loc[req.id, :].copy() if len(req.columns) > 0: entry = entry[list(map(str, req.columns))] # Add data prefix to data column depending on type of data_path. entry.loc[str(zeno.data_column)] = ( zeno.data_prefix + entry[str(zeno.data_column)] ) return entry.to_json() except KeyError: raise HTTPException( status_code=404, detail=f"Entry with id={req.id} not found" ) @api_app.websocket("/status") async def results_websocket(websocket: WebSocket): await websocket.accept() previous_status = "" while True: await asyncio.sleep(1) if zeno.status != previous_status: previous_status = zeno.status await websocket.send_json( StatusResponse( status=zeno.status, done_processing=zeno.done_running_inference, complete_columns=zeno.complete_columns, ).json(by_alias=True) ) return app
zenoml
/zenoml-0.6.4-py3-none-any.whl/zeno/server.py
server.py
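Once the server is running, the mounted backend API can be exercised with plain HTTP. A small sketch against a local instance (host and port are whatever the server was started with):

```python
# Query two of the read-only routes defined above on a locally running server.
import requests

base = "http://localhost:8000/api"
print(requests.get(f"{base}/settings").json())    # ZenoSettings
print(requests.get(f"{base}/initialize").json())  # metrics, models, folders
```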
import asyncio import glob import logging import os import pickle import sys import threading from inspect import getsource from pathlib import Path from typing import Callable, Dict, List, Optional, Union import pandas as pd from pandas import DataFrame from pathos.multiprocessing import ProcessingPool as Pool from zeno.api import ( DistillReturn, MetricReturn, ModelReturn, ZenoOptions, ZenoParameters, ) from zeno.classes.base import DataProcessingReturn, MetadataType, ZenoColumnType from zeno.classes.classes import MetricKey, PlotRequest, TableRequest, ZenoColumn from zeno.classes.report import Report from zeno.classes.slice import FilterIds, FilterPredicateGroup, GroupMetric, Slice from zeno.classes.tag import Tag, TagMetricKey from zeno.processing.data_processing import ( postdistill_data, predistill_data, run_inference, ) from zeno.processing.filtering import filter_table from zeno.util import ( generate_diff_cols, get_metadata_type, load_series, read_functions, read_metadata, read_pickle, ) class ZenoBackend(object): def __init__(self, args: ZenoParameters): logging.basicConfig(level=logging.INFO) self.params = args self.initial_setup() def initial_setup(self) -> None: self.metadata = self.params.metadata self.functions = self.params.functions self.batch_size = self.params.batch_size self.data_path = self.params.data_path self.label_path = self.params.label_path self.cache_path = self.params.cache_path self.multiprocessing = self.params.multiprocessing self.editable = self.params.editable self.samples = self.params.samples self.view = self.params.view self.calculate_histogram_metrics = self.params.calculate_histogram_metrics self.model_names = self.params.models self.df = read_metadata(self.metadata) self.tests = read_functions(self.functions) self.data_prefix = "" if self.data_path.startswith("http"): self.data_prefix = self.data_path elif self.data_path != "": self.data_prefix = "/data/" self.done_running_inference = False self.predistill_functions: Dict[ str, Callable[[DataFrame, ZenoOptions], DistillReturn] ] = {} self.postdistill_functions: Dict[ str, Callable[[DataFrame, ZenoOptions], DistillReturn] ] = {} self.metric_functions: Dict[ str, Callable[[DataFrame, ZenoOptions], MetricReturn] ] = {} self.predict_function: Optional[ Callable[[str], Callable[[DataFrame, ZenoOptions], ModelReturn]] ] = None self.gradio_input_columns: List[str] = [] self.status: str = "Initializing" self.folders: List[str] = read_pickle("folders.pickle", self.cache_path, []) self.reports: List[Report] = read_pickle("reports.pickle", self.cache_path, []) self.slices: Dict[str, Slice] = read_pickle( "slices.pickle", self.cache_path, {} ) self.tags: Dict[str, Tag] = read_pickle("tags.pickle", self.cache_path, {}) if "All Instances" not in self.slices: orig_slices = self.slices all_instance = Slice( slice_name="All Instances", folder="", filter_predicates=FilterPredicateGroup(predicates=[], join=""), ) self.slices = {"All Instances": all_instance} self.slices.update(orig_slices) self.__setup_dataframe( self.params.id_column, self.params.data_column, self.params.label_column ) self.__parse_test_functions(self.tests) # Options passed to Zeno functions. 
self.zeno_options = ZenoOptions( id_column=str(self.id_column), data_column=str(self.data_column), label_column=str(self.label_column), distill_columns=dict(), data_path=self.data_path, label_path=self.label_path, output_column="", output_path="", ) def __setup_dataframe(self, id_column: str, data_column: str, label_column: str): if data_column != "": if data_column != id_column: self.data_column = ZenoColumn( column_type=ZenoColumnType.METADATA, metadata_type=get_metadata_type(self.df[data_column]), name=data_column, ) else: # make sure id and data column are different self.df["data"] = self.df[data_column] self.data_column = ZenoColumn( column_type=ZenoColumnType.METADATA, metadata_type=get_metadata_type(self.df["data"]), name="data", ) else: self.data_column = ZenoColumn( column_type=ZenoColumnType.METADATA, metadata_type=MetadataType.OTHER, name="", ) if label_column != "": self.label_column = ZenoColumn( column_type=ZenoColumnType.METADATA, metadata_type=get_metadata_type(self.df[label_column]), name=label_column, ) else: self.label_column = ZenoColumn( column_type=ZenoColumnType.METADATA, metadata_type=MetadataType.OTHER, name="", ) if id_column != "": self.id_column = ZenoColumn( column_type=ZenoColumnType.METADATA, metadata_type=MetadataType.OTHER, name=id_column, ) self.df[str(self.id_column)].astype(str) else: self.df = self.df.reset_index() self.id_column = ZenoColumn( column_type=ZenoColumnType.METADATA, metadata_type=MetadataType.OTHER, name="index", ) self.columns: List[ZenoColumn] = [] self.complete_columns: List[ZenoColumn] = [] self.df = self.df.set_index(str(self.id_column), drop=False) # Set index name to None to prevent name overlaps w/ columns. self.df.index.name = None for metadata_col in self.df.columns: col = ZenoColumn( column_type=ZenoColumnType.METADATA, metadata_type=get_metadata_type(self.df[metadata_col]), name=str(metadata_col), ) self.columns.append(col) self.complete_columns.append(col) def __parse_test_functions(self, tests: List[Callable]): for test_fn in tests: if hasattr(test_fn, "predict_function"): if self.predict_function is None: self.predict_function = test_fn else: print("ERROR: Multiple model functions found, can only have one") sys.exit(1) if hasattr(test_fn, "distill_function"): src = getsource(test_fn) if "output_column" in src: self.postdistill_functions[test_fn.__name__] = test_fn else: self.predistill_functions[test_fn.__name__] = test_fn if hasattr(test_fn, "metric_function"): self.metric_functions[test_fn.__name__] = test_fn def start_processing(self): """Parse testing files, distill, and run inference.""" if not self.tests: self.done_running_inference = True self.status = "Done processing" return for fn in self.predistill_functions.values(): self.columns.append( ZenoColumn(column_type=ZenoColumnType.PREDISTILL, name=fn.__name__) ) for fn in self.postdistill_functions.values(): for m in self.model_names: self.columns.append( ZenoColumn( column_type=ZenoColumnType.POSTDISTILL, name=fn.__name__, model=m, ) ) self.__thread = threading.Thread( target=asyncio.run, args=(self.__process(),), daemon=True ) self.__thread.start() async def __process(self): self.status = "Running predistill functions" print(self.status) self.__predistill() self.status = "Running inference" print(self.status) self.__inference() self.done_running_inference = True self.status = "Running postdistill functions" print(self.status) self.__postdistill() self.status = "Done processing" print(self.status) def __set_data_processing_returns(self, rets: 
List[List[DataProcessingReturn]]): """Update DataFrame with new columns from processing functions. Args: rets (List[List[DataProcessingReturn]]): List of returns from decorated functions. """ for ret in rets: for out in ret: c_hash = str(out.column) self.df.loc[:, c_hash] = out.output self.df[c_hash] = self.df[c_hash].convert_dtypes() out.column.metadata_type = get_metadata_type(self.df[c_hash]) self.complete_columns.append(out.column) def __predistill(self) -> None: """Run distilling functions not dependent on model outputs.""" # Check if we need to preprocess since Pool is expensive predistill_to_run: List[ZenoColumn] = [] for predistill_column in [ c for c in self.columns if c.column_type == ZenoColumnType.PREDISTILL ]: save_path = Path(self.cache_path, str(predistill_column) + ".pickle") load_series(self.df, predistill_column, save_path) predistill_hash = str(predistill_column) if self.df[predistill_hash].isna().any(): predistill_to_run.append(predistill_column) else: self.df[predistill_hash] = self.df[predistill_hash].convert_dtypes() predistill_column.metadata_type = get_metadata_type( self.df[predistill_hash] ) self.complete_columns.append(predistill_column) if len(predistill_to_run) > 0: if self.multiprocessing: with Pool() as pool: predistill_outputs = pool.map( predistill_data, [ self.predistill_functions[col.name] for col in predistill_to_run ], [col for col in predistill_to_run], [self.zeno_options] * len(predistill_to_run), [self.cache_path] * len(predistill_to_run), [self.df] * len(predistill_to_run), [self.batch_size] * len(predistill_to_run), range(len(predistill_to_run)), ) self.__set_data_processing_returns(predistill_outputs) else: predistill_outputs = [] for i, predistill in enumerate(predistill_to_run): predistill_outputs.append( predistill_data( self.predistill_functions[predistill.name], predistill, self.zeno_options, self.cache_path, self.df, self.batch_size, i, ) ) self.__set_data_processing_returns(predistill_outputs) def __inference(self): """Run models on instances.""" # Check if we need to run inference since Pool is expensive models_to_run = [] for model_name in self.model_names: model_column = ZenoColumn( column_type=ZenoColumnType.OUTPUT, name="output", model=model_name ) embedding_column = ZenoColumn( column_type=ZenoColumnType.EMBEDDING, name="embedding", model=model_name ) model_hash = str(model_column) embedding_hash = str(embedding_column) model_save_path = Path(self.cache_path, model_hash + ".pickle") embedding_save_path = Path(self.cache_path, embedding_hash + ".pickle") load_series(self.df, model_column, model_save_path) load_series(self.df, embedding_column, embedding_save_path) if self.df[model_hash].isna().any(): models_to_run.append(model_name) else: self.df[model_hash] = self.df[model_hash].convert_dtypes() model_column.metadata_type = get_metadata_type(self.df[model_hash]) self.complete_columns.append(model_column) # Check if there were saved postdistill columns: for f in glob.glob( os.path.join( self.cache_path, "POSTDISTILL*" + model_name + ".pickle" ) ): name = os.path.basename(f).split(model_name)[0][11:] col = ZenoColumn( column_type=ZenoColumnType.POSTDISTILL, name=name, model=model_name, ) series = pd.read_pickle(f) self.df.loc[:, str(col)] = series self.df[str(col)] = self.df[str(col)].convert_dtypes() col.metadata_type = get_metadata_type(self.df[str(col)]) self.complete_columns.append(col) if len(models_to_run) > 0 and self.predict_function is not None: if self.multiprocessing: with Pool() as pool: inference_outputs = pool.map( 
run_inference, [self.predict_function] * len(models_to_run), [self.zeno_options] * len(models_to_run), [m for m in models_to_run], [self.cache_path] * len(models_to_run), [self.df] * len(models_to_run), [self.batch_size] * len(models_to_run), range(len(models_to_run)), ) else: inference_outputs = [] for i, model_name in enumerate(models_to_run): inference_outputs.append( run_inference( self.predict_function, self.zeno_options, model_name, self.cache_path, self.df, self.batch_size, i, ) ) self.__set_data_processing_returns(inference_outputs) def __postdistill(self) -> None: """Run distill functions dependent on model outputs.""" # Check if we need to run postprocessing since Pool is expensive postdistill_to_run: List[ZenoColumn] = [] for postdistill_column in [ c for c in self.columns if c.column_type == ZenoColumnType.POSTDISTILL ]: col_name = postdistill_column.copy( update={ "model": postdistill_column.model, } ) col_hash = str(col_name) # If we already loaded in inference, skip. if col_hash in self.df.columns: continue save_path = Path(self.cache_path, col_hash + ".pickle") load_series(self.df, col_name, save_path) if self.df[col_hash].isna().any(): postdistill_to_run.append(col_name) else: self.df[col_hash] = self.df[col_hash].convert_dtypes() col_name.metadata_type = get_metadata_type(self.df[col_hash]) self.complete_columns.append(col_name) if len(postdistill_to_run) > 0: if self.multiprocessing: with Pool() as pool: post_outputs = pool.map( postdistill_data, [ self.postdistill_functions[e.name] for e in postdistill_to_run ], [e.model for e in postdistill_to_run], [self.zeno_options] * len(postdistill_to_run), [self.cache_path] * len(postdistill_to_run), [self.df] * len(postdistill_to_run), [self.batch_size] * len(postdistill_to_run), range(len(postdistill_to_run)), ) else: post_outputs = [] for i, postdistill in enumerate(postdistill_to_run): post_outputs.append( postdistill_data( self.postdistill_functions[postdistill.name], postdistill.model if postdistill.model else "", self.zeno_options, self.cache_path, self.df, self.batch_size, i, ) ) self.__set_data_processing_returns(post_outputs) def get_metrics_for_slices( self, requests: List[MetricKey], filter_ids: Optional[FilterIds] = None, ) -> List[GroupMetric]: """Calculate result for each requested combination.""" return_metrics: List[GroupMetric] = [] for metric_key in requests: # If we refresh, might not have columns for a slice. 
try: filt_df = filter_table( self.df, metric_key.sli.filter_predicates, filter_ids ) except pd.errors.UndefinedVariableError: return_metrics.append(GroupMetric(metric=None, size=0)) continue if metric_key.metric == "" or self.label_column.name == "": return_metrics.append(GroupMetric(metric=None, size=filt_df.shape[0])) else: metric = self.calculate_metric( filt_df, metric_key.model, metric_key.metric ) return_metrics.append(GroupMetric(metric=metric, size=filt_df.shape[0])) return return_metrics def get_metrics_for_slices_and_tags( self, requests: List[MetricKey], tag_ids: Optional[FilterIds] = None, filter_ids: Optional[FilterIds] = None, tag_list: Optional[List[str]] = None, ) -> List[GroupMetric]: """Calculate result for each requested combination.""" return_metrics: List[GroupMetric] = [] for metric_key in requests: filt_df = filter_table( self.df, metric_key.sli.filter_predicates, tag_ids, filter_ids, tag_list ) if metric_key.metric == "" or self.label_column.name == "": return_metrics.append(GroupMetric(metric=None, size=filt_df.shape[0])) else: metric = self.calculate_metric( filt_df, metric_key.model, metric_key.metric ) return_metrics.append(GroupMetric(metric=metric, size=filt_df.shape[0])) return return_metrics def get_metrics_for_tags(self, requests: List[TagMetricKey]) -> List[GroupMetric]: return_metrics: List[GroupMetric] = [] for tag_metric_key in requests: filt_df = filter_table(self.df, None, tag_metric_key.tag.selection_ids) if tag_metric_key.metric == "" or self.label_column.name == "": return_metrics.append(GroupMetric(metric=None, size=filt_df.shape[0])) else: # if the tag is empty if len(tag_metric_key.tag.selection_ids.ids) == 0: filt_df = self.df.iloc[0:0] metric = self.calculate_metric( filt_df, tag_metric_key.model, tag_metric_key.metric ) return_metrics.append(GroupMetric(metric=metric, size=filt_df.shape[0])) return return_metrics def calculate_metric( self, df: DataFrame, model: Union[str, None], metric: str ) -> Union[float, None]: if not self.done_running_inference: return None if model is not None: output_col = ZenoColumn( column_type=ZenoColumnType.OUTPUT, name="output", model=model ) output_hash = str(output_col) distill_fns = [ c for c in self.columns if ( c.column_type == ZenoColumnType.PREDISTILL or c.column_type == ZenoColumnType.POSTDISTILL ) and c.model == model ] local_ops = self.zeno_options.copy( update={ "output_column": output_hash, "output_path": os.path.join(self.cache_path, output_hash), "distill_columns": dict( zip( [c.name for c in distill_fns], [str(c) for c in distill_fns] ) ), } ) else: distill_fns = [ c for c in self.columns if ( c.column_type == ZenoColumnType.PREDISTILL or c.column_type == ZenoColumnType.POSTDISTILL ) ] local_ops = self.zeno_options.copy( update={ "distill_columns": dict( zip( [c.name for c in distill_fns], [str(c) for c in distill_fns] ) ), } ) return self.metric_functions[metric](df, local_ops).metric def set_folders(self, folders: List[str]): if not self.editable: return self.folders = folders with open(os.path.join(self.cache_path, "folders.pickle"), "wb") as f: pickle.dump(self.folders, f) def create_new_tag(self, req: Tag): if not self.editable: return self.tags[req.tag_name] = req with open(os.path.join(self.cache_path, "tags.pickle"), "wb") as f: pickle.dump(self.tags, f) def delete_tag(self, tag_name: str): if not self.editable: return del self.tags[tag_name] with open(os.path.join(self.cache_path, "tags.pickle"), "wb") as f: pickle.dump(self.tags, f) def set_reports(self, reports: List[Report]): if not 
self.editable: return self.reports = reports with open(os.path.join(self.cache_path, "reports.pickle"), "wb") as f: pickle.dump(self.reports, f) def create_new_slice(self, req: Slice): if not self.editable: return self.slices[req.slice_name] = req with open(os.path.join(self.cache_path, "slices.pickle"), "wb") as f: pickle.dump(self.slices, f) def delete_slice(self, slice_name: str): if not self.editable: return del self.slices[slice_name] with open(os.path.join(self.cache_path, "slices.pickle"), "wb") as f: pickle.dump(self.slices, f) def get_filtered_ids(self, req: PlotRequest): return filter_table(self.df, req.filter_predicates, req.tag_ids)[ str(self.id_column) ].to_json(orient="records") def get_filtered_table(self, req: TableRequest): """Return filtered table from list of filter predicates.""" filt_df = filter_table( self.df, req.filter_predicates, req.filter_ids, req.tag_ids, req.tag_list ) req_columns = [str(col) for col in req.columns] if req.diff_column_1 and req.diff_column_2: filt_df = generate_diff_cols(filt_df, req.diff_column_1, req.diff_column_2) req_columns.append("diff") if req.sort[0]: filt_df = filt_df.sort_values(str(req.sort[0]), ascending=req.sort[1]) filt_df = filt_df.iloc[req.slice_range[0] : req.slice_range[1]].copy() if self.data_prefix != "": # Add data prefix to data column depending on type of data_path. filt_df.loc[:, str(self.data_column)] = ( self.data_prefix + filt_df[str(self.data_column)] ) return filt_df.loc[:, req_columns].to_json(orient="records")
zenoml
/zenoml-0.6.4-py3-none-any.whl/zeno/backend.py
backend.py
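`ZenoBackend` can also be driven headless, which is what `zeno()` does when `serve` is false. A sketch with placeholder data; with no decorated functions registered, `start_processing` finishes immediately:

```python
# Headless backend sketch: metadata columns are typed and registered in
# __setup_dataframe, so complete_columns is populated even without models.
import pandas as pd

from zeno.api import ZenoParameters
from zeno.backend import ZenoBackend

params = ZenoParameters(
    metadata=pd.DataFrame({"id": ["a", "b"], "label": [0, 1]}),  # placeholder
    id_column="id",
    label_column="label",
    serve=False,
)
backend = ZenoBackend(params)
backend.start_processing()
print([c.name for c in backend.complete_columns])
```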
import re from math import isnan from typing import Callable, List, Union import numpy as np import pandas as pd from zeno.classes.base import MetadataType, ZenoColumn from zeno.classes.metadata import HistogramBucket, HistogramRequest, StringFilterRequest from zeno.processing.filtering import filter_table, filter_table_single def histogram_buckets( df: pd.DataFrame, req: List[ZenoColumn], num_bins: Union[int, str] = "doane" ) -> List[List[HistogramBucket]]: """Calculate the histogram buckets for a list of columns. Args: df (pd.DataFrame): main dataframe from zeno backend req (List[ZenoColumn]): list of columns to compute buckets for num_bins (Union[int, str], optional): estimates the best number and size of bins to use. Defaults to "doane", but can be a fixed integer Other options can be found [here](https://numpy.org/doc/stable/reference/generated/numpy.histogram_bin_edges.html) Returns: List[List[HistogramBucket]]: for each zeno column return a list of buckets """ res: List[List[HistogramBucket]] = [] for col in req: df_col = df[str(col)] if col.metadata_type == MetadataType.NOMINAL: ret_hist: List[HistogramBucket] = [] val_counts = df_col.value_counts() for k in val_counts.keys(): ret_hist.append(HistogramBucket(bucket=k)) res.append(ret_hist) elif col.metadata_type == MetadataType.CONTINUOUS: ret_hist: List[HistogramBucket] = [] df_col = df_col.fillna(0) bins = np.histogram_bin_edges(df_col, bins=num_bins) for i in range(len(bins) - 1): ret_hist.append( HistogramBucket( bucket=bins[i], bucket_end=bins[i + 1], ) ) res.append(ret_hist) elif col.metadata_type == MetadataType.BOOLEAN: res.append( [ HistogramBucket(bucket=True), HistogramBucket(bucket=False), ] ) elif col.metadata_type == MetadataType.DATETIME: res.append([]) else: res.append([]) return res def histogram_counts(df: pd.DataFrame, req: HistogramRequest) -> List[List[int]]: """Calculate count for each bucket in each column histogram.""" if req.filter_predicates is not None: filt_df = filter_table( df, req.filter_predicates, req.tag_ids, req.filter_ids, req.tag_list ) else: filt_df = df ret: List[List[int]] = [] for r in req.column_requests: col = r.column if str(col) not in filt_df.columns: ret.append([]) elif col.metadata_type == MetadataType.NOMINAL: counts = filt_df.groupby([str(col)]).size() ret.append( [ counts[b.bucket] if b.bucket in counts else 0 # type: ignore for b in r.buckets ] ) elif col.metadata_type == MetadataType.BOOLEAN: ret.append( [filt_df[str(col)].sum(), len(filt_df) - filt_df[str(col)].sum()] ) elif col.metadata_type == MetadataType.CONTINUOUS: bucs = [b.bucket for b in r.buckets] ret.append( filt_df.groupby([pd.cut(filt_df[str(col)], bucs)]) # type: ignore .size() .astype(int) .tolist() ) else: ret.append([]) return ret def histogram_metric( df: pd.DataFrame, metric_fn: Callable, col: ZenoColumn, bucket: HistogramBucket, model: str, metric: str, ) -> Union[float, None]: df_filt = filter_table_single(df, col, bucket) output_metric = metric_fn(df_filt, model, metric) if output_metric is None or isnan(output_metric): return None return output_metric def histogram_metrics( df: pd.DataFrame, metric_fn: Callable, req: HistogramRequest ) -> List[List[Union[float, None]]]: """Calculate metric for each bucket in each column histogram.""" if req.metric is None: return [] if req.filter_predicates is not None: filt_df = filter_table( df, req.filter_predicates, req.tag_ids, req.filter_ids, req.tag_list ) else: filt_df = df ret: List[List[Union[float, None]]] = [] for r in req.column_requests: col = r.column 
loc_ret: List[Union[float, None]] = [] for b in r.buckets: df_filt = filter_table_single(filt_df, col, b) metric = metric_fn(df_filt, req.model, req.metric) if metric is None or pd.isna(metric) or isnan(metric): loc_ret.append(None) else: loc_ret.append(metric) ret.append(loc_ret) return ret def filter_by_string(df: pd.DataFrame, req: StringFilterRequest) -> List[str]: """Filter the table based on a string filter request.""" short_ret: List[str] = [] regex = req.is_regex keyword = req.filter_string col_type = req.column case_match = req.case_match whole_word_match = req.whole_word_match # string search if not regex: col = df[str(col_type)].dropna().astype(str) if not case_match: col = col.str.lower() keyword = keyword.lower() if not whole_word_match: ret = [i for i in col if keyword in i] else: ret = [i for i in col if keyword == i] for r in ret[0:5]: idx = r.find(keyword) loc_str = r[0 if idx < 20 else idx - 20 : idx + 20] if len(r) > 40 + len(keyword): if idx - 20 > 0: loc_str = "..." + loc_str if idx + 20 < len(r): loc_str = loc_str + "..." short_ret.append(loc_str) # regex search else: flag = 0 if case_match else re.IGNORECASE keyword = f"\\b{keyword}\\b" if whole_word_match else keyword try: query_string = f"`{col_type}`.str.contains(r'{keyword}', flags=@flag)" ret = df.query(query_string)[str(col_type)].head().tolist() except Exception as e: print("Invalid Regex Error: ", e) return short_ret for r in ret: idx = re.search(keyword, r, flags=flag) if idx is not None: idx = idx.start() loc_str = r[0 if idx < 20 else idx - 20 : idx + 20] if len(r) > 40 + len(keyword): if idx - 20 > 0: loc_str = "..." + loc_str if idx + 20 < len(r): loc_str = loc_str + "..." short_ret.append(loc_str) return short_ret
zenoml
/zenoml-0.6.4-py3-none-any.whl/zeno/processing/histogram_processing.py
histogram_processing.py
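The continuous bucketing above reduces to two calls: `np.histogram_bin_edges` with the "doane" estimator for the edges, then `pd.cut` for counting. In miniature:

```python
# Doane bin edges, then counts per bucket. pd.cut builds half-open
# (left, right] intervals, so the column minimum falls outside the first
# bucket - the reason slice_finder.py widens the outer edges by 1.
import numpy as np
import pandas as pd

col = pd.Series([1.0, 1.5, 2.0, 2.2, 3.7, 4.1, 8.0])
edges = np.histogram_bin_edges(col, bins="doane")
counts = col.groupby(pd.cut(col, list(edges))).size()
print(counts)
```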
import secrets from typing import List import numpy as np import pandas as pd from sliceline.slicefinder import Slicefinder from zeno.classes.base import MetadataType from zeno.classes.slice import FilterPredicate, FilterPredicateGroup, Slice from zeno.classes.slice_finder import SliceFinderRequest, SliceFinderReturn from zeno.processing.filtering import filter_table from zeno.util import generate_diff_cols # Discretize continuous valued columns. def cont_cols_df(df, cols: List[str]): new_df = pd.DataFrame() for col in cols: df_col = df.loc[:, col].copy() bins = list(np.histogram_bin_edges(df_col, bins="doane")) bins[0], bins[len(bins) - 1] = bins[0] - 1, bins[len(bins) - 1] + 1 new_df.loc[:, col + "_encode"] = pd.cut(df_col, bins=bins) return new_df def slice_finder(df, req: SliceFinderRequest): """Return slices of data with high or low metric values. Args: df (DataFrame): Zeno DataFrame with all metadata. req (SliceFinderRequest): Request with columns, metrics, and options. Returns a SliceFinderMetricReturn Object. """ cont_search_cols, not_cont_search_cols = [], [] for col in req.search_columns: if col.metadata_type == MetadataType.CONTINUOUS: cont_search_cols.append(col) else: not_cont_search_cols.append(col) search_cols = not_cont_search_cols + cont_search_cols cont_search_cols = [str(col) for col in cont_search_cols] not_cont_search_cols = [str(col) for col in not_cont_search_cols] metric_col = "diff" if req.compare_column else str(req.metric_column) filt_df = filter_table( df, req.filter_predicates, req.filter_ids, req.tag_ids, req.tag_list ) cont_df = cont_cols_df(filt_df[cont_search_cols].dropna(), cont_search_cols) if req.compare_column: filt_df = generate_diff_cols(filt_df, req.metric_column, req.compare_column) unique_cols = set(not_cont_search_cols + [metric_col]) updated_df = pd.concat([filt_df[list(unique_cols)], cont_df], axis=1).dropna() normalized_metric_col = np.array(updated_df[metric_col], dtype=float) # Invert metric column if ascending. metric_max = np.max(normalized_metric_col) if req.order_by == "ascending": normalized_metric_col = metric_max - normalized_metric_col cont_search_cols = [col + "_encode" for col in cont_search_cols] search_cols_str = not_cont_search_cols + cont_search_cols slice_finder = Slicefinder(alpha=req.alpha, k=20, max_l=req.max_lattice) slice_finder.fit(updated_df[search_cols_str].to_numpy(), normalized_metric_col) if slice_finder.top_slices_ is None or slice_finder.top_slices_statistics_ is None: return SliceFinderReturn(slices=[], metrics=[], sizes=[], overall_metric=0) discovered_slices: List[Slice] = [] slice_metrics: List[float] = [] slice_sizes: List[int] = [] not_cont_search_num = len(not_cont_search_cols) for sli_i, sli in enumerate(slice_finder.top_slices_): # Rescale back to original metric. 
if req.order_by == "ascending": slice_metrics.append( metric_max - slice_finder.top_slices_statistics_[sli_i]["slice_average_error"] ) else: slice_metrics.append( slice_finder.top_slices_statistics_[sli_i]["slice_average_error"] ) slice_sizes.append(slice_finder.top_slices_statistics_[sli_i]["slice_size"]) predicate_list = [] for pred_i, sli_predicate in enumerate(sli): if sli_predicate is not None: join_val = "" if len(predicate_list) == 0 else "&" col = search_cols[pred_i] # not continuous columns if pred_i < not_cont_search_num: if str(sli_predicate) in ["True", "False"]: sli_predicate = "true" if sli_predicate else "false" predicate_list.append( FilterPredicate( column=col, operation="==", value=sli_predicate, join=join_val, ) ) # continuous columns else: left_pred = FilterPredicate( column=col, operation=">=", value=sli_predicate.left, join="", ) right_pred = FilterPredicate( column=col, operation="<", value=sli_predicate.right, join="&", ) predicate_list.append( FilterPredicateGroup( predicates=[left_pred, right_pred], join=join_val ), ) discovered_slices.append( Slice( slice_name="Generated Slice " + secrets.token_hex(nbytes=4), folder="", filter_predicates=FilterPredicateGroup( predicates=predicate_list, join="" ), ) ) return SliceFinderReturn( slices=discovered_slices, metrics=slice_metrics, sizes=slice_sizes, overall_metric=slice_finder.average_error_ if slice_finder.average_error_ else 0, )
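# A small self-contained sketch of the discretization step cont_cols_df
# performs above: continuous columns are bucketed with numpy's "doane" bin
# edges, widened slightly at both ends so the min/max values land inside a
# bin, before being handed to sliceline. The sample data is made up.
import numpy as np
import pandas as pd

values = pd.Series(np.random.default_rng(0).normal(size=100))
edges = list(np.histogram_bin_edges(values, bins="doane"))
edges[0], edges[-1] = edges[0] - 1, edges[-1] + 1  # widen the outer edges
encoded = pd.cut(values, bins=edges)  # each value becomes an Interval bucket
print(encoded.value_counts().sort_index())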
zenoml
/zenoml-0.6.4-py3-none-any.whl/zeno/processing/slice_finder.py
slice_finder.py
from typing import List import numpy as np import pandas as pd from sklearn import preprocessing from zeno.classes.base import MetadataType, ZenoColumn, ZenoColumnType from zeno.classes.projection import Points2D, PointsColors def check_embed_exists(df: pd.DataFrame, model: str): """Checks for the existence of an embedding column. Returns True if the column exists, False otherwise """ embed_column = ZenoColumn( column_type=ZenoColumnType.EMBEDDING, name="embedding", model=model ) exists = str(embed_column) in df.columns return exists and not df[str(embed_column)].isna().any() cached_projections = {} def run_tsne(df: pd.DataFrame, model: str) -> np.ndarray: """Project embedding into 2D space using t-SNE. Args: df (pd.DataFrame): Zeno DataFrame. model (str): Name of which model to use for the embedding. Returns: np.ndarray: 2D projection of the embedding. """ if model in cached_projections: return cached_projections[model] from openTSNE import TSNE embed_col = ZenoColumn( column_type=ZenoColumnType.EMBEDDING, name="embedding", model=model ) embed = df[str(embed_col)].to_numpy() embed = np.stack(embed, axis=0) # type: ignore all_available_processors = -1 default_iterations = 400 tsne = TSNE(n_jobs=all_available_processors, n_iter=default_iterations) projection = tsne.fit(embed) cached_projections[model] = projection return projection def projection_colors(df: pd.DataFrame, column: ZenoColumn) -> PointsColors: """Get colors for a projection based on a column. Args: df (pd.DataFrame): Dataframe with all columns from Zeno. column (ZenoColumn): Column to use for coloring the projection. Returns: PointsColors: The color range, the unique values, and the metadata type. """ series = df[str(column)] unique = series.unique() metadata_type = "nominal" color_range: List[int] = [] if len(unique) == 2: metadata_type = "boolean" if len(unique) > 10: if column.metadata_type == MetadataType.CONTINUOUS: metadata_type = "continuous" color_range = ( np.interp(series, (series.min(), series.max()), (0, 20)) .astype(int) .tolist() ) unique = np.array([series.min(), series.max()]) else: metadata_type = "other" color_range = [0] * len(series) else: labels = preprocessing.LabelEncoder().fit_transform(series) if isinstance(labels, np.ndarray): color_range = labels.astype(int).tolist() else: color_range = [0] * len(series) return PointsColors( color=color_range, domain=unique.tolist(), data_type=metadata_type ) def project_into_2d( df: pd.DataFrame, id_column: ZenoColumn, model: str, column: ZenoColumn ) -> Points2D: """If the embedding exists, will use t-SNE to project into 2D.""" points = Points2D(x=[], y=[], color=[], domain=[], opacity=[], data_type="", ids=[]) # Can't project without an embedding if not check_embed_exists(df, model): return points projection = run_tsne(df, model) # extract points and ids from computed projection points.x = projection[:, 0].tolist() points.y = projection[:, 1].tolist() color_results = projection_colors(df, column) points.color = color_results.color points.domain = color_results.domain points.data_type = color_results.data_type points.ids = df[str(id_column)].to_list() return points
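# A minimal sketch of the projection step run_tsne performs above, using
# openTSNE directly on stand-in embeddings (random data, purely for
# illustration). fit() returns an (n_samples, 2) array-like that can be
# sliced into x/y columns exactly as project_into_2d does.
import numpy as np
from openTSNE import TSNE

embeddings = np.random.default_rng(0).normal(size=(200, 64))
projection = TSNE(n_jobs=-1, n_iter=400).fit(embeddings)
x, y = projection[:, 0].tolist(), projection[:, 1].tolist()
print(len(x), len(y))  # 200 200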
zenoml
/zenoml-0.6.4-py3-none-any.whl/zeno/processing/projection_processing.py
projection_processing.py
import os from inspect import getsource from pathlib import Path from typing import Callable, Dict, List import pandas as pd from tqdm import trange from zeno.api import DistillReturn, ModelReturn, ZenoOptions from zeno.classes.base import DataProcessingReturn, ZenoColumn, ZenoColumnType from zeno.util import load_series def predistill_data( fn: Callable[[pd.DataFrame, ZenoOptions], DistillReturn], column: ZenoColumn, options: ZenoOptions, cache_path: str, df: pd.DataFrame, batch_size: int, pos: int, ) -> List[DataProcessingReturn]: col_hash = str(column) # To prevent SettingWithCopyWarning col = df[col_hash].copy() save_path = Path(cache_path, col_hash + ".pickle") to_predict_indices = col.loc[pd.isna(col)].index if len(to_predict_indices) > 0: for i in trange( 0, len(to_predict_indices), batch_size, desc="preprocessing " + fn.__name__, position=pos, ): out = fn(df.loc[to_predict_indices[i : i + batch_size]], options) col.loc[to_predict_indices[i : i + batch_size]] = out.distill_output col.to_pickle(str(save_path)) return [DataProcessingReturn(column=column, output=col)] def run_inference( fn: Callable[[str], Callable[[pd.DataFrame, ZenoOptions], ModelReturn]], options: ZenoOptions, model_name: str, cache_path: str, df: pd.DataFrame, batch_size: int, pos: int, ) -> List[DataProcessingReturn]: model_col_obj = ZenoColumn( column_type=ZenoColumnType.OUTPUT, name="output", model=model_name ) embedding_col_obj = ZenoColumn( column_type=ZenoColumnType.EMBEDDING, name="embedding", model=model_name ) model_hash = str(model_col_obj) embedding_hash = str(embedding_col_obj) model_col = df[model_hash].copy() embedding_col = df[embedding_hash].copy() model_save_path = Path(cache_path, model_hash + ".pickle") embedding_save_path = Path(cache_path, embedding_hash + ".pickle") to_predict_indices = model_col.loc[pd.isna(model_col)].index other_return_cols: Dict[str, ZenoColumn] = {} if len(to_predict_indices) > 0: model_fn = fn(model_name) # Make output folder if function uses output_path src = getsource(model_fn) if "output_path" in src: file_cache_path = os.path.join(cache_path, model_hash) os.makedirs(file_cache_path, exist_ok=True) options = options.copy(update={"output_path": file_cache_path}) for i in trange( 0, len(to_predict_indices), batch_size, desc="Inference on " + model_name, position=pos, ): out = model_fn(df.loc[to_predict_indices[i : i + batch_size]], options) # Check if we also get embedding if out.embedding is not None: for j, idx in enumerate(to_predict_indices[i : i + batch_size]): model_col.at[idx] = out.model_output[j] # noqa: PD008 embedding_col.at[idx] = out.embedding[j] # noqa: PD008 embedding_col.to_pickle(str(embedding_save_path)) else: model_col.loc[to_predict_indices[i : i + batch_size]] = out.model_output if out.other_returns is not None: for k, v in out.other_returns.items(): if i == 0: postdistill_col_obj = ZenoColumn( column_type=ZenoColumnType.POSTDISTILL, name=k, model=model_name, ) other_return_cols[k] = postdistill_col_obj if str(postdistill_col_obj) not in df.columns: load_series( df, postdistill_col_obj, Path(cache_path, str(postdistill_col_obj) + ".pickle"), ) postdistill_col_obj = other_return_cols[k] postdistill_hash = str(postdistill_col_obj) postdistill_col = df[postdistill_hash].copy(deep=False) postdistill_col.loc[ to_predict_indices[i : i + batch_size] ] = out.other_returns[k] postdistill_save_path = Path( cache_path, postdistill_hash + ".pickle" ) postdistill_col.to_pickle(str(postdistill_save_path)) model_col.to_pickle(str(model_save_path)) ret = 
[DataProcessingReturn(column=model_col_obj, output=model_col)] if not embedding_col.isna().to_numpy().any(): # type: ignore ret.append(DataProcessingReturn(column=embedding_col_obj, output=embedding_col)) for k, v in other_return_cols.items(): ret.append(DataProcessingReturn(column=v, output=df[str(v)])) return ret def postdistill_data( fn: Callable[[pd.DataFrame, ZenoOptions], DistillReturn], model: str, options: ZenoOptions, cache_path: str, df: pd.DataFrame, batch_size: int, pos: int, ) -> List[DataProcessingReturn]: col_obj = ZenoColumn( column_type=ZenoColumnType.POSTDISTILL, name=fn.__name__, model=model, ) col_hash = str(col_obj) col = df[col_hash].copy() output_obj = ZenoColumn( column_type=ZenoColumnType.OUTPUT, name="output", model=model ) output_hash = str(output_obj) save_path = Path(cache_path, col_hash + ".pickle") to_predict_indices = col.loc[pd.isna(col)].index local_options = options.copy( update={ "output_column": output_hash, "output_path": os.path.join(cache_path, output_hash), } ) if len(to_predict_indices) > 0: for i in trange( 0, len(to_predict_indices), batch_size, desc="postprocessing " + fn.__name__ + " on " + model, position=pos, ): out = fn(df.loc[to_predict_indices[i : i + batch_size]], local_options) col.loc[to_predict_indices[i : i + batch_size]] = out.distill_output col.to_pickle(str(save_path)) return [DataProcessingReturn(column=col_obj, output=col)]
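# A toy illustration of the caching pattern shared by predistill_data,
# run_inference and postdistill_data above: only rows whose cached value is
# still NaN are recomputed, in batches, and the refreshed series can be
# pickled after each batch so an interrupted run resumes where it stopped.
# All names here are made up for the sketch.
import pandas as pd

def fill_missing(col: "pd.Series", compute, batch_size: int = 2) -> "pd.Series":
    todo = col.loc[pd.isna(col)].index
    for i in range(0, len(todo), batch_size):
        batch = todo[i : i + batch_size]
        col.loc[batch] = compute(batch)
        # col.to_pickle("cache.pickle")  # checkpoint after every batch
    return col

col = pd.Series([1.0, None, None, 4.0, None])
print(fill_missing(col, lambda idx: [v * 10.0 for v in idx]).tolist())
# -> [1.0, 10.0, 20.0, 4.0, 40.0]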
zenoml
/zenoml-0.6.4-py3-none-any.whl/zeno/processing/data_processing.py
data_processing.py
from typing import List, Optional import pandas as pd from pandas import DataFrame from zeno.classes.base import MetadataType, ZenoColumn from zeno.classes.metadata import HistogramBucket from zeno.classes.slice import FilterIds, FilterPredicate, FilterPredicateGroup def get_filter_string(filter: FilterPredicateGroup) -> str: """Generate a filter string for Pandas query from a nested set of FilterPredicates. The join should go on the second predicate in a group. Args: filter (FilterPredicateGroup): Parent FilterPredicateGroup Returns: str: Filter string with added predicates """ filt = "" for f in filter.predicates: if isinstance(f, FilterPredicateGroup): if len(f.predicates) != 0: filt = filt + f.join + "(" filt = filt + get_filter_string(f) filt = filt + ")" elif isinstance(f, FilterPredicate): if "match" in f.operation: is_regex = "re" in f.operation is_case = "ca" in f.operation is_whole = "w" in f.operation is_not = "not" in f.operation if is_whole: f.value = f"\\b{f.value}\\b" if is_regex else f'"{f.value}"' filt_string = f"{f.join} (`{f.column}`.str.contains(\ r'{f.value}', na=False, regex={is_regex}, case={is_case}))" if (not is_regex) and is_whole: filt_string = f"{f.join} (`{f.column}`=={f.value})" try: filt += filt_string + "== False" if is_not else filt_string except Exception as e: print("Invalid Regex Error: ", e) else: try: val = str(float(f.value)) except ValueError: if str(f.value).lower() in [ "true", "false", ]: val = "True" if str(f.value).lower() == "true" else "False" else: val = '"{}"'.format(f.value) filt = filt + "{} (`{}` {} {})".format( f.join, f.column, f.operation, val ) return filt def filter_table( main_df, filter_predicates: Optional[FilterPredicateGroup] = None, list_ids_first: Optional[FilterIds] = None, list_ids_second: Optional[FilterIds] = None, tag_list: Optional[List[str]] = None, ) -> pd.DataFrame: all_indicies = [] if list_ids_first is not None and len(list_ids_first.ids) > 0: all_indicies += list_ids_first.ids if list_ids_second is not None and len(list_ids_second.ids) > 0: all_indicies += list_ids_second.ids # if we have ids, filter them out now! if len(all_indicies) > 0: # make sure the ids we are querying exist existing_ids = main_df.index.intersection(all_indicies) # this is fast because the index is set to ids main_df = main_df.loc[existing_ids] # empty selected tags so always return empty table if len(all_indicies) == 0 and tag_list is not None and len(tag_list) > 0: return main_df.iloc[0:0] if filter_predicates is not None: final_filter = get_filter_string(filter_predicates) if len(final_filter) > 0: return main_df.query(final_filter, engine="python") return main_df def filter_table_single(df: DataFrame, col: ZenoColumn, bucket: HistogramBucket): if ( col.metadata_type == MetadataType.NOMINAL or col.metadata_type == MetadataType.BOOLEAN ): return df[df[str(col)] == bucket.bucket] elif col.metadata_type == MetadataType.CONTINUOUS: return df[(df[str(col)] > bucket.bucket) & (df[str(col)] < bucket.bucket_end)] elif col.metadata_type == MetadataType.DATETIME: return df return df
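# A compact example of the pandas query strings get_filter_string builds
# above: column names are wrapped in backticks so names with spaces or
# special characters still parse, and predicates join with & or |. The
# frame below is invented for illustration.
import pandas as pd

df = pd.DataFrame({"error rate": [0.1, 0.4, 0.9], "label": ["a", "b", "a"]})
# Equivalent of two FilterPredicates joined with "&":
print(df.query('(`error rate` > 0.2) & (`label` == "a")', engine="python"))
#    error rate label
# 2         0.9     a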
zenoml
/zenoml-0.6.4-py3-none-any.whl/zeno/processing/filtering.py
filtering.py
![logo](https://image.prntscr.com/image/rcFKFC4xSXGIz-tRqtYnXA.png)

# Zenon

![python](https://img.shields.io/badge/python-3.x-red.svg) ![discord](https://img.shields.io/badge/discord-userbot-blue.svg) ![disuerbot](https://img.shields.io/badge/discord--userbot-framework-brightgreen.svg) ![proxy](https://img.shields.io/badge/proxy-supported-yellowgreen.svg)

A Discord userbot (selfbot) framework that interacts with Discord as a regular user account instead of a normal bot account.

# Example

``` python
import zenon

token = "your-token"

def on_message():
    while True:
        chatid = "409879939400335362"
        message = client.get_message(chatid)
        if message == "!test":
            client.send_message(chatid, "sei grassa!")
            # client.send_message(chatid, "sei grassa!", tts = True)

if __name__ == '__main__':
    client = zenon.Client(token)
    # client = zenon.Client(token, proxy = "ip:port")
    client.func_loop(on_message)
```

# Dependencies

- requests `pip install requests`

# How to get the token

1. Open discordapp.com.
2. Log in or sign up.
3. Once you are logged in, press Ctrl + Shift + I; the developer tools should appear on the right side: ![alt text](https://image.ibb.co/eiD7Oc/rught_scode.png)
4. Press this button: ![alt text](https://image.ibb.co/iViwGx/righthttt.png)
5. Go to where it says Local Storage and select the website link: ![alt text](https://image.ibb.co/gyBoUH/roghttt.png)
6. Scroll down and find the entry named "token": ![alt text](https://image.ibb.co/daUE3c/raght.png)
7. Copy the token.
8. Enjoy Zenon!

# Changelog

http://telegra.ph/Zenon-Changelog-04-22

# Need help?

[![discord](http://i.imgur.com/cbfIsqM.png)](https://discord.gg/YpCb2s) [![telegram](http://www.freepnglogos.com/uploads/telegram-logo-15.png)](https://t.me/tfchat)
zenon
/Zenon-0.6.tar.gz/Zenon-0.6/README.md
README.md
import ast
import re
import json
import logging
import requests

log = logging.getLogger(__name__)  # pylint: disable=C0103
requests.packages.urllib3.disable_warnings()

ROUTERS = {'MessagingRouter': 'messaging',
           'EventsRouter': 'evconsole',
           'ProcessRouter': 'process',
           'ServiceRouter': 'service',
           'DeviceRouter': 'device',
           'NetworkRouter': 'network',
           'TemplateRouter': 'template',
           'DetailNavRouter': 'detailnav',
           'ReportRouter': 'report',
           'MibRouter': 'mib',
           'ZenPackRouter': 'zenpack'}


class ZenossException(Exception):
    '''Custom exception for Zenoss
    '''
    pass


class EventState:
    """
    eventState:
    0 = new
    1 = acknowledged
    2 = suppressed
    3 = closed
    4 = cleared
    5 = dropped
    6 = aged
    """
    new, acknowledged, suppressed, closed, cleared, dropped, aged = range(7)


class EventSeverity:
    """
    0 = clear
    1 = debug
    2 = info
    3 = warning
    4 = error
    5 = critical
    """
    clear, debug, info, warning, error, critical = range(6)


class ProductionState:
    production = 1000
    pre_production = 500
    test = 400
    maintenance = 300
    decommissioned = -1


class Zenoss(object):
    '''A class that represents a connection to a Zenoss server
    '''

    def __init__(self, host=None, username=None, password=None, cert=None, ssl_verify=True):
        self.__host = host
        self.__session = requests.Session()
        # auth by username/password or client cert
        if username and password:
            self.__session.auth = (username, password)
        elif cert:
            self.__session.cert = cert
        else:
            self.__session.auth = None
        # host SSL verification enable/disabled
        self.__session.verify = ssl_verify
        # reset count
        self.__req_count = 0

    def __router_request(self, router, method, data=None):
        '''Internal method to make calls to the Zenoss request router
        '''
        if router not in ROUTERS:
            raise Exception('Router "' + router + '" not available.')

        req_data = json.dumps([dict(
            action=router,
            method=method,
            data=data,
            type='rpc',
            tid=self.__req_count)])
        log.debug('Making request to router %s with method %s', router, method)
        uri = '%s/zport/dmd/%s_router' % (self.__host, ROUTERS[router])
        headers = {'Content-type': 'application/json; charset=utf-8'}
        response = self.__session.post(uri, data=req_data, headers=headers)
        self.__req_count += 1

        # The API returns a 200 response code even when auth is bad.
        # With bad auth, the login page is displayed. Here I search for
        # an element on the login form to determine if auth failed.
        if re.search('name="__ac_name"', response.content.decode("utf-8")):
            log.error('Request failed. Bad username/password.')
            raise ZenossException('Request failed. Bad username/password.')

        return json.loads(response.content.decode("utf-8"))['result']

    def get_rrd_values(self, device, dsnames, start=None, end=None, function='LAST'):  # pylint: disable=R0913
        '''Method to abstract the details of making a request to the getRRDValue
        method for a device
        '''
        if function not in ['MINIMUM', 'AVERAGE', 'MAXIMUM', 'LAST']:
            raise ZenossException('Invalid RRD function {0} given.'.format(function))

        if len(dsnames) == 1:
            # Appending a junk value to dsnames because if only one value is provided Zenoss fails to return a value.
            dsnames.append('junk')

        url = '{0}/{1}/getRRDValues'.format(self.__host, self.device_uid(device))
        params = {'dsnames': dsnames, 'start': start, 'end': end, 'function': function}
        return ast.literal_eval(self.__session.get(url, params=params).content)

    def get_devices(self, device_class='/zport/dmd/Devices', limit=None):
        '''Get a list of all devices.
        '''
        log.info('Getting all devices')
        return self.__router_request('DeviceRouter', 'getDevices',
                                     data=[{'uid': device_class, 'params': {}, 'limit': limit}])

    def find_device(self, device_name):
        '''Find a device by name.
        '''
        log.info('Finding device %s', device_name)
        all_devices = self.get_devices()

        try:
            device = [d for d in all_devices['devices'] if d['name'] == device_name][0]
            # We need to save the hash for later operations
            device['hash'] = all_devices['hash']
            log.info('%s found', device_name)
            return device
        except IndexError:
            log.error('Cannot locate device %s', device_name)
            raise Exception('Cannot locate device %s' % device_name)

    def device_uid(self, device):
        '''Helper method to retrieve the device UID for a given device name
        '''
        return self.find_device(device)['uid']

    def add_device(self, device_name, device_class, collector='localhost'):
        '''Add a device.
        '''
        log.info('Adding %s', device_name)
        data = dict(deviceName=device_name, deviceClass=device_class, model=True, collector=collector)
        return self.__router_request('DeviceRouter', 'addDevice', [data])

    def remove_device(self, device_name):
        '''Remove a device.
        '''
        log.info('Removing %s', device_name)
        device = self.find_device(device_name)
        data = dict(uids=[device['uid']], hashcheck=device['hash'], action='delete')
        return self.__router_request('DeviceRouter', 'removeDevices', [data])

    def move_device(self, device_name, organizer):
        '''Move the device to the specified organizer.
        '''
        log.info('Moving %s to %s', device_name, organizer)
        device = self.find_device(device_name)
        data = dict(uids=[device['uid']], hashcheck=device['hash'], target=organizer)
        return self.__router_request('DeviceRouter', 'moveDevices', [data])

    def set_prod_state(self, device_name, prod_state):
        '''Set the production state of a device.
        '''
        log.info('Setting prodState on %s to %s', device_name, prod_state)
        device = self.find_device(device_name)
        data = dict(uids=[device['uid']], prodState=prod_state, hashcheck=device['hash'])
        return self.__router_request('DeviceRouter', 'setProductionState', [data])

    def set_maintenance(self, device_name):
        '''Helper method to set prodState for device so that it does not alert.
        '''
        return self.set_prod_state(device_name, 300)

    def set_production(self, device_name):
        '''Helper method to set prodState for device so that it is back in production and alerting.
        '''
        return self.set_prod_state(device_name, 1000)

    def set_product_info(self, device_name, hw_manufacturer, hw_product_name, os_manufacturer, os_product_name):  # pylint: disable=R0913
        '''Set ProductInfo on a device.
        '''
        log.info('Setting ProductInfo on %s', device_name)
        device = self.find_device(device_name)
        data = dict(uid=device['uid'],
                    hwManufacturer=hw_manufacturer,
                    hwProductName=hw_product_name,
                    osManufacturer=os_manufacturer,
                    osProductName=os_product_name)
        return self.__router_request('DeviceRouter', 'setProductInfo', [data])

    def set_rhel_release(self, device_name, release):
        '''Sets the proper release of RedHat Enterprise Linux.'''
        if type(release) is not float:
            log.error("RHEL release must be a float")
            return {u'success': False}
        log.info('Setting RHEL release on %s to %s', device_name, release)
        device = self.find_device(device_name)
        return self.set_product_info(device_name, device['hwManufacturer']['name'],
                                     device['hwModel']['name'], 'RedHat', 'RHEL {}'.format(release))

    def set_device_info(self, device_name, data):
        '''Set attributes on a device or device organizer.
        This method accepts any keyword argument for the property that you wish to set.
''' data['uid'] = self.find_device(device_name)['uid'] return self.__router_request('DeviceRouter', 'setInfo', [data]) def remodel_device(self, device_name): '''Submit a job to have a device remodeled. ''' return self.__router_request('DeviceRouter', 'remodel', [dict(uid=self.find_device(device_name)['uid'])]) def set_collector(self, device_name, collector): '''Set collector for device. ''' device = self.find_device(device_name) data = dict(uids=[device['uid']], hashcheck=device['hash'], collector=collector) return self.__router_request('DeviceRouter', 'setCollector', [data]) def rename_device(self, device_name, new_name): '''Rename a device. ''' data = dict(uid=self.find_device(device_name)['uid'], newId=new_name) return self.__router_request('DeviceRouter', 'renameDevice', [data]) def reset_ip(self, device_name, ip_address=''): '''Reset IP address(es) of device to the results of a DNS lookup or a manually set address. ''' device = self.find_device(device_name) data = dict(uids=[device['uid']], hashcheck=device['hash'], ip=ip_address) return self.__router_request('DeviceRouter', 'resetIp', [data]) def get_events(self, limit=0, start=0, sort='lastTime', dir='DESC', params={}, archive=False, uid=None, detailFormat=False): """ Use EventsRouter action (Class) and query method found in JSON API docs on Zenoss website: query(self, limit=0, start=0, sort='lastTime', dir='desc', params=None, archive=False, uid=None, detailFormat=False) Parameters: limit (integer) - (optional) Max index of events to retrieve (default: 0) start (integer) - (optional) Min index of events to retrieve (default: 0) sort (string) - (optional) Key on which to sort the return results (default: 'lastTime') dir (string) - (optional) Sort order; can be either 'ASC' or 'DESC' (default: 'DESC') params (dictionary) - (optional) Key-value pair of filters for this search. (default: None) params are the filters to the query method and can be found in the _buildFilter method. agent = params.get('agent'), component = ??? count_range = params.get('count'), current_user_name = params.get('ownerid'), details = details, device = ??? 
          element_sub_title = params.get('component'),
          element_title = params.get('device'),
          event_class = filter(None, [params.get('eventClass')]),
          event_summary = params.get('summary'),
          fingerprint = params.get('dedupid'),

          Note that the time values can be ranges where a valid range would be
          '2012-09-07 07:57:33/2012-11-22 17:57:33'
          first_seen = params.get('firstTime') and self._timeRange(params.get('firstTime')),
          last_seen = params.get('lastTime') and self._timeRange(params.get('lastTime')),
          monitor = params.get('monitor'),
          severity = params.get('severity'),
          status = [i for i in params.get('eventState', [])],
          status_change = params.get('stateChange') and self._timeRange(params.get('stateChange')),
          Systems (string) =
          tags = params.get('tags'),
          uuid = filterEventUuids,

        archive (boolean) - (optional) True to search the event history table instead of active events (default: False)
        uid (string) - (optional) Context for the query (default: None)
        detailFormat (boolean) - (optional) True to include/retrieve event detail instead of event summary (default: False)

        Returns: dictionary
        Properties:
          events: ([dictionary]) List of objects representing events
          totalCount: (integer) Total count of events returned
          asof: (float) Current time
        """
        # prepare data by passing arguments
        data = locals().copy()
        del data["self"]

        # query Zenoss
        log.info('Getting events for %s', data)
        response = self.__router_request('EventsRouter', 'query', [data])

        # if a valid response, continue to check if further queries are required to get all events
        if 'success' in response and response['success']:
            result_total_count = response['totalCount']
            result_count_in_initial_response = len(response['events'])
            log.info('%s events received, %s requested, %s events available',
                     result_count_in_initial_response, data['limit'], result_total_count)

            # if the number of results is less than the limit requested
            # and there are more total events available
            if result_count_in_initial_response < data['limit'] and result_total_count > result_count_in_initial_response:
                # iterate through remaining results in batches; use integer
                # division so range() receives an int under Python 3 as well
                quotient = result_total_count // result_count_in_initial_response
                for i in range(0, quotient):
                    data['start'] = (i + 1) * result_count_in_initial_response
                    data['limit'] = result_count_in_initial_response

                    # store additional query result temporarily
                    log.info('Getting events for %s', data)
                    temp_response = self.__router_request('EventsRouter', 'query', [data])

                    # add events to initial response
                    response['events'] += temp_response['events']
                    log.info('Events received: %s (iteration %s), %s (total)',
                             len(temp_response['events']), (i + 2), len(response['events']))
            return response['events']
        # response was not valid
        else:
            log.error('No success field in response or success == false. %s', response['msg'])
            return None

    def get_event_detail(self, event_id):
        '''Find specific event details
        '''
        data = dict(evid=event_id)
        return self.__router_request('EventsRouter', 'detail', [data])

    def write_log(self, event_id, message):
        '''Write a message to the event's log
        '''
        data = dict(evid=event_id, message=message)
        return self.__router_request('EventsRouter', 'write_log', [data])

    def change_event_state(self, event_id, state):
        '''Change the state of an event.
        '''
        log.info('Changing eventState on %s to %s', event_id, state)
        return self.__router_request('EventsRouter', state, [{'evids': [event_id]}])

    def ack_event(self, event_id):
        '''Helper method to set the event state to acknowledged.
''' return self.change_event_state(event_id, 'acknowledge') def close_event(self, event_id): '''Helper method to set the event state to closed. ''' return self.change_event_state(event_id, 'close') def create_event_on_device(self, device_name, severity, summary): '''Manually create a new event for the device specified. ''' log.info('Creating new event for %s with severity %s', device_name, severity) if severity not in ('Critical', 'Error', 'Warning', 'Info', 'Debug', 'Clear'): raise Exception('Severity %s is not valid.' % severity) data = dict(device=device_name, summary=summary, severity=severity, component='', evclasskey='', evclass='') return self.__router_request('EventsRouter', 'add_event', [data]) def get_load_average(self, device): '''Returns the current 1, 5 and 15 minute load averages for a device. ''' dsnames = ('laLoadInt1_laLoadInt1', 'laLoadInt5_laLoadInt5', 'laLoadInt15_laLoadInt15') result = self.get_rrd_values(device=device, dsnames=dsnames) def normalize_load(load): '''Convert raw RRD load average to something reasonable so that it matches output from /proc/loadavg''' return round(float(load) / 100.0, 2) return [normalize_load(l) for l in result.values()]
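# A short usage sketch for the client defined above; the host, credentials
# and filter values are placeholders. get_events pages through EventsRouter
# in batches, so a single call can return more events than one router
# response would normally hold.
if __name__ == '__main__':
    zenoss = Zenoss(host='https://zenoss.example.com', username='admin',
                    password='secret', ssl_verify=False)
    for event in zenoss.get_events(limit=500, params={'severity': [5, 4]}):
        print('%s: %s' % (event.get('device'), event.get('summary')))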
zenoss-fork
/zenoss-fork-0.7.4.tar.gz/zenoss-fork-0.7.4/zenoss.py
zenoss.py
zenoss-hipchat
==============

Command suitable for use in Zenoss notification commands for sending events
to HipChat.

Installation
------------

Simply install using your preferred python package manager with either:

``pip install zenoss-hipchat``

for the latest release, or

``pip install -e git+https://github.com/carsongee/zenoss-hipchat#egg=zenoss-hipchat``

for the latest development version.

Configuration
-------------

In Zenoss go to ``Events`` -> ``Triggers`` and create a trigger with the
rules for which you want to send events to HipChat. Of course you can use an
existing trigger as well. For a more detailed guide on triggers and
notifications see the `community documentation
<http://wiki.zenoss.org/Notify_Me_of_Important_Events>`_.

After you have a trigger you wish to use, go to ``notifications`` and create
a new notification. Set the ``Id`` to something memorable like
``HipChatErrors`` or similar and choose ``Command`` as the action.

After creating the notification, edit it. On the ``Notification`` tab
configure it as you see fit, but you are generally going to want to make
sure it is enabled, and that you have added the trigger you created earlier.
The command does support clear messages, so go ahead and check that option
if you like.

Now on the ``Content`` tab of the notification paste the following into the
``Command`` field:

.. code-block:: bash

    zenoss-hipchat --device="${evt/device}" --info=${evt/summary} --component="${evt/component}" --severity=${evt/severity} --url="${urls/eventUrl}" --message=${evt/message}

And if you want to use the clear option, for the clear command:

.. code-block:: bash

    zenoss-hipchat --device="${evt/device}" --info=${evt/summary} --component="${evt/component}" --severity=${evt/severity} --url="${urls/eventUrl}" --message=${evt/message} --cleared-by="${evt/clearid}" --clear

You also need to provide the room and API token using the ``Environment
variables`` field with something like:

.. code-block:: bash

    HIPCHAT_TOKEN=<APIv1 Token>;HIPCHAT_ROOM=<Room Name (or ID) to post to>

replacing the values with ones appropriate for you.

Additional Environment Variables
--------------------------------

In addition to ``HIPCHAT_TOKEN`` and ``HIPCHAT_ROOM`` which are required,
you can also override other options with the following optional environment
variables:

- ``HIPCHAT_API_V1_ENDPOINT`` - Allows you to override the API endpoint if
  you are using private HipChat
- ``HIPCHAT_FROM`` - Defaults to Zenoss, and determines who the messages
  appear to be coming from.
- ``HIPCHAT_TIMEOUT`` - Defaults to 3 seconds, but if you have a slow
  connection to the HipChat server it can be increased or decreased.
- ``HIPCHAT_NOTIFY_SEVERITY`` - Defaults to Error and above (4), but can be
  raised or lowered and determines which events trigger the HipChat
  notification.
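Testing outside Zenoss
----------------------

For a quick sanity check outside of Zenoss, the same variables can be set
from Python before invoking the command. This is an illustrative sketch
only; the device, URL, and message values are placeholders, and the token
and room must be valid for the message to actually post:

.. code-block:: python

    import os
    import subprocess

    os.environ['HIPCHAT_TOKEN'] = '<APIv1 Token>'
    os.environ['HIPCHAT_ROOM'] = '<Room Name or ID>'
    subprocess.check_call([
        'zenoss-hipchat',
        '--device=test-device', '--info=Test event',
        '--component=test', '--severity=4',
        '--url=https://zenoss.example.com/event/1',
        '--message=Manual test of zenoss-hipchat',
    ])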
zenoss-hipchat
/zenoss-hipchat-0.2.0.tar.gz/zenoss-hipchat-0.2.0/README.rst
README.rst
from __future__ import print_function
import argparse
import sys

from zenoss_hipchat import config
from zenoss_hipchat.hipchat import HipChatEvent


def entry_point():
    """
    Entry point to script.
    """
    parser = argparse.ArgumentParser(
        prog='zenoss_hipchat',
        description=('Converts arguments into templated strings and uses '
                     'HipChat APIv1 to send the event to a HipChat room '
                     'specified by the environment variable HIPCHAT_ROOM '
                     'and the API token in the environment variable '
                     'HIPCHAT_TOKEN')
    )
    # Add required arguments
    req_opts = parser.add_argument_group('required arguments')
    req_opts.add_argument('-d', '--device', type=str, required=True,
                          help='Device where event occurred')
    req_opts.add_argument('-i', '--info', type=str, required=True,
                          help='Short message for event')
    req_opts.add_argument('-c', '--component', type=str, required=True,
                          help='Component of device for event')
    req_opts.add_argument('-s', '--severity', type=int, required=True,
                          help='Severity number from 0-5. See '
                               'http://community.zenoss.org/docs/DOC-9437#d0e6134')
    req_opts.add_argument('-u', '--url', type=str, required=True,
                          help='URL to go to event notification')
    req_opts.add_argument('-m', '--message', type=str, required=True,
                          help='Long event message')
    # Optional arguments
    parser.add_argument('-b', '--cleared-by', type=str,
                        help='What cleared the event (when --clear is set)')
    parser.add_argument('-o', '--clear', action="store_true",
                        help="Set if event is being cleared")
    args = parser.parse_args()

    # Both the token and the room are required, so abort if either is missing.
    if not config.HIPCHAT_API_V1_TOKEN or not config.HIPCHAT_ROOM_ID:
        print('Environment variables "HIPCHAT_TOKEN" and "HIPCHAT_ROOM" '
              'must be specified and valid before this command can be run')
        sys.exit(-1)
    if args.clear and not args.cleared_by:
        print('--cleared-by is required when using --clear')
        sys.exit(-1)

    hip_chat_event = HipChatEvent(**vars(args))
    hip_chat_event.send()
zenoss-hipchat
/zenoss-hipchat-0.2.0.tar.gz/zenoss-hipchat-0.2.0/zenoss_hipchat/command.py
command.py
import datetime import requests from zenoss_hipchat import config class HipChatEventSendException(Exception): """ Exception if the send failed """ pass class HipChatEvent(object): """ Simple class for sending zenoss events to hipchat """ SEVERITY_MAP = [ ('green', 'Clear'), ('gray', 'Debug'), ('purple', 'Info'), ('yellow', 'Warn'), ('yellow', 'Error'), ('red', 'Crit'), ] NOTIFY_SEVERITY = config.NOTIFY_SEVERITY def __init__( self, device, info, component, severity, url, message, cleared_by, clear=False ): """ Setup session and properties """ self.device = device self.info = info self.component = component self.severity = severity self.url = url self.message = message self.cleared_by = cleared_by self.clear = clear self.time = datetime.datetime.now().isoformat() # Setup session self.session = requests.Session() self.session.headers = { 'Content-Type': 'application/x-www-form-urlencoded' } self.session.params = { 'format': 'json', 'auth_token': config.HIPCHAT_API_V1_TOKEN } self.post_url = 'https://{0}/v1/rooms/message'.format( config.HIPCHAT_API_V1_ENDPOINT ) def _event_message(self): """ Return templated event notification """ return ('{device}: <a href="{url}">{info}</a><br />' '<b>Component</b>: {component}<br />' '<b>Time</b>: {time}<br />' '<b>Message</b>:' '{message}').format(**vars(self)) def _clear_message(self): """ Return templated clear notification """ return ('<b><i>Cleared!</b></i><br />' '{device}:<a href="{url}">{info}</a> Cleared<br />' '<b>Cleared By</b>: {cleared_by}<br />' '<b>Component</b>: {component}<br />' '<b>Time</b>: {time}<br />' '<b>Message</b>:' '{message}').format(**vars(self)) def _should_notify(self): """ Determine if we should set the notify flag in HipChat call """ if self.severity >= self.NOTIFY_SEVERITY: return 1 return 0 def send(self): """ Send off the message to environment room with a nice template """ if not self.clear: message = self._event_message() else: message = self._clear_message() from_name = "{0} ({1})".format( config.HIPCHAT_FROM, self.SEVERITY_MAP[self.severity][1] ) response = self.session.post( url=self.post_url, data={ 'room_id': config.HIPCHAT_ROOM_ID, 'from': from_name, 'message_format': 'html', 'notify': self._should_notify(), 'color': self.SEVERITY_MAP[self.severity][0], 'message': message }, timeout=config.REQUEST_TIMEOUT ) if response.status_code != 200: raise HipChatEventSendException(response.text)
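# A minimal usage sketch for HipChatEvent as defined above; every field
# value is a placeholder, and the HIPCHAT_* settings read by config must be
# present in the environment for send() to succeed.
if __name__ == '__main__':
    event = HipChatEvent(
        device='test-device',
        info='Test event',
        component='test',
        severity=4,  # maps to ('yellow', 'Error') in SEVERITY_MAP
        url='https://zenoss.example.com/event/1',
        message='Manual test message',
        cleared_by=None,
        clear=False,
    )
    event.send()  # raises HipChatEventSendException on a non-200 response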
zenoss-hipchat
/zenoss-hipchat-0.2.0.tar.gz/zenoss-hipchat-0.2.0/zenoss_hipchat/hipchat.py
hipchat.py
zenoss-snmp-module
==================

This project provides a Net-SNMP pass_persist script for monitoring Zenoss.
If you aren't familiar with Net-SNMP's pass_persist option, it allows an
external script to provide responses for all GET and GETNEXT requests under
a configured base OID.

Currently zenoss-snmp-module supports the included ZENOSS-PROCESS-MIB. See
the following snmptranslate command for what the MIB provides::

    $ snmptranslate -Tp ZENOSS-PROCESS-MIB::zenossProcessMIB
    +--zenossProcessMIB(3)
       |
       +--zenSystemTable(1)
       |  |
       |  +--zenSystemEntry(1)
       |     |  Index: zenSystemName
       |     |
       |     +-- -R-- String    zenSystemName(1)
       |              Textual Convention: DisplayString
       |              Size: 0..255
       |
       +--zenProcessTable(2)
       |  |
       |  +--zenProcessEntry(1)
       |     |  Index: zenSystemName, zenProcessName
       |     |
       |     +-- -R-- String    zenProcessName(1)
       |              Textual Convention: DisplayString
       |              Size: 0..255
       |
       +--zenProcessMetricTable(3)
          |
          +--zenProcessMetricEntry(1)
             |  Index: zenSystemName, zenProcessName, zenProcessMetricName
             |
             +-- -R-- String    zenProcessMetricName(1)
             |        Textual Convention: DisplayString
             |        Size: 0..255
             +-- -R-- String    zenProcessMetricValue(2)
             |        Textual Convention: DisplayString
             |        Size: 0..255
             +-- -R-- String    zenProcessMetricCyclesSinceUpdate(3)
                      Textual Convention: DisplayString
                      Size: 0..255

    $ snmpwalk -v2c -c public localhost ZENOSS-PROCESS-MIB::zenossProcessMIB
    ZENOSS-PROCESS-MIB::zenSystemName."localhost" = STRING: localhost
    ZENOSS-PROCESS-MIB::zenProcessName."localhost"."zenhub" = STRING: zenhub
    ZENOSS-PROCESS-MIB::zenProcessName."localhost"."zenwebtx" = STRING: zenwebtx
    ZENOSS-PROCESS-MIB::zenProcessName."localhost"."zencommand" = STRING: zencommand
    ZENOSS-PROCESS-MIB::zenProcessMetricName."localhost"."zenhub"."services" = STRING: services
    ZENOSS-PROCESS-MIB::zenProcessMetricName."localhost"."zenhub"."totalTime" = STRING: totalTime
    ZENOSS-PROCESS-MIB::zenProcessMetricName."localhost"."zenhub"."totalEvents" = STRING: totalEvents
    ZENOSS-PROCESS-MIB::zenProcessMetricName."localhost"."zenhub"."invalidations" = STRING: invalidations
    ZENOSS-PROCESS-MIB::zenProcessMetricName."localhost"."zenhub"."totalCallTime" = STRING: totalCallTime
    ZENOSS-PROCESS-MIB::zenProcessMetricName."localhost"."zenhub"."workListLength" = STRING: workListLength
    ZENOSS-PROCESS-MIB::zenProcessMetricName."localhost"."zenwebtx"."devices" = STRING: devices
    ZENOSS-PROCESS-MIB::zenProcessMetricName."localhost"."zenwebtx"."dataPoints" = STRING: dataPoints
    ZENOSS-PROCESS-MIB::zenProcessMetricName."localhost"."zenwebtx"."eventCount" = STRING: eventCount
    ZENOSS-PROCESS-MIB::zenProcessMetricName."localhost"."zenwebtx"."cyclePoints" = STRING: cyclePoints
    ZENOSS-PROCESS-MIB::zenProcessMetricName."localhost"."zenwebtx"."queuedTasks" = STRING: queuedTasks
    ZENOSS-PROCESS-MIB::zenProcessMetricName."localhost"."zenwebtx"."runningTasks" = STRING: runningTasks
    ZENOSS-PROCESS-MIB::zenProcessMetricName."localhost"."zenwebtx"."eventQueueLength" = STRING: eventQueueLength
    ZENOSS-PROCESS-MIB::zenProcessMetricName."localhost"."zencommand"."eventQueueLength" = STRING: eventQueueLength
    ZENOSS-PROCESS-MIB::zenProcessMetricValue."localhost"."zenwebtx"."devices" = STRING: 0.0
    ZENOSS-PROCESS-MIB::zenProcessMetricValue."localhost"."zenwebtx"."dataPoints" = STRING: 0.0
    ZENOSS-PROCESS-MIB::zenProcessMetricValue."localhost"."zenwebtx"."eventCount" = STRING: 0.0
    ZENOSS-PROCESS-MIB::zenProcessMetricValue."localhost"."zenwebtx"."cyclePoints" = STRING: 0.0
    ZENOSS-PROCESS-MIB::zenProcessMetricValue."localhost"."zenwebtx"."queuedTasks" = STRING: 0.0
    ZENOSS-PROCESS-MIB::zenProcessMetricValue."localhost"."zenwebtx"."runningTasks" = STRING: 0.0
    ZENOSS-PROCESS-MIB::zenProcessMetricValue."localhost"."zenwebtx"."eventQueueLength" = STRING: 0.0
    ZENOSS-PROCESS-MIB::zenProcessMetricCyclesSinceUpdate."localhost"."zenhub"."services" = STRING: 2.35
    ZENOSS-PROCESS-MIB::zenProcessMetricCyclesSinceUpdate."localhost"."zenhub"."totalTime" = STRING: 2.35
    ZENOSS-PROCESS-MIB::zenProcessMetricCyclesSinceUpdate."localhost"."zenhub"."totalEvents" = STRING: 2.35
    ZENOSS-PROCESS-MIB::zenProcessMetricCyclesSinceUpdate."localhost"."zenhub"."invalidations" = STRING: 2.35
    ZENOSS-PROCESS-MIB::zenProcessMetricCyclesSinceUpdate."localhost"."zenhub"."totalCallTime" = STRING: 2.35
    ZENOSS-PROCESS-MIB::zenProcessMetricCyclesSinceUpdate."localhost"."zenhub"."workListLength" = STRING: 2.35
    ZENOSS-PROCESS-MIB::zenProcessMetricCyclesSinceUpdate."localhost"."zenwebtx"."devices" = STRING: 0.48
    ZENOSS-PROCESS-MIB::zenProcessMetricCyclesSinceUpdate."localhost"."zenwebtx"."dataPoints" = STRING: 0.48
    ZENOSS-PROCESS-MIB::zenProcessMetricCyclesSinceUpdate."localhost"."zenwebtx"."eventCount" = STRING: 0.48
    ZENOSS-PROCESS-MIB::zenProcessMetricCyclesSinceUpdate."localhost"."zenwebtx"."cyclePoints" = STRING: 0.48
    ZENOSS-PROCESS-MIB::zenProcessMetricCyclesSinceUpdate."localhost"."zenwebtx"."queuedTasks" = STRING: 0.48
    ZENOSS-PROCESS-MIB::zenProcessMetricCyclesSinceUpdate."localhost"."zenwebtx"."runningTasks" = STRING: 0.48
    ZENOSS-PROCESS-MIB::zenProcessMetricCyclesSinceUpdate."localhost"."zenwebtx"."eventQueueLength" = STRING: 0.45
    ZENOSS-PROCESS-MIB::zenProcessMetricCyclesSinceUpdate."localhost"."zencommand"."eventQueueLength" = STRING: 0.12

Usage
-----

To install zenoss-snmp-module you must run the following command::

    sudo easy_install -U zenoss-snmp-module

Once installed, the ``zenoss-snmp-module`` script provides built-in support
for helping you configure it. See the following command examples for
installing the associated MIB and configuring snmpd::

    # Install ZENOSS-PROCESS-MIB.
    zenoss-snmp-module --mib | sudo tee /usr/share/snmp/mibs/ZENOSS-PROCESS-MIB.txt

    # Add pass_persist line to snmpd.conf.
    zenoss-snmp-module --snmpd | sudo tee -a /etc/snmp/snmpd.conf

    # Restart snmpd service.
    sudo service snmpd restart

After changing snmpd.conf you must restart the snmpd service. Then you
should be able to test with the following command::

    # Walk the entire zenossProcessMIB.
    snmpwalk -mALL -v2c -c public localhost zenossProcessMIB

Try snmpwalk commands like the following to get more specific results::

    # Only show metric values for the zenwebtx process on the localhost collector.
    snmpwalk -mALL -v2c -c public localhost 'zenProcessMetricValue."localhost"."zenwebtx"'

    # Show how many cycles it's been since each metric was updated.
    snmpwalk -mALL -v2c -c public localhost 'zenProcessMetricCyclesSinceUpdate."localhost"'

You will need to know the OIDs for these values to poll them with Zenoss.
Use a command like the following to discover the OID for a given value. Note
that because these OIDs are just encoded system, process and metric names,
they will return the expected data from any system and can be considered
permanent::

    # Translate from name to OID.
    snmptranslate -On 'ZENOSS-PROCESS-MIB::zenProcessMetricValue."localhost"."zenwebtx"."queuedTasks"'

Troubleshooting
---------------

Normally zenoss-snmp-module is run from within snmpd. This makes it
difficult to troubleshoot problems. To test the script outside of snmpd,
you can run ``zenoss-snmp-module`` as root.
If things are working properly, this will appear to do nothing. See the following session as an example:: # zenoss-snmp-module PING PONG DUMP {'1.1.1.9.108.111.99.97.108.104.111.115.116': {'type': 'STRING', 'value': 'localhost'}, ... snipped ... It can also be useful to stop the snmpd service and run it in the foreground with just the useful debugging enabled:: sudo service snmpd stop sudo snmpd -fV -Lo -Ducd-snmp/pass_persist -Doutput Be sure to start the snmpd service once you're done testing.
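The pass_persist protocol itself is also easy to experiment with from
Python. The following sketch (with a single hard-coded value; the OID
layout is simplified for illustration) mirrors how zenoss-snmp-module
registers values through the snmp_passpersist module it is built on::

    import snmp_passpersist as snmp

    PP = snmp.PassPersist('.1.3.6.1.4.1.14296.3')

    def update():
        # Re-register every exported value on each refresh cycle.
        PP.add_str('1.1.1.1', 'localhost')

    # Poll update() every 10 seconds and answer snmpd's GET/GETNEXT
    # requests on stdin/stdout, as the pass_persist line in snmpd.conf
    # expects.
    PP.start(update, 10)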
zenoss-snmp-module
/zenoss-snmp-module-1.0.0rc4.tar.gz/zenoss-snmp-module-1.0.0rc4/README.rst
README.rst
import math import os import sys import argparse import rrdtool import snmp_passpersist as snmp import time import which BASE_OID = '.1.3.6.1.4.1.14296.3' PP = None ZENHOME = None def main(): global PP global ZENHOME default_zenhome = os.getenv('ZENHOME', '/opt/zenoss') parser = argparse.ArgumentParser() parser.add_argument( '--zenhome', help='ZENHOME directory. Default is {0}'.format(default_zenhome), default=default_zenhome) help_group = parser.add_argument_group( 'Configuration Help', 'These arguments print configuration information then exit.') # Nested mutually exclusive groups currently appear to be broken. # Leaving this here so it'll work once it's fixed in Python. mutex_help_group = help_group.add_mutually_exclusive_group() mutex_help_group.add_argument( '--readme', action='store_true', help='Prints README.') mutex_help_group.add_argument( '--info', action='store_true', help='Prints system, process and metric information.') mutex_help_group.add_argument( '--mib', action='store_true', help='Prints ZENOSS-PROCESS-MIB.') mutex_help_group.add_argument( '--snmpd', action='store_true', help='Prints snmpd.conf configuration excerpt.') args = parser.parse_args() ZENHOME = args.zenhome if args.readme: print_local_file('README.rst') sys.exit(0) if args.info: print_information() sys.exit(0) if args.mib: print_local_file('ZENOSS-PROCESS-MIB.txt') sys.exit(0) if args.snmpd: print_snmpd() sys.exit(0) # Required for snmp_passpersist to work. unbuffer_stdout() # Respond to OID requests. PP = snmp.PassPersist(BASE_OID) try: PP.start(update, 10) except KeyboardInterrupt: # It's OK. Let the user quit. pass def print_local_file(filename): path = os.path.join(os.path.dirname(__file__), filename) with open(path, 'r') as f: print f.read() def print_information(): for system_name in system_names(): print "System: {0}".format(system_name) for process_name in process_names(system_name): print " Process: {0}".format(process_name) for metric_name in metric_names(system_name, process_name): print " Metric: {0}".format(metric_name) print print def print_snmpd(): global ZENHOME try: script_path = which.which('zenoss-snmp-module') except which.WhichError: script_path = '/usr/bin/zenoss-snmp-module' print "# Pass control of ZENOSS-PROCESS-MIB::zenossProcessMIB." print "pass_persist {0} {1} --zenhome={2}".format( BASE_OID, script_path, ZENHOME) def unbuffer_stdout(): unbuffered = os.fdopen(sys.stdout.fileno(), 'w', 0) sys.stdout = unbuffered def none_or_nan(value): if value is None or math.isnan(value): return True class MIB: ''' Class representation of ZENOSS-PROCESS-MIB. 
''' zenSystemTable = '1' zenSystemEntry = '1.1' zenSystemName = '1.1.1' zenProcessTable = '2' zenProcessEntry = '2.1' zenProcessName = '2.1.1' zenProcessMetricTable = '3' zenProcessMetricEntry = '3.1' zenProcessMetricName = '3.1.1' zenProcessMetricValue = '3.1.2' zenProcessMetricCyclesSinceUpdate = '3.1.3' def oid(identifier, string_indices): return '{0}.{1}'.format( identifier, '.'.join(map(PP.encode, string_indices))) def update(): global PP update_zenSystemTable(PP) def update_zenSystemTable(PP): for system_name in system_names(): indices = [system_name] PP.add_str( oid(MIB.zenSystemName, indices), system_name) update_zenProcessTable(PP, system_name) def update_zenProcessTable(PP, system_name): for process_name in process_names(system_name): indices = [system_name, process_name] PP.add_str(oid(MIB.zenProcessName, indices), process_name) update_zenProcessMetricTable(PP, system_name, process_name) def update_zenProcessMetricTable(PP, system_name, process_name): for metric_name in metric_names(system_name, process_name): indices = [system_name, process_name, metric_name] PP.add_str(oid(MIB.zenProcessMetricName, indices), metric_name) try: rrd_filename = daemons_path( system_name, '{0}_{1}.rrd'.format(process_name, metric_name)) info, _, values = rrdtool.fetch(rrd_filename, 'AVERAGE') # Often the last sample is missing. Allow for it by using # the second-most-recent sample instead. if not none_or_nan(values[-1][0]): metric_value = values[-1][0] else: metric_value = values[-2][0] if not none_or_nan(metric_value): PP.add_str( oid(MIB.zenProcessMetricValue, indices), metric_value) # Figure out how many cycles (with decimal precision) it has # been since the metric was last updated. This is a better # measure than seconds since update because some metrics are # stored more frequently than others. step = info[2] seconds_since_update = time.time() - rrdtool.last(rrd_filename) cycles_since_update = seconds_since_update / step PP.add_str( oid(MIB.zenProcessMetricCyclesSinceUpdate, indices), '{0:.2f}'.format(cycles_since_update)) except Exception: pass def zen_path(*args): return os.path.join(ZENHOME, *args) def daemons_path(*args): return zen_path('perf', 'Daemons', *args) def system_names(): for dirname in os.listdir(daemons_path()): if os.path.isdir(daemons_path(dirname)): yield dirname def process_names(system_name): yielded = set() for filename in os.listdir(daemons_path(system_name)): if not os.path.isfile(daemons_path(system_name, filename)): continue if not filename.endswith('.rrd'): continue process_name = filename.split('_', 1)[0] if process_name not in yielded: yielded.add(process_name) yield process_name def metric_names(system_name, process_name): for filename in os.listdir(daemons_path(system_name)): if not os.path.isfile(daemons_path(system_name, filename)): continue if not filename.endswith('.rrd'): continue if not filename.startswith('{0}_'.format(process_name)): continue yield filename.split('_', 1)[1].split('.')[0] if __name__ == '__main__': main()
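# How the string indices above become numeric OIDs: PassPersist.encode()
# (used via map(PP.encode, ...) in oid()) prefixes each string with its
# length and then spells out the ASCII code of every character. A
# hand-rolled equivalent, for illustration only:
def encode_index(s):
    return '.'.join([str(len(s))] + [str(ord(ch)) for ch in s])

print(encode_index('localhost'))
# -> 9.108.111.99.97.108.104.111.115.116
# which is why zenSystemName."localhost" shows up in the DUMP output as
# 1.1.1.9.108.111.99.97.108.104.111.115.116 under the base OID.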
zenoss-snmp-module
/zenoss-snmp-module-1.0.0rc4.tar.gz/zenoss-snmp-module-1.0.0rc4/zenoss_snmp_module.py
zenoss_snmp_module.py
import ast
import re
import json
import logging
import requests

log = logging.getLogger(__name__)  # pylint: disable=C0103
requests.packages.urllib3.disable_warnings()

ROUTERS = {'MessagingRouter': 'messaging',
           'EventsRouter': 'evconsole',
           'ProcessRouter': 'process',
           'ServiceRouter': 'service',
           'DeviceRouter': 'device',
           'NetworkRouter': 'network',
           'TemplateRouter': 'template',
           'DetailNavRouter': 'detailnav',
           'ReportRouter': 'report',
           'MibRouter': 'mib',
           'ZenPackRouter': 'zenpack'}


class ZenossException(Exception):
    '''Custom exception for Zenoss
    '''
    pass


class Zenoss(object):
    '''A class that represents a connection to a Zenoss server
    '''

    def __init__(self, host, username, password, ssl_verify=True):
        self.__host = host
        self.__session = requests.Session()
        self.__session.auth = (username, password)
        self.__session.verify = ssl_verify
        self.__req_count = 0

    def __router_request(self, router, method, data=None):
        '''Internal method to make calls to the Zenoss request router
        '''
        if router not in ROUTERS:
            raise Exception('Router "' + router + '" not available.')

        req_data = json.dumps([dict(
            action=router,
            method=method,
            data=data,
            type='rpc',
            tid=self.__req_count)])
        log.debug('Making request to router %s with method %s', router, method)
        uri = '%s/zport/dmd/%s_router' % (self.__host, ROUTERS[router])
        headers = {'Content-type': 'application/json; charset=utf-8'}
        response = self.__session.post(uri, data=req_data, headers=headers)
        self.__req_count += 1

        # The API returns a 200 response code even when auth is bad.
        # With bad auth, the login page is displayed. Here I search for
        # an element on the login form to determine if auth failed.
        if re.search('name="__ac_name"', response.content.decode("utf-8")):
            log.error('Request failed. Bad username/password.')
            raise ZenossException('Request failed. Bad username/password.')

        return json.loads(response.content.decode("utf-8"))['result']

    def get_rrd_values(self, device, dsnames, start=None, end=None, function='LAST'):  # pylint: disable=R0913
        '''Method to abstract the details of making a request to the getRRDValue
        method for a device
        '''
        if function not in ['MINIMUM', 'AVERAGE', 'MAXIMUM', 'LAST']:
            raise ZenossException('Invalid RRD function {0} given.'.format(function))

        if len(dsnames) == 1:
            # Appending a junk value to dsnames because if only one value is provided Zenoss fails to return a value.
            dsnames.append('junk')

        url = '{0}/{1}/getRRDValues'.format(self.__host, self.device_uid(device))
        params = {'dsnames': dsnames, 'start': start, 'end': end, 'function': function}
        return ast.literal_eval(self.__session.get(url, params=params).content)

    def get_devices(self, device_class='/zport/dmd/Devices', limit=None):
        '''Get a list of all devices.
        '''
        log.info('Getting all devices')
        return self.__router_request('DeviceRouter', 'getDevices',
                                     data=[{'uid': device_class, 'params': {}, 'limit': limit}])

    def get_components(self, device_name, **kwargs):
        '''Get components for a device given the name
        '''
        uid = self.device_uid(device_name)
        return self.get_components_by_uid(uid=uid, **kwargs)

    def get_components_by_uid(self, uid=None, meta_type=None, keys=None,
                              start=0, limit=50, page=0,
                              sort='name', dir='ASC', name=None):
        '''Get components for a device given the uid
        '''
        data = dict(uid=uid, meta_type=meta_type, keys=keys, start=start,
                    limit=limit, page=page, sort=sort, dir=dir, name=name)
        return self.__router_request('DeviceRouter', 'getComponents', [data])

    def find_device(self, device_name):
        '''Find a device by name.
        '''
        log.info('Finding device %s', device_name)
        all_devices = self.get_devices()

        try:
            device = [d for d in all_devices['devices'] if d['name'] == device_name][0]
            # We need to save the hash for later operations
            device['hash'] = all_devices['hash']
            log.info('%s found', device_name)
            return device
        except IndexError:
            log.error('Cannot locate device %s', device_name)
            raise Exception('Cannot locate device %s' % device_name)

    def device_uid(self, device):
        '''Helper method to retrieve the device UID for a given device name
        '''
        return self.find_device(device)['uid']

    def add_device(self, device_name, device_class, collector='localhost'):
        '''Add a device.
        '''
        log.info('Adding %s', device_name)
        data = dict(deviceName=device_name, deviceClass=device_class, model=True, collector=collector)
        return self.__router_request('DeviceRouter', 'addDevice', [data])

    def remove_device(self, device_name):
        '''Remove a device.
        '''
        log.info('Removing %s', device_name)
        device = self.find_device(device_name)
        data = dict(uids=[device['uid']], hashcheck=device['hash'], action='delete')
        return self.__router_request('DeviceRouter', 'removeDevices', [data])

    def move_device(self, device_name, organizer):
        '''Move the device to the specified organizer.
        '''
        log.info('Moving %s to %s', device_name, organizer)
        device = self.find_device(device_name)
        data = dict(uids=[device['uid']], hashcheck=device['hash'], target=organizer)
        return self.__router_request('DeviceRouter', 'moveDevices', [data])

    def set_prod_state(self, device_name, prod_state):
        '''Set the production state of a device.
        '''
        log.info('Setting prodState on %s to %s', device_name, prod_state)
        device = self.find_device(device_name)
        data = dict(uids=[device['uid']], prodState=prod_state, hashcheck=device['hash'])
        return self.__router_request('DeviceRouter', 'setProductionState', [data])

    def set_maintenance(self, device_name):
        '''Helper method to set prodState for device so that it does not alert.
        '''
        return self.set_prod_state(device_name, 300)

    def set_production(self, device_name):
        '''Helper method to set prodState for device so that it is back in production and alerting.
        '''
        return self.set_prod_state(device_name, 1000)

    def set_product_info(self, device_name, hw_manufacturer, hw_product_name, os_manufacturer, os_product_name):  # pylint: disable=R0913
        '''Set ProductInfo on a device.
        '''
        log.info('Setting ProductInfo on %s', device_name)
        device = self.find_device(device_name)
        data = dict(uid=device['uid'],
                    hwManufacturer=hw_manufacturer,
                    hwProductName=hw_product_name,
                    osManufacturer=os_manufacturer,
                    osProductName=os_product_name)
        return self.__router_request('DeviceRouter', 'setProductInfo', [data])

    def set_rhel_release(self, device_name, release):
        '''Sets the proper release of RedHat Enterprise Linux.'''
        if type(release) is not float:
            log.error("RHEL release must be a float")
            return {u'success': False}
        log.info('Setting RHEL release on %s to %s', device_name, release)
        device = self.find_device(device_name)
        return self.set_product_info(device_name, device['hwManufacturer']['name'],
                                     device['hwModel']['name'], 'RedHat', 'RHEL {}'.format(release))

    def set_device_info(self, device_name, data):
        '''Set attributes on a device or device organizer.
        This method accepts any keyword argument for the property that you wish to set.
        '''
        data['uid'] = self.find_device(device_name)['uid']
        return self.__router_request('DeviceRouter', 'setInfo', [data])

    def remodel_device(self, device_name):
        '''Submit a job to have a device remodeled.
''' return self.__router_request('DeviceRouter', 'remodel', [dict(uid=self.find_device(device_name)['uid'])]) def set_collector(self, device_name, collector): '''Set collector for device. ''' device = self.find_device(device_name) data = dict(uids=[device['uid']], hashcheck=device['hash'], collector=collector) return self.__router_request('DeviceRouter', 'setCollector', [data]) def rename_device(self, device_name, new_name): '''Rename a device. ''' data = dict(uid=self.find_device(device_name)['uid'], newId=new_name) return self.__router_request('DeviceRouter', 'renameDevice', [data]) def reset_ip(self, device_name, ip_address=''): '''Reset IP address(es) of device to the results of a DNS lookup or a manually set address. ''' device = self.find_device(device_name) data = dict(uids=[device['uid']], hashcheck=device['hash'], ip=ip_address) return self.__router_request('DeviceRouter', 'resetIp', [data]) def get_events(self, device=None, limit=100, component=None, severity=None, event_class=None, start=0, event_state=None, sort='severity', direction='DESC'): '''Find current events. Returns a list of dicts containing event details. By default they are sorted in descending order of severity. By default, severity {5, 4, 3, 2} and state {0, 1} are the only events that will appear. ''' if severity is None: severity = [5, 4, 3, 2] if event_state is None: event_state = [0, 1] data = dict(start=start, limit=limit, dir=direction, sort=sort) data['params'] = dict(severity=severity, eventState=event_state) if device is not None: data['params']['device'] = device if component is not None: data['params']['component'] = component if event_class is not None: data['params']['eventClass'] = event_class log.info('Getting events for %s', data) return self.__router_request( 'EventsRouter', 'query', [data])['events'] def get_event_detail(self, event_id): '''Find specific event details ''' data = dict(evid=event_id) return self.__router_request('EventsRouter', 'detail', [data]) def write_log(self, event_id, message): '''Write a message to the event's log ''' data = dict(evid=event_id, message=message) return self.__router_request('EventsRouter', 'write_log', [data]) def change_event_state(self, event_id, state): '''Change the state of an event. ''' log.info('Changing eventState on %s to %s', event_id, state) return self.__router_request('EventsRouter', state, [{'evids': [event_id]}]) def ack_event(self, event_id): '''Helper method to set the event state to acknowledged. ''' return self.change_event_state(event_id, 'acknowledge') def close_event(self, event_id): '''Helper method to set the event state to closed. ''' return self.change_event_state(event_id, 'close') def create_event_on_device(self, device_name, severity, summary, component='', evclasskey='', evclass=''): '''Manually create a new event for the device specified. ''' log.info('Creating new event for %s with severity %s', device_name, severity) if severity not in ('Critical', 'Error', 'Warning', 'Info', 'Debug', 'Clear'): raise Exception('Severity %s is not valid.' % severity) data = dict(device=device_name, summary=summary, severity=severity, component=component, evclasskey=evclasskey, evclass=evclass) return self.__router_request('EventsRouter', 'add_event', [data]) def get_load_average(self, device): '''Returns the current 1, 5 and 15 minute load averages for a device. 
''' dsnames = ('laLoadInt1_laLoadInt1', 'laLoadInt5_laLoadInt5', 'laLoadInt15_laLoadInt15') result = self.get_rrd_values(device=device, dsnames=dsnames) def normalize_load(load): '''Convert raw RRD load average to something reasonable so that it matches output from /proc/loadavg''' return round(float(load) / 100.0, 2) return [normalize_load(l) for l in result.values()]
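# Usage sketch: a minimal example of the device API above. It assumes the
# enclosing client class is named `Zenoss` and is constructed with a base URL
# and credentials; the URL, credentials, and device name below are
# placeholders, not values from this module.
if __name__ == '__main__':
    api = Zenoss('https://zenoss.example.com:8080', 'admin', 'password')
    # find_device() returns the device dict with the collection hash attached
    device = api.find_device('test-device.example.com')
    print('uid=%s hash=%s' % (device['uid'], device['hash']))
    # Drop the device out of alerting (prodState 300), then restore it (1000)
    api.set_maintenance('test-device.example.com')
    api.set_production('test-device.example.com')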
zenoss
/zenoss-0.6.3.tar.gz/zenoss-0.6.3/zenoss.py
zenoss.py
zenoss.toolbox ============== *Current version: 0.5.2* Utilities for analyzing and debugging Zenoss environments, as well as tools to maintain and improve performance. *How do I install and use the zenoss.toolbox package?* * https://support.zenoss.com/hc/en-us/articles/203117595 Tools Included ============== zodbscan -------- *How do I use zodbscan to scan zodb for dangling references?* * https://support.zenoss.com/hc/en-us/articles/203118175 findposkeyerror --------------- *How do I use findposkeyerror to detect and fix POSKeyErrors in zodb?* * https://support.zenoss.com/hc/en-us/articles/203117795 zenrelationscan --------------- *How do I use zenrelationscan to scan and fix ZenRelations?* * https://support.zenoss.com/hc/en-us/articles/203121165 zencatalogscan -------------- *How do I use zencatalogscan to detect and fix unresolvable object references in catalogs?* * https://support.zenoss.com/hc/en-us/articles/203118075 zenindextool ------------ *How do I use zenindextool to reindex top-level organizers?* * https://support.zenoss.com/hc/en-us/articles/203263689 zennetworkclean --------------- *How do I use zennetworkclean to remove unused network information?* * https://support.zenoss.com/hc/en-us/articles/203263699 Author: Brian Bibeault ([email protected])
zenoss.toolbox
/zenoss.toolbox-0.5.2.tar.gz/zenoss.toolbox-0.5.2/README.rst
README.rst
#!/opt/zenoss/bin/python scriptVersion = "0.9.1" import argparse import datetime import Globals import logging import os import socket import sys import time import traceback import transaction from Acquisition import aq_parent from Products.ZenUtils.ZenScriptBase import ZenScriptBase from ZODB.transact import transact def configure_logging(scriptname): '''Configure logging for zenoss.toolbox tool usage''' # Confirm /tmp, $ZENHOME and check for $ZENHOME/log/toolbox (create if needed) if not os.path.exists('/tmp'): print "/tmp doesn't exist - aborting" exit(1) zenhome_path = os.getenv("ZENHOME") if not zenhome_path: print "$ZENHOME undefined - are you running as the zenoss user?" exit(1) log_file_path = os.path.join(zenhome_path, 'log', 'toolbox') if not os.path.exists(log_file_path): os.makedirs(log_file_path) # Setup "trash" toolbox log file (needed for ZenScriptBase log overriding) logging.basicConfig(filename='/tmp/toolbox.log.tmp', filemode='w', level=logging.INFO) # Create full path filename string for logfile, create RotatingFileHandler toolbox_log = logging.getLogger("%s" % (scriptname)) toolbox_log.setLevel(logging.INFO) log_file_name = os.path.join(zenhome_path, 'log', 'toolbox', '%s.log' % (scriptname)) handler = logging.handlers.RotatingFileHandler(log_file_name, maxBytes=8192*1024, backupCount=5) # Set logging.Formatter for format and datefmt, attach handler formatter = logging.Formatter('%(asctime)s,%(msecs)03d %(levelname)s %(name)s: %(message)s', '%Y-%m-%d %H:%M:%S') handler.setFormatter(formatter) handler.setLevel(logging.DEBUG) toolbox_log.addHandler(handler) # Print initialization string to console, log status to logfile print("\n[%s] Initializing %s (detailed log at %s)\n" % (time.strftime("%Y-%m-%d %H:%M:%S"), scriptname, log_file_name)) toolbox_log.info("Initializing %s" % (scriptname)) return toolbox_log def get_lock(process_name, log): '''Global lock function to keep multiple tools from running at once''' global lock_socket lock_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) try: lock_socket.bind('\0' + process_name) log.debug("Acquired '%s' execution lock" % (process_name)) except socket.error: print("[%s] Unable to acquire %s socket lock - are other tools already running?\n" % (time.strftime("%Y-%m-%d %H:%M:%S"), process_name)) log.error("'%s' lock already exists - unable to acquire - exiting" % (process_name)) log.info("############################################################") return False return True def inline_print(message): '''Print message on a single line using sys.stdout.write, .flush''' sys.stdout.write("\r%s" % (message)) sys.stdout.flush() def scan_progress_message(done, fix, cycle, catalog, issues, total_number_of_issues, percentage, chunk, log): '''Handle output to screen and logfile, remove output from scan_catalog logic''' # Logic for log file output messages based on done, issues if not done: log.debug("Scan of %s catalog is %2d%% complete" % (catalog, 2*chunk)) else: if issues > 0: log.warning("Scanned %s - found %d stale reference(s)" % (catalog, issues)) else: log.info("No stale references found scanning: %s" % (catalog)) log.debug("Scan of %s catalog is complete" % (catalog)) # Logic for screen output messages based on done, issues, and fix if issues > 0: if fix: if not done: inline_print("[%s] Cleaning [%-50s] %3d%% [%d orphaned IPs are deleted]" % (time.strftime("%Y-%m-%d %H:%M:%S"), '='*chunk, 2*chunk, issues)) else: inline_print("[%s] Clean #%2.0d [%-50s] %3.0d%% [%d orphaned IPs are deleted]\n" % (time.strftime("%Y-%m-%d 
%H:%M:%S"), cycle, '='*50, 100, issues)) else: if not done: inline_print("[%s] Scanning [%-50s] %3d%% [%d orphaned IPs are detected]" % (time.strftime("%Y-%m-%d %H:%M:%S"), '='*chunk, 2*chunk, issues)) else: inline_print("[%s] WARNING [%-50s] %3.0d%% [There are %d orphaned IPs (%.1f%%)]\n" % (time.strftime("%Y-%m-%d %H:%M:%S"), '='*50, 100, issues, percentage)) else: if not done: inline_print("[%s] Scanning [%-50s] %3d%% " % (time.strftime("%Y-%m-%d %H:%M:%S"), '='*chunk, 2*chunk)) else: if (total_number_of_issues == 0): inline_print("[%s] Verified [%-50s] %3.0d%% [No issues] \n" % (time.strftime("%Y-%m-%d %H:%M:%S"), '='*50, 100)) else: inline_print("[%s] Verified [%-50s] %3.0d%% [%d orphaned IPs are deleted (%.1f%%)] \n" % (time.strftime("%Y-%m-%d %H:%M:%S"), '='*50, 100, total_number_of_issues, percentage)) @transact def scan_catalog(catalog_name, catalog_list, fix, max_cycles, dmd, log): """Scan through a catalog looking for broken references""" catalog = catalog_list[0] initial_catalog_size = catalog_list[1] print("[%s] Examining %-35s (%d Objects)" % (time.strftime("%Y-%m-%d %H:%M:%S"), catalog_name, initial_catalog_size)) log.info("Examining %s catalog with %d objects" % (catalog_name, initial_catalog_size)) number_of_issues = -1 total_number_of_issues = 0 current_cycle = 0 if not fix: max_cycles = 1 while ((current_cycle < max_cycles) and (number_of_issues != 0)): number_of_issues = 0 current_cycle += 1 if (fix): log.info("Beginning cycle %d for catalog %s" % (current_cycle, catalog_name)) scanned_count = 0 progress_bar_chunk_size = 1 # ZEN-12165: show progress bar immediately before 'for' time overhead, before loading catalog scan_progress_message(False, fix, current_cycle, catalog_name, 0, 0, 0, 0, log) try: brains = catalog() catalog_size = len(brains) if (catalog_size > 50): progress_bar_chunk_size = (catalog_size//50) + 1 except Exception: raise for brain in brains: scanned_count += 1 if (scanned_count % progress_bar_chunk_size) == 0: chunk_number = scanned_count // progress_bar_chunk_size scan_progress_message(False, fix, current_cycle, catalog_name, number_of_issues, 0, 0, chunk_number, log) try: ip = brain.getObject() if not ip.interface(): if not fix: ip._p_deactivate() raise Exception ip._p_deactivate() except Exception: number_of_issues += 1 log.warning("Catalog %s contains orphaned object %s" % (catalog_name, ip.viewName())) if fix: log.info("Attempting to delete %s" % (ip.viewName())) try: parent = aq_parent(ip) parent._delObject(ip.id) ip._p_deactivate() except Exception as e: log.exception(e) total_number_of_issues += number_of_issues percentage = total_number_of_issues*1.0/initial_catalog_size*100 scan_progress_message(True, fix, current_cycle, catalog_name, number_of_issues, total_number_of_issues, percentage, chunk_number, log) if number_of_issues > 0: # print 'total_number_of_issues: {0}'.format(total_number_of_issues) return True, number_of_issues return False def build_catalog_dict(dmd, log): """Builds a list of catalogs present and > 0 objects""" catalogs_to_check = { 'Networks.ipSearch': 'dmd.Networks.ipSearch', 'IPv6Networks.ipSearch': 'dmd.IPv6Networks.ipSearch', } log.debug("Checking %d supported catalogs for (presence, not empty)" % (len(catalogs_to_check))) intermediate_catalog_dict = {} for catalog in catalogs_to_check.keys(): try: temp_brains = eval(catalogs_to_check[catalog]) if len(temp_brains) > 0: log.debug("Catalog %s exists, has items - adding to list" % (catalog)) intermediate_catalog_dict[catalog] = [eval(catalogs_to_check[catalog]), 
len(temp_brains)] else: log.debug("Skipping catalog %s - exists but has no items" % (catalog)) except AttributeError: log.debug("Skipping catalog %s - catalog not found" % (catalog)) except Exception as e: log.exception(e) return intermediate_catalog_dict def parse_options(): """Defines command-line options for script """ parser = argparse.ArgumentParser(version=scriptVersion, description="Removes old unused ip addresses. Documentation available at " "https://support.zenoss.com/hc/en-us/articles/203263699") parser.add_argument("-v10", "--debug", action="store_true", default=False, help="verbose log output (debug logging)") parser.add_argument("-f", "--fix", action="store_true", default=False, help="attempt to remove any stale references") parser.add_argument("-n", "--cycles", action="store", default="12", type=int, help="maximum times to cycle (with --fix)") parser.add_argument("-l", "--list", action="store_true", default=False, help="output all supported catalogs") parser.add_argument("-c", "--catalog", action="store", default="", help="only scan/fix specified catalog") return vars(parser.parse_args()) def main(): '''Scans for old unused ip addresses; if --fix, attempts to remove them. Builds list of available non-empty catalogs.''' execution_start = time.time() cli_options = parse_options() log = configure_logging('zennetworkclean') log.info("Command line options: %s" % (cli_options)) if cli_options['debug']: log.setLevel(logging.DEBUG) # Attempt to get the zenoss.toolbox lock before any actions performed if not get_lock("zenoss.toolbox", log): sys.exit(1) # Obtain dmd ZenScriptBase connection dmd = ZenScriptBase(noopts=True, connect=True).dmd log.debug("ZenScriptBase connection obtained") any_issue = [False, 0] # Build list of catalogs, then scan (and with --fix, clean) the catalog(s) present_catalog_dict = build_catalog_dict(dmd, log) if cli_options['list']: # Output list of present catalogs to the UI, perform no further operations print "List of supported Zenoss catalogs to examine:\n" print "\n".join(present_catalog_dict.keys()) log.info("zennetworkclean finished - list of supported catalogs output to CLI") else: # Scan through catalog(s) depending on --catalog parameter if cli_options['catalog']: if cli_options['catalog'] in present_catalog_dict.keys(): # Catalog provided as parameter is present - scan just that catalog any_issue = scan_catalog(cli_options['catalog'], present_catalog_dict[cli_options['catalog']], cli_options['fix'], cli_options['cycles'], dmd, log) else: print("Catalog '%s' unrecognized - unable to scan" % (cli_options['catalog'])) log.error("CLI input '%s' doesn't match recognized catalogs" % (cli_options['catalog'])) else: # Else scan for all catalogs in present_catalog_dict for catalog in present_catalog_dict.keys(): any_issue = scan_catalog(catalog, present_catalog_dict[catalog], cli_options['fix'], cli_options['cycles'], dmd, log) or any_issue # Print final status summary, update log file with termination block print("\n[%s] Execution finished in %s\n" % (time.strftime("%Y-%m-%d %H:%M:%S"), datetime.timedelta(seconds=int(time.time() - execution_start)))) log.info("zennetworkclean completed in %1.2f seconds" % (time.time() - execution_start)) log.info("############################################################") if any_issue and not cli_options['fix']: sys.exit(1) else: sys.exit(0) if __name__ == "__main__": main()
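# Standalone sketch of the get_lock() pattern used above: binding a socket in
# the Linux abstract namespace (the leading NUL byte) acts as a process-wide
# mutex that the kernel releases automatically when the process exits, so no
# stale lock files are left behind. Minimal illustration; the lock name is
# arbitrary.
import socket

def try_lock(name):
    # The socket must stay referenced - dropping it releases the lock
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    try:
        sock.bind('\0' + name)
        return sock  # held until this process exits or the socket is closed
    except socket.error:
        return None  # another process already bound this abstract name

if __name__ == '__main__':
    lock = try_lock('zenoss.toolbox')
    print('lock acquired' if lock else 'another tool is already running')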
zenoss.toolbox
/zenoss.toolbox-0.5.2.tar.gz/zenoss.toolbox-0.5.2/src/zenoss/toolbox/zennetworkclean.py
zennetworkclean.py
#!/opt/zenoss/bin/python scriptVersion = "1.6.2" import abc import argparse import datetime import Globals import logging import os import re import socket import sys import time import traceback import transaction from multiprocessing import Lock, Value from time import localtime, strftime from ZODB.POSException import POSKeyError from ZODB.utils import u64 from Products.ZenRelations.ToManyContRelationship import ToManyContRelationship from Products.ZenRelations.RelationshipBase import RelationshipBase from Products.ZenUtils.ZenScriptBase import ZenScriptBase from Products.ZenUtils.Utils import unused try: from ZenPacks.zenoss.AdvancedSearch.SearchManager import SearchManager, SEARCH_MANAGER_ID except ImportError: pass unused(Globals) def configure_logging(scriptname): '''Configure logging for zenoss.toolbox tool usage''' # Confirm /tmp, $ZENHOME and check for $ZENHOME/log/toolbox (create if needed) if not os.path.exists('/tmp'): print "/tmp doesn't exist - aborting" exit(1) zenhome_path = os.getenv("ZENHOME") if not zenhome_path: print "$ZENHOME undefined - are you running as the zenoss user?" exit(1) log_file_path = os.path.join(zenhome_path, 'log', 'toolbox') if not os.path.exists(log_file_path): os.makedirs(log_file_path) # Setup "trash" toolbox log file (needed for ZenScriptBase log overriding) logging.basicConfig(filename='/tmp/toolbox.log.tmp', filemode='w', level=logging.INFO) # Create full path filename string for logfile, create RotatingFileHandler toolbox_log = logging.getLogger("%s" % (scriptname)) toolbox_log.setLevel(logging.INFO) log_file_name = os.path.join(zenhome_path, 'log', 'toolbox', '%s.log' % (scriptname)) handler = logging.handlers.RotatingFileHandler(log_file_name, maxBytes=8192*1024, backupCount=5) # Set logging.Formatter for format and datefmt, attach handler formatter = logging.Formatter('%(asctime)s,%(msecs)03d %(levelname)s %(name)s: %(message)s', '%Y-%m-%d %H:%M:%S') handler.setFormatter(formatter) handler.setLevel(logging.DEBUG) toolbox_log.addHandler(handler) # Print initialization string to console, log status to logfile print("\n[%s] Initializing %s (detailed log at %s)\n" % (time.strftime("%Y-%m-%d %H:%M:%S"), scriptname, log_file_name)) toolbox_log.info("Initializing %s" % (scriptname)) return toolbox_log def get_lock(process_name, log): '''Global lock function to keep multiple tools from running at once''' global lock_socket lock_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) try: lock_socket.bind('\0' + process_name) log.debug("Acquired '%s' execution lock" % (process_name)) except socket.error: print("[%s] Unable to acquire %s socket lock - are other tools already running?\n" % (time.strftime("%Y-%m-%d %H:%M:%S"), process_name)) log.error("'%s' lock already exists - unable to acquire - exiting" % (process_name)) log.info("############################################################") return False return True def inline_print(message): '''Print message on a single line using sys.stdout.write, .flush''' sys.stdout.write("\r%s" % (message)) sys.stdout.flush() class Counter(object): def __init__(self, initval=0): self.val = Value('i', initval) self.lock = Lock() def increment(self): with self.lock: self.val.value += 1 def value(self): with self.lock: return self.val.value def progress_bar(items, errors, repairs, fix_value): if fix_value: inline_print("[%s] | Items Scanned: %12d | Errors: %6d | Repairs: %6d | " % (time.strftime("%Y-%m-%d %H:%M:%S"), items, errors, repairs)) else: inline_print("[%s] | Items Scanned: %12d | Errors: %6d | " % 
(time.strftime("%Y-%m-%d %H:%M:%S"), items, errors)) class Fixer(object): __metaclass__ = abc.ABCMeta @abc.abstractmethod def fixable(self, ex, objId, parentPath, dmd, log): """ Return a no-argument callable object that will perform the fix when invoked or None if not fixable. """ class RelFixer(Fixer): def fixable(self, ex, relId, parentPath, dmd, log): """ Return True if this object can fix the exception. """ try: parent = dmd.getObjByPath(parentPath) relationship = parent._getOb(relId) if not isinstance(relationship, RelationshipBase): return None badobj = getattr(relationship, "_objects", None) if badobj is None: log.warning("Cannot fix relationship - no _objects attribute") return None exOID = getOID(ex) relOID = getPOID(relationship._objects) if exOID == relOID: return lambda: self._fix(exOID, relOID, relationship, parent, dmd, log) else: log.warning("Cannot fix this relationship - exOID %s != relOID %s" % (exOID, relOID)) except: return None def _fix(self, exOID, relOID, relationship, parent, dmd, log): """ Attempt to fix the POSKeyError """ cls = relationship._objects.__class__ relationship._objects = cls() parent._p_changed = True transaction.commit() class SearchManagerFixer(Fixer): """ SearchManagerFixer fixes SearchManager POSKeyErrors like: POSKeyError: 0x0683923b on attribute 'SearchManager' of app.zport.dmd.ZenUsers.svs """ # >>> dmd.ZenUsers.svs.SearchManager.__class__ # <class 'ZenPacks.zenoss.AdvancedSearch.SearchManager.SearchManager'> # >>> find('svs') # <UserSettings at /zport/dmd/ZenUsers/svs> # >>> d=_ # >>> d._delOb('SearchManager') # >>> commit() def fixable(self, ex, objId, parentPath, dmd, log): """ Return True if this object can fix the exception. """ if objId != 'SearchManager': return None parent = dmd.getObjByPath(parentPath) obj = parent._getOb(objId) if not isinstance(obj, SearchManager): return None exOID = getOID(ex) relOID = getPOID(obj) if exOID == relOID: return lambda: self._fix(exOID, parent, dmd, log) return None def _fix(self, exOID, parent, dmd, log): """ Delete only; a new one will be created when a SearchProvider is requested. """ try: parent._delOb('SearchManager') except Exception as e: log.exception(e) transaction.commit() try: parent._setObject(SEARCH_MANAGER_ID, SearchManager(SEARCH_MANAGER_ID)) except Exception as e: log.exception(e) transaction.commit() class ComponentSearchFixer(Fixer): """ ComponentSearchFixer fixes ComponentSearch POSKeyErrors like: POSKeyError: 0x070039e0 on attribute 'componentSearch' of app.zport.dmd.Devices.Network.Juniper.mx.mx_240.devices.edge1.fra """ def fixable(self, ex, objId, parentPath, dmd, log): """ Return True if this object can fix the exception. 
""" if objId != 'componentSearch': return None parent = dmd.getObjByPath(parentPath) obj = parent._getOb(objId) exOID = getOID(ex) relOID = getPOID(obj) if exOID == relOID: return lambda: self._fix(exOID, parent, dmd, log) return None def _fix(self, exOID, parent, dmd, log): """ Attempt to remove and recreate the componentSearch() """ try: parent._delOb('componentSearch') except Exception as e: log.exception(e) transaction.commit() try: parent._create_componentSearch() except Exception as e: log.exception(e) transaction.commit() _fixits = [RelFixer(), SearchManagerFixer(), ComponentSearchFixer(), ] def _getEdges(node): cls = node.aq_base names = set(node.objectIds() if hasattr(cls, "objectIds") else []) relationships = set( node.getRelationshipNames() if hasattr(cls, "getRelationshipNames") else [] ) return (names - relationships), relationships _RELEVANT_EXCEPTIONS = (POSKeyError, KeyError, AttributeError) def _getPathStr(path): return "app%s" % ('.'.join(path)) if len(path) > 1 else "app" def fixPOSKeyError(exname, ex, objType, objId, parentPath, dmd, log, counters): """ Fixes POSKeyErrors given: Name of exception type object, Exception, Type of problem object, Name (ID) of the object, The path to the parent of the named object """ # -- verify that the OIDs match for fixer in _fixits: fix = fixer.fixable(ex, objId, parentPath, dmd, log) if fix: log.info("Attempting to repair %s issue on %s" % (ex, objId)) counters['repair_count'].increment() fix() break def getPOID(obj): # from ZODB.utils import u64 return "0x%08x" % u64(obj._p_oid) def getOID(ex): return "0x%08x" % int(str(ex), 16) def findPOSKeyErrors(topnode, attempt_fix, use_unlimited_memory, dmd, log, counters): """ Processes issues as they are found, handles progress output, logs to output file """ PROGRESS_INTERVAL = 829 # Prime number near 1000 ending in a 9, used for progress bar # Objects that will have their children traversed are stored in 'nodes' nodes = [topnode] while nodes: node = nodes.pop(0) counters['item_count'].increment() path = node.getPhysicalPath() path_string = "/".join(path) if (counters['item_count'].value() % PROGRESS_INTERVAL) == 0: if not use_unlimited_memory: transaction.abort() progress_bar(counters['item_count'].value(), counters['error_count'].value(), counters['repair_count'].value(), attempt_fix) try: attributes, relationships = _getEdges(node) except _RELEVANT_EXCEPTIONS as e: log.warning("%s: %s %s '%s'" % (type(e).__name__, e, "while retreiving children of", path_string)) counters['error_count'].increment() if attempt_fix: if isinstance(e, POSKeyError): fixPOSKeyError(type(e).__name__, e, "node", name, path, dmd, log, counters) continue except Exception as e: log.exception(e) for name in relationships: try: if (counters['item_count'].value() % PROGRESS_INTERVAL) == 0: if not use_unlimited_memory: transaction.abort() progress_bar(counters['item_count'].value(), counters['error_count'].value(), counters['repair_count'].value(), attempt_fix) counters['item_count'].increment() rel = node._getOb(name) rel() # ToManyContRelationship objects should have all referenced objects traversed if isinstance(rel, ToManyContRelationship): nodes.append(rel) except SystemError as e: # to troubleshoot traceback in: # https://dev.zenoss.com/tracint/pastebin/4769 # ./findposkeyerror --fixrels /zport/dmd/ # SystemError: new style getargs format but argument is not a tuple log.warning("%s: %s on %s '%s' of %s" % (type(e).__name__, e, "relationship", name, path_string)) raise # Not sure why we are raising this vs. 
logging and continuing except _RELEVANT_EXCEPTIONS as e: counters['error_count'].increment() log.warning("%s: %s on %s '%s' of %s" % (type(e).__name__, e, "relationship", name, path_string)) if attempt_fix: if isinstance(e, POSKeyError): fixPOSKeyError(type(e).__name__, e, "relationship", name, path, dmd, log, counters) except Exception as e: log.warning("%s: %s on %s '%s' of %s" % (type(e).__name__, e, "relationship", name, path_string)) for name in attributes: try: if (counters['item_count'].value() % PROGRESS_INTERVAL) == 0: if not use_unlimited_memory: transaction.abort() progress_bar(counters['item_count'].value(), counters['error_count'].value(), counters['repair_count'].value(), attempt_fix) counters['item_count'].increment() childnode = node._getOb(name) childnode.getId() except _RELEVANT_EXCEPTIONS as e: counters['error_count'].increment() log.warning("%s: %s on %s '%s' of %s" % (type(e).__name__, e, "attribute", name, path_string)) if attempt_fix: if isinstance(e, POSKeyError): fixPOSKeyError(type(e).__name__, e, "attribute", name, path, dmd, log, counters) except Exception as e: log.warning("%s: %s on %s '%s' of %s" % (type(e).__name__, e, "attribute", name, path_string)) else: # No exception, so it should be safe to add this child node as a traversable object. nodes.append(childnode) if not use_unlimited_memory: transaction.abort() progress_bar(counters['item_count'].value(), counters['error_count'].value(), counters['repair_count'].value(), attempt_fix) def parse_options(): """Defines command-line options for script """ """ NOTE: With --unlimitedram in my testing, I have seen RAM usage grow to just over 2x the size of 'du -h /opt/zends/data/zodb'. For a 20GB /opt/zends/data/zodb folder, I saw RAM usage of ~ 42GB""" parser = argparse.ArgumentParser(version=scriptVersion, description="Scans a zodb path for POSKeyErrors - additional information " "at https://support.zenoss.com/hc/en-us/articles/203117795") parser.add_argument("-v10", "--debug", action="store_true", default=False, help="verbose log output (debug logging)") parser.add_argument("-f", "--fix", action="store_true", default=False, help="attempt to fix ZenRelationship objects") parser.add_argument("-p", "--path", action="store", default="/", type=str, help="base path to scan from (Devices.Server)?") parser.add_argument("-u", "--unlimitedram", action="store_true", default=False, help="skip transaction.abort() - unbounded RAM, ~40%% faster") return vars(parser.parse_args()) def main(): """ Scans through zodb hierarchy (from user-supplied path, defaults to /), checking for PKEs """ execution_start = time.time() cli_options = parse_options() log = configure_logging('findposkeyerror') log.info("Command line options: %s" % (cli_options)) if cli_options['debug']: log.setLevel(logging.DEBUG) # Attempt to get the zenoss.toolbox lock before any actions performed if not get_lock("zenoss.toolbox", log): sys.exit(1) # Obtain dmd ZenScriptBase connection dmd = ZenScriptBase(noopts=True, connect=True).dmd log.debug("ZenScriptBase connection obtained") counters = { 'item_count': Counter(0), 'error_count': Counter(0), 'repair_count': Counter(0) } processed_path = re.split("[./]", cli_options['path']) if processed_path[0] == "app": processed_path = processed_path[1:] processed_path = '/'.join(processed_path) if processed_path else '/' try: folder = dmd.getObjByPath(processed_path) except KeyError: print "Invalid path: %s" % (cli_options['path']) else: print("[%s] Examining items under the '%s' path (%s):\n" % (strftime("%Y-%m-%d %H:%M:%S", 
localtime()), cli_options['path'], folder)) log.info("Examining items under the '%s' path (%s)" % (cli_options['path'], folder)) findPOSKeyErrors(folder, cli_options['fix'], cli_options['unlimitedram'], dmd, log, counters) print print("\n[%s] Execution finished in %s\n" % (strftime("%Y-%m-%d %H:%M:%S", localtime()), datetime.timedelta(seconds=int(time.time() - execution_start)))) log.info("findposkeyerror completed in %1.2f seconds" % (time.time() - execution_start)) log.info("############################################################") if ((counters['error_count'].value() > 0) and not cli_options['fix']): print("** WARNING ** Issues were detected - Consult KB article at") print(" https://support.zenoss.com/hc/en-us/articles/203117795\n") sys.exit(1) else: sys.exit(0) if __name__ == "__main__": main()
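# Sketch of the oid normalization used by getPOID()/getOID() above: ZODB
# stores _p_oid as an 8-byte big-endian string, while a POSKeyError renders
# the same value as hex text. Normalizing both to the "0x%08x" form is what
# lets a fixer compare them directly. Minimal illustration with a made-up oid.
from ZODB.utils import p64, u64

if __name__ == '__main__':
    packed = p64(0x0683923b)  # 8-byte packed oid, as stored in _p_oid
    assert "0x%08x" % u64(packed) == "0x0683923b"
    assert "0x%08x" % int("0x0683923b", 16) == "0x0683923b"
    print("oid normalization round-trips")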
zenoss.toolbox
/zenoss.toolbox-0.5.2.tar.gz/zenoss.toolbox-0.5.2/src/zenoss/toolbox/findposkeyerror.py
findposkeyerror.py
#!/opt/zenoss/bin/python scriptVersion = "0.9.1" import argparse import datetime import Globals import logging import os import socket import sys import time import traceback import transaction from multiprocessing import Lock, Value from time import localtime, strftime from Products.CMFCore.utils import getToolByName from Products.ZenUtils.Utils import getAllConfmonObjects from Products.ZenUtils.ZenScriptBase import ZenScriptBase from Products.Zuul.catalog.events import IndexingEvent from ZODB.transact import transact from zope.event import notify from ZODB.POSException import POSKeyError def configure_logging(scriptname): '''Configure logging for zenoss.toolbox tool usage''' # Confirm /tmp, $ZENHOME and check for $ZENHOME/log/toolbox (create if needed) if not os.path.exists('/tmp'): print "/tmp doesn't exist - aborting" exit(1) zenhome_path = os.getenv("ZENHOME") if not zenhome_path: print "$ZENHOME undefined - are you running as the zenoss user?" exit(1) log_file_path = os.path.join(zenhome_path, 'log', 'toolbox') if not os.path.exists(log_file_path): os.makedirs(log_file_path) # Setup "trash" toolbox log file (needed for ZenScriptBase log overriding) logging.basicConfig(filename='/tmp/toolbox.log.tmp', filemode='w', level=logging.INFO) # Create full path filename string for logfile, create RotatingFileHandler toolbox_log = logging.getLogger("%s" % (scriptname)) toolbox_log.setLevel(logging.INFO) log_file_name = os.path.join(zenhome_path, 'log', 'toolbox', '%s.log' % (scriptname)) handler = logging.handlers.RotatingFileHandler(log_file_name, maxBytes=8192*1024, backupCount=5) # Set logging.Formatter for format and datefmt, attach handler formatter = logging.Formatter('%(asctime)s,%(msecs)03d %(levelname)s %(name)s: %(message)s', '%Y-%m-%d %H:%M:%S') handler.setFormatter(formatter) handler.setLevel(logging.DEBUG) toolbox_log.addHandler(handler) # Print initialization string to console, log status to logfile print("\n[%s] Initializing %s (detailed log at %s)\n" % (time.strftime("%Y-%m-%d %H:%M:%S"), scriptname, log_file_name)) toolbox_log.info("Initializing %s" % (scriptname)) return toolbox_log def get_lock(process_name, log): '''Global lock function to keep multiple tools from running at once''' global lock_socket lock_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) try: lock_socket.bind('\0' + process_name) log.debug("Acquired '%s' execution lock" % (process_name)) except socket.error: print("[%s] Unable to acquire %s socket lock - are other tools already running?\n" % (time.strftime("%Y-%m-%d %H:%M:%S"), process_name)) log.error("'%s' lock already exists - unable to acquire - exiting" % (process_name)) log.info("############################################################") return False return True def inline_print(message): '''Print message on a single line using sys.stdout.write, .flush''' sys.stdout.write("\r%s" % (message)) sys.stdout.flush() class Counter(object): def __init__(self, initval=0): self.val = Value('i', initval) self.lock = Lock() def increment(self): with self.lock: self.val.value += 1 def value(self): with self.lock: return self.val.value def progress_bar(items, errors, repairs, fix_value): if fix_value: inline_print("[%s] | Items Scanned: %12d | Errors: %6d | Repairs: %6d | " % (time.strftime("%Y-%m-%d %H:%M:%S"), items, errors, repairs)) else: inline_print("[%s] | Items Scanned: %12d | Errors: %6d | " % (time.strftime("%Y-%m-%d %H:%M:%S"), items, errors)) def scan_relationships(attempt_fix, max_cycles, use_unlimited_memory, dmd, log, counters): '''Scan 
through zodb relationships looking for broken references''' # ENTIRETY OF REBUILD CODE FROM ZenUtils/CheckRelations.py (for reference) # def rebuild(self): # repair = self.options.repair # ccount = 0 # for object in getAllConfmonObjects(self.dmd): # ccount += 1 # self.log.debug("checking relations on object %s" # % object.getPrimaryDmdId()) # object.checkRelations(repair=repair) # ch = object._p_changed # if not ch: object._p_deactivate() # if ccount >= self.options.savepoint: # transaction.savepoint() # ccount = 0 # if self.options.nocommit: # self.log.info("not commiting any changes") # else: # trans = transaction.get() # trans.note('CheckRelations cleaned relations' ) # trans.commit() PROGRESS_INTERVAL = 829 # Prime number near 1000 ending in a 9, used for progress bar print("[%s] Examining ZenRelations...\n" % (time.strftime("%Y-%m-%d %H:%M:%S"))) log.info("Examining ZenRelations...") number_of_issues = -1 current_cycle = 0 if not attempt_fix: max_cycles = 1 progress_bar(counters['item_count'].value(), counters['error_count'].value(), counters['repair_count'].value(), attempt_fix) while ((current_cycle < max_cycles) and (number_of_issues != 0)): number_of_issues = 0 current_cycle += 1 if (attempt_fix): log.info("Beginning cycle %d" % (current_cycle)) try: relationships_to_check = getAllConfmonObjects(dmd) except Exception: raise while True: try: object = relationships_to_check.next() counters['item_count'].increment() if (counters['item_count'].value() % PROGRESS_INTERVAL) == 0: if not use_unlimited_memory: transaction.abort() progress_bar(counters['item_count'].value(), counters['error_count'].value(), counters['repair_count'].value(), attempt_fix) log.debug("Processed %d items" % (counters['item_count'].value())) try: object.checkRelations(repair=attempt_fix) changed = object._p_changed if not changed: object._p_deactivate() else: transaction.commit() log.debug("Checked object %s" % (object.getPrimaryDmdId())) except Exception as e: log.exception(e) counters['error_count'].increment() counters['repair_count'].increment() except: try: log.error("Object %s had broken relationship" % (object.getPrimaryDmdId())) except: log.error("Object had issues loading - PKE") counters['error_count'].increment() counters['repair_count'].increment() except StopIteration: break except Exception as e: log.exception(e) if not use_unlimited_memory: transaction.abort() progress_bar(counters['item_count'].value(), counters['error_count'].value(), counters['repair_count'].value(), attempt_fix) print("\n\n#################################################################") print "CRITICAL: Exception encountered - aborting. Please see log file." print("#################################################################") return if not use_unlimited_memory: transaction.abort() progress_bar(counters['item_count'].value(), counters['error_count'].value(), counters['repair_count'].value(), attempt_fix) print def parse_options(): """Defines and parses command-line options for script """ parser = argparse.ArgumentParser(version=scriptVersion, description="Scans ZenRelations for issues. 
Additional documentation at " "https://support.zenoss.com/hc/en-us/articles/203121165") parser.add_argument("-v10", "--debug", action="store_true", default=False, help="verbose log output (debug logging)") parser.add_argument("-f", "--fix", action="store_true", default=False, help="attempt to remove any invalid references") parser.add_argument("-n", "--cycles", action="store", default="2", type=int, help="maximum times to cycle (with --fix)") parser.add_argument("-u", "--unlimitedram", action="store_true", default=False, help="skip transaction.abort() - unbounded RAM, ~40%% faster") return vars(parser.parse_args()) def main(): '''Scans zodb objects for ZenRelations issues. If --fix, attempts repair.''' execution_start = time.time() cli_options = parse_options() log = configure_logging('zenrelationscan') log.info("Command line options: %s" % (cli_options)) if cli_options['debug']: log.setLevel(logging.DEBUG) counters = { 'item_count': Counter(0), 'error_count': Counter(0), 'repair_count': Counter(0) } # Attempt to get the zenoss.toolbox lock before any actions performed if not get_lock("zenoss.toolbox", log): sys.exit(1) # Obtain dmd ZenScriptBase connection dmd = ZenScriptBase(noopts=True, connect=True).dmd log.debug("ZenScriptBase connection obtained") scan_relationships(cli_options['fix'], cli_options['cycles'], cli_options['unlimitedram'], dmd, log, counters) # Print final status summary, update log file with termination block print("\n[%s] Execution finished in %s\n" % (strftime("%Y-%m-%d %H:%M:%S", localtime()), datetime.timedelta(seconds=int(time.time() - execution_start)))) log.info("zenrelationscan completed in %1.2f seconds" % (time.time() - execution_start)) log.info("############################################################") if ((counters['error_count'].value() > 0) and not cli_options['fix']): print("** WARNING ** Issues were detected - Consult KB article at") print(" https://support.zenoss.com/hc/en-us/articles/203121165\n") sys.exit(1) else: sys.exit(0) if __name__ == "__main__": main()
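# Sketch of the memory-control idiom scan_relationships() relies on
# (illustrative, not Zenoss-specific): ghosting each clean object with
# _p_deactivate() and periodically aborting the read transaction keeps the
# ZODB pickle cache - and therefore RAM - bounded. Skipping the aborts is
# exactly the memory-for-speed trade the --unlimitedram flag makes.
import transaction

def bounded_walk(objects, check, interval=829):
    '''objects: iterable of persistent objects; check: callable that may repair one'''
    for count, obj in enumerate(objects, 1):
        check(obj)
        if not obj._p_changed:
            obj._p_deactivate()   # turn the clean object back into a ghost
        else:
            transaction.commit()  # persist a repair before moving on
        if count % interval == 0:
            transaction.abort()   # drop read-only state cached so far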
zenoss.toolbox
/zenoss.toolbox-0.5.2.tar.gz/zenoss.toolbox-0.5.2/src/zenoss/toolbox/zenrelationscan.py
zenrelationscan.py
#!/opt/zenoss/bin/python scriptVersion = "1.0.1" import Globals import argparse import sys import os import traceback import logging import socket import time import datetime import transaction import cStringIO import tempfile import cPickle import ZConfig from pickle import Unpickler as UnpicklerBase from collections import deque from time import localtime, strftime from multiprocessing import Lock, Value from relstorage.zodbpack import schema_xml from Products.ZenUtils.ZenScriptBase import ZenScriptBase from Products.ZenUtils.AutoGCObjectReader import gc_cache_every from Products.ZenUtils.GlobalConfig import getGlobalConfiguration from Products.ZenRelations.ToManyContRelationship import ToManyContRelationship from Products.ZenRelations.RelationshipBase import RelationshipBase from ZODB.transact import transact from ZODB.POSException import POSKeyError from ZODB.DB import DB from ZODB.utils import u64 def configure_logging(scriptname): '''Configure logging for zenoss.toolbox tool usage''' # Confirm /tmp, $ZENHOME and check for $ZENHOME/log/toolbox (create if needed) if not os.path.exists('/tmp'): print "/tmp doesn't exist - aborting" exit(1) zenhome_path = os.getenv("ZENHOME") if not zenhome_path: print "$ZENHOME undefined - are you running as the zenoss user?" exit(1) log_file_path = os.path.join(zenhome_path, 'log', 'toolbox') if not os.path.exists(log_file_path): os.makedirs(log_file_path) # Setup "trash" toolbox log file (needed for ZenScriptBase log overriding) logging.basicConfig(filename='/tmp/toolbox.log.tmp', filemode='w', level=logging.INFO) # Create full path filename string for logfile, create RotatingFileHandler toolbox_log = logging.getLogger("%s" % (scriptname)) toolbox_log.setLevel(logging.INFO) log_file_name = os.path.join(zenhome_path, 'log', 'toolbox', '%s.log' % (scriptname)) handler = logging.handlers.RotatingFileHandler(log_file_name, maxBytes=8192*1024, backupCount=5) # Set logging.Formatter for format and datefmt, attach handler formatter = logging.Formatter('%(asctime)s,%(msecs)03d %(levelname)s %(name)s: %(message)s', '%Y-%m-%d %H:%M:%S') handler.setFormatter(formatter) handler.setLevel(logging.DEBUG) toolbox_log.addHandler(handler) # Print initialization string to console, log status to logfile print("\n[%s] Initializing %s (detailed log at %s)\n" % (time.strftime("%Y-%m-%d %H:%M:%S"), scriptname, log_file_name)) toolbox_log.info("Initializing %s" % (scriptname)) return toolbox_log def get_lock(process_name, log): '''Global lock function to keep multiple tools from running at once''' global lock_socket lock_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) try: lock_socket.bind('\0' + process_name) log.debug("Acquired '%s' execution lock" % (process_name)) except socket.error: print("[%s] Unable to acquire %s socket lock - are other tools already running?\n" % (time.strftime("%Y-%m-%d %H:%M:%S"), process_name)) log.error("'%s' lock already exists - unable to acquire - exiting" % (process_name)) log.info("############################################################") return False return True def inline_print(message): '''Print message on a single line using sys.stdout.write, .flush''' sys.stdout.write("\r%s" % (message)) sys.stdout.flush() class Counter(object): def __init__(self, initval=0): self.val = Value('i', initval) self.lock = Lock() def increment(self): with self.lock: self.val.value += 1 def value(self): with self.lock: return self.val.value schema = ZConfig.loadSchemaFile(cStringIO.StringIO(schema_xml)) class Analyzer(UnpicklerBase): """ Able 
to analyze an object's pickle to try to figure out the name/class of the problem oid. """ def __init__(self, pickle, problem_oid): UnpicklerBase.__init__(self, cStringIO.StringIO(pickle)) self.problem_oid = problem_oid self._marker = object() self.klass = None def persistent_load(self, pickle_id): if isinstance(pickle_id, tuple): oid, klass = pickle_id if oid == self.problem_oid: self.klass = klass return self._marker else: pass def get_refs(p): """ Generator-using version of ZODB.serialize.references """ refs = [] u = cPickle.Unpickler(cStringIO.StringIO(p)) u.persistent_load = refs u.noload() u.noload() for ref in refs: if isinstance(ref, tuple): yield ref[0] elif isinstance(ref, str): yield ref else: assert isinstance(ref, list) yield ref[1][:2] def get_config(database=None): conf = getGlobalConfiguration() if database: conf['zodb-db'] = conf['mysqldb'] = database else: conf['mysqldb'] = conf.get('mysqldb', conf.get('zodb-db')) conf['zodb-db'] = conf.get('zodb-db', conf.get('mysqldb')) zodb_socket = conf.get('mysqlsocket', conf.get('zodb-socket')) if zodb_socket: conf['socket'] = 'unix_socket %s' % zodb_socket else: conf['socket'] = '' newer_conf = { 'zodb-host': conf.get('host'), 'zodb-port': conf.get('port'), 'zodb-db': conf.get('mysqldb'), 'zodb-user': conf.get('mysqluser'), 'zodb-password': conf.get('mysqlpasswd') } newer_conf.update(conf) _storage_config = """ <relstorage> pack-gc true keep-history false <mysql> host %(zodb-host)s port %(zodb-port)s db %(zodb-db)s user %(zodb-user)s passwd %(zodb-password)s %(socket)s </mysql> </relstorage> """ % newer_conf with tempfile.NamedTemporaryFile() as configfile: configfile.write(_storage_config) configfile.flush() config, handler = ZConfig.loadConfig(schema, configfile.name) return config class PKEReporter(object): def __init__(self, db='zodb'): self._dbname = db self._config = get_config(db) self._storage = self._config.storages[0].open() self._db = DB(self._storage) self._conn = self._db.open() self._app = self._conn.root() self._size = self.get_total_count() def get_total_count(self): connmanager = self._storage._adapter.connmanager conn, cursor = connmanager.open() try: cursor.execute("SELECT count(zoid) from object_state") row = cursor.fetchone() return long(row[0]) finally: connmanager.close(conn, cursor) def analyze(self, parent_oid, child_oid): parent_state = self._storage.load(parent_oid)[0] pickler = Analyzer(parent_state, child_oid) pickler.load() result = pickler.load() name = None # First try to get the name from the pickle state try: for k, v in result.iteritems(): if v is pickler._marker: name = k break except Exception: pass if not name: # Now load up the child and see if it has an id child = self._conn[child_oid] try: name = child.id except Exception: try: name = child.getId() except Exception: pass if not name: # Check the actual attributes on the parent parent = self._conn[parent_oid] try: for k, v in parent.__dict__.iteritems(): try: if v == child: name = k break except Exception: pass except AttributeError: # catch these errors - AttributeError: 'BTrees.OIBTree.OIBTree' object has no attribute '__dict__' pass return name, pickler.klass @staticmethod def oid_versions(oid): u64ed = u64(oid) oid_0xstyle = "0x%08x" % u64ed repred = repr(oid) return u64ed, oid_0xstyle, repred def report(self, oid, ancestors, log): parent_oid = ancestors[-2] parent_klass = None try: immediate_parent = self._conn[parent_oid] parent_klass = immediate_parent.__class__ path = immediate_parent.getPrimaryPath() except Exception: # Not a 
PrimaryPathObjectManager, do it manually path = [''] for (a, b) in zip(ancestors[:-2], ancestors[1:-1]): name, klass = self.analyze(a, b) path.append(name) parent_klass = klass path = filter(None, path) name, klass = self.analyze(*ancestors[-2:]) par_u64, par_0x, par_rep = self.oid_versions(parent_oid) oid_u64, oid_0x, oid_rep = self.oid_versions(oid) log.critical(""" DANGLING REFERENCE (POSKeyError) FOUND: PATH: {path} TYPE: {type} OID: {par_0x} {par_rep} {par_u64} Refers to a missing object: NAME: {name} TYPE: {klass} OID: {oid_0x} {oid_rep} {oid_u64} """.format(path='/'.join(path), type=parent_klass, name=name, klass=klass, par_u64=par_u64, par_0x=par_0x, par_rep=par_rep, oid_u64=oid_u64, oid_0x=oid_0x, oid_rep=oid_rep)) def verify(self, root, log, number_of_issues): database_size = self._size scanned_count = 0 progress_bar_chunk_size = 1 if (database_size > 50): progress_bar_chunk_size = (database_size//50) + 1 inline_print("[%s] Scanning [%-50s] %3d%% " % (time.strftime("%Y-%m-%d %H:%M:%S"), '='*0, 0)) seen = set() path = () stack = deque([(root, path)]) curstack, stack = stack, deque([]) while curstack or stack: oid, path = curstack.pop() scanned_count = len(seen) if (scanned_count % progress_bar_chunk_size) == 0: chunk_number = scanned_count // progress_bar_chunk_size if number_of_issues.value() > 2: inline_print("[%s] CRITICAL [%-50s] %3d%% [%d Dangling References]" % (time.strftime("%Y-%m-%d %H:%M:%S"), '='*chunk_number, 2*chunk_number, number_of_issues.value())) elif number_of_issues.value() == 1: inline_print("[%s] CRITICAL [%-50s] %3d%% [%d Dangling Reference]" % (time.strftime("%Y-%m-%d %H:%M:%S"), '='*chunk_number, 2*chunk_number, number_of_issues.value())) else: inline_print("[%s] Scanning [%-50s] %3d%% " % (time.strftime("%Y-%m-%d %H:%M:%S"), '='*chunk_number, 2*chunk_number)) if (oid not in seen): try: state = self._storage.load(oid)[0] seen.add(oid) except POSKeyError: self.report(oid, path, log) number_of_issues.increment() else: refs = get_refs(state) stack.extend((o, path + (o,)) for o in set(refs) - seen) if not curstack: curstack = stack stack = deque([]) if number_of_issues.value() > 0: inline_print("[%s] CRITICAL [%-50s] %3.0d%% [%d Dangling References]\n" % (time.strftime("%Y-%m-%d %H:%M:%S"), '='*50, 100, number_of_issues.value())) else: inline_print("[%s] Verified [%-50s] %3.0d%%\n" % (time.strftime("%Y-%m-%d %H:%M:%S"), '='*50, 100)) return number_of_issues, len(seen), self._size def run(self, log, number_of_issues): print("[%s] Examining %d items in the '%s' database:" % (strftime("%Y-%m-%d %H:%M:%S", localtime()), self._size, self._dbname)) log.info("Examining %d items in %s database" % (self._size, self._dbname)) oid = '\x00\x00\x00\x00\x00\x00\x00\x01' with gc_cache_every(1000, self._db): reported, scanned, total = self.verify(oid, log, number_of_issues) if (100.0*scanned/total) < 90.0: print(" ** %3.2f%% of %s objects not reachable - examine your zenossdbpack settings **" % ((100.0-100.0*scanned/total), self._dbname)) log.info("%3.2f%% of %s objects not reachable - examine your zenossdbpack settings" % ((100.0-100.0*scanned/total), self._dbname)) print def parse_options(): """Defines command-line options for script """ parser = argparse.ArgumentParser(version=scriptVersion, description="Scans zodb for dangling references. 
Additional documentation at " "https://support.zenoss.com/hc/en-us/articles/203118175") parser.add_argument("-v10", "--debug", action="store_true", default=False, help="verbose log output (debug logging)") return vars(parser.parse_args()) def main(): """Scans through zodb hierarchy checking objects for dangling references""" execution_start = time.time() sys.path.append ("/opt/zenoss/Products/ZenModel") # From ZEN-12160 cli_options = parse_options() log = configure_logging('zodbscan') log.info("Command line options: %s" % (cli_options)) if cli_options['debug']: log.setLevel(logging.DEBUG) #logging.getLogger('relstorage').setLevel(logging.CRITICAL) #logging.getLogger('ZODB.Connection').setLevel(logging.CRITICAL) # Attempt to get the zenoss.toolbox lock before any actions performed if not get_lock("zenoss.toolbox", log): sys.exit(1) number_of_issues = Counter(0) PKEReporter('zodb').run(log, number_of_issues) log.info("%d Dangling References were detected" % (number_of_issues.value())) print("[%s] Execution finished in %s\n" % (strftime("%Y-%m-%d %H:%M:%S", localtime()), datetime.timedelta(seconds=int(time.time() - execution_start)))) log.info("zodbscan completed in %1.2f seconds" % (time.time() - execution_start)) log.info("############################################################") if (number_of_issues.value() > 0): print("** WARNING ** Dangling Reference(s) were detected - Consult KB article at") print(" https://support.zenoss.com/hc/en-us/articles/203118175\n") sys.exit(1) else: sys.exit(0) if __name__ == "__main__": main()
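# Sketch of the reachability scan behind PKEReporter.verify() above
# (condensed and illustrative): walk every oid reachable from the root,
# loading raw pickles straight from storage; an oid whose load() raises
# POSKeyError is a dangling reference, and the oid path that led to it
# identifies the parent holding the bad pointer.
from collections import deque
from ZODB.POSException import POSKeyError

def find_dangling(storage, root_oid, get_refs):
    '''get_refs: a callable like the generator above, yielding oids in a pickle'''
    seen, dangling = set(), []
    stack = deque([(root_oid, (root_oid,))])
    while stack:
        oid, path = stack.pop()
        if oid in seen:
            continue
        try:
            state = storage.load(oid)[0]
            seen.add(oid)
        except POSKeyError:
            dangling.append(path)  # the last oid in path is the missing object
        else:
            stack.extend((o, path + (o,)) for o in set(get_refs(state)) - seen)
    return dangling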
zenoss.toolbox
/zenoss.toolbox-0.5.2.tar.gz/zenoss.toolbox-0.5.2/src/zenoss/toolbox/zodbscan.py
zodbscan.py
#!/opt/zenoss/bin/python scriptVersion = "1.0.1" import argparse import datetime import Globals import logging import os import socket import sys import time import traceback import transaction from Products.ZenUtils.ZenScriptBase import ZenScriptBase from Products.Zuul.catalog.events import IndexingEvent from ZODB.transact import transact from zope.event import notify def configure_logging(scriptname): '''Configure logging for zenoss.toolbox tool usage''' # Confirm /tmp, $ZENHOME and check for $ZENHOME/log/toolbox (create if needed) if not os.path.exists('/tmp'): print "/tmp doesn't exist - aborting" exit(1) zenhome_path = os.getenv("ZENHOME") if not zenhome_path: print "$ZENHOME undefined - are you running as the zenoss user?" exit(1) log_file_path = os.path.join(zenhome_path, 'log', 'toolbox') if not os.path.exists(log_file_path): os.makedirs(log_file_path) # Setup "trash" toolbox log file (needed for ZenScriptBase log overriding) logging.basicConfig(filename='/tmp/toolbox.log.tmp', filemode='w', level=logging.INFO) # Create full path filename string for logfile, create RotatingFileHandler toolbox_log = logging.getLogger("%s" % (scriptname)) toolbox_log.setLevel(logging.INFO) log_file_name = os.path.join(zenhome_path, 'log', 'toolbox', '%s.log' % (scriptname)) handler = logging.handlers.RotatingFileHandler(log_file_name, maxBytes=8192*1024, backupCount=5) # Set logging.Formatter for format and datefmt, attach handler formatter = logging.Formatter('%(asctime)s,%(msecs)03d %(levelname)s %(name)s: %(message)s', '%Y-%m-%d %H:%M:%S') handler.setFormatter(formatter) handler.setLevel(logging.DEBUG) toolbox_log.addHandler(handler) # Print initialization string to console, log status to logfile print("\n[%s] Initializing %s (detailed log at %s)\n" % (time.strftime("%Y-%m-%d %H:%M:%S"), scriptname, log_file_name)) toolbox_log.info("Initializing %s" % (scriptname)) return toolbox_log def get_lock(process_name, log): '''Global lock function to keep multiple tools from running at once''' global lock_socket lock_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) try: lock_socket.bind('\0' + process_name) log.debug("Acquired '%s' execution lock" % (process_name)) except socket.error: print("[%s] Unable to acquire %s socket lock - are other tools already running?\n" % (time.strftime("%Y-%m-%d %H:%M:%S"), process_name)) log.error("'%s' lock already exists - unable to acquire - exiting" % (process_name)) log.info("############################################################") return False return True def inline_print(message): '''Print message on a single line using sys.stdout.write, .flush''' sys.stdout.write("\r%s" % (message)) sys.stdout.flush() @transact def index_device(dev, dmd, log): try: notify(IndexingEvent(dev)) dev.index_object(noips=True) except Exception as e: log.exception(e) for comp in dev.getDeviceComponentsNoIndexGen(): try: notify(IndexingEvent(comp)) comp.index_object() except Exception as e: log.exception(e) def reindex_dmd_objects(name, type, dmd, log): try: inline_print("[%s] Reindexing %s ... 
" % (time.strftime("%Y-%m-%d %H:%M:%S"), name)) if not (name == 'Devices'): object_reference = eval(type) object_reference.reIndex() print("finished") log.info("%s reIndex() completed successfully", name) else: # Special case for Devices, using method from altReindex ZEN-10793 log.info("Reindexing Devices") output_count = 0 for dev in dmd.Devices.getSubDevicesGen_recursive(): index_device(dev, dmd, log) output_count += 1 dev._p_deactivate() transaction.commit() if (output_count % 10) == 0: # sync after 10 devices dmd._p_jar.sync() if (output_count % 100) == 0: log.debug("Device Reindex has passed %d devices" % (output_count)) inline_print("[%s] Reindexing %s ... %8d devices processed" % (time.strftime("%Y-%m-%d %H:%M:%S"), "Devices", output_count)) inline_print("[%s] Reindexing %s ... finished " % (time.strftime("%Y-%m-%d %H:%M:%S"), "Devices")) print "" log.info("%d Devices reindexed successfully" % (output_count)) dmd._p_jar.sync() transaction.commit() except Exception as e: print " FAILED (check log file for details)" log.error("%s.reIndex() failed" % (name)) log.exception(e) def parse_options(): """Defines command-line options for script """ parser = argparse.ArgumentParser(version=scriptVersion, description="Reindexes top-level organizers. Documentation available at " "https://support.zenoss.com/hc/en-us/articles/203263689") parser.add_argument("-v10", "--debug", action="store_true", default=False, help="verbose log output (debug logging)") parser.add_argument("-l", "--list", action="store_true", default=False, help="output all supported reIndex() types") parser.add_argument("-t", "--type", action="store", default="", help="specify which type to reIndex()") return vars(parser.parse_args()) def main(): '''Performs reindex call on different DMD categories (used to be a part of zencatalogscan)''' execution_start = time.time() cli_options = parse_options() log = configure_logging('zenindextool') log.info("Command line options: %s" % (cli_options)) if cli_options['debug']: log.setLevel(logging.DEBUG) # Attempt to get the zenoss.toolbox lock before any actions performed if not get_lock("zenoss.toolbox", log): sys.exit(1) # Obtain dmd ZenScriptBase connection dmd = ZenScriptBase(noopts=True, connect=True).dmd log.debug("ZenScriptBase connection obtained") any_issue = False # Else build list of catalogs, then process catalog(s) and perform reindex if --fix types_to_reIndex = { 'Devices': 'dmd.Devices', 'Events': 'dmd.Events', 'Manufacturers': 'dmd.Manufacturers', 'Networks': 'dmd.Networks', 'Services': 'dmd.Services' } if cli_options['list'] or not cli_options['type'] : # Output list of present catalogs to the UI, perform no further operations print "List of dmd types that support reIndex() calls from this script:\n" print "\n".join(types_to_reIndex.keys()) log.info("Zenreindextool finished - list of supported types output to CLI") else: if cli_options['type'] in types_to_reIndex.keys(): reindex_dmd_objects(cli_options['type'], types_to_reIndex[cli_options['type']], dmd, log) else: print("Type '%s' unrecognized - unable to reIndex()" % (cli_options['type'])) log.error("CLI input '%s' doesn't match recognized types" % (cli_options['type'])) exit(1) # Print final status summary, update log file with termination block print("\n[%s] Execution finished in %s\n" % (time.strftime("%Y-%m-%d %H:%M:%S"), datetime.timedelta(seconds=int(time.time() - execution_start)))) log.info("zenindextool completed in %1.2f seconds" % (time.time() - execution_start)) 
log.info("############################################################") if __name__ == "__main__": main()
zenoss.toolbox
/zenoss.toolbox-0.5.2.tar.gz/zenoss.toolbox-0.5.2/src/zenoss/toolbox/zenindextool.py
zenindextool.py
#!/opt/zenoss/bin/python

scriptVersion = "1.2.1"

import argparse
import datetime
import Globals
import logging
import logging.handlers  # logging.handlers is not imported implicitly by 'import logging'
import os
import socket
import sys
import time
import traceback
import transaction

from Products.ZenUtils.ZenScriptBase import ZenScriptBase
from ZODB.transact import transact


def configure_logging(scriptname):
    '''Configure logging for zenoss.toolbox tool usage'''

    # Confirm /tmp, $ZENHOME and check for $ZENHOME/log/toolbox (create if needed)
    if not os.path.exists('/tmp'):
        print("/tmp doesn't exist - aborting")
        exit(1)
    zenhome_path = os.getenv("ZENHOME")
    if not zenhome_path:
        print("$ZENHOME undefined - are you running as the zenoss user?")
        exit(1)
    log_file_path = os.path.join(zenhome_path, 'log', 'toolbox')
    if not os.path.exists(log_file_path):
        os.makedirs(log_file_path)

    # Setup "trash" toolbox log file (needed for ZenScriptBase log overriding)
    logging.basicConfig(filename='/tmp/toolbox.log.tmp', filemode='w', level=logging.INFO)

    # Create full path filename string for logfile, create RotatingFileHandler
    toolbox_log = logging.getLogger("%s" % (scriptname))
    toolbox_log.setLevel(logging.INFO)
    log_file_name = os.path.join(zenhome_path, 'log', 'toolbox', '%s.log' % (scriptname))
    handler = logging.handlers.RotatingFileHandler(log_file_name, maxBytes=8192*1024, backupCount=5)

    # Set logging.Formatter for format and datefmt, attach handler
    formatter = logging.Formatter('%(asctime)s,%(msecs)03d %(levelname)s %(name)s: %(message)s',
                                  '%Y-%m-%d %H:%M:%S')
    handler.setFormatter(formatter)
    handler.setLevel(logging.DEBUG)
    toolbox_log.addHandler(handler)

    # Print initialization string to console, log status to logfile
    print("\n[%s] Initializing %s (detailed log at %s)\n" %
          (time.strftime("%Y-%m-%d %H:%M:%S"), scriptname, log_file_name))
    toolbox_log.info("Initializing %s" % (scriptname))
    return toolbox_log


def get_lock(process_name, log):
    '''Global lock function to keep multiple tools from running at once'''
    global lock_socket
    lock_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    try:
        lock_socket.bind('\0' + process_name)
        log.debug("Acquired '%s' execution lock" % (process_name))
    except socket.error:
        print("[%s] Unable to acquire %s socket lock - are other tools already running?\n" %
              (time.strftime("%Y-%m-%d %H:%M:%S"), process_name))
        log.error("'%s' lock already exists - unable to acquire - exiting" % (process_name))
        log.info("############################################################")
        return False
    return True


def inline_print(message):
    '''Print message on a single line using sys.stdout.write, .flush'''
    sys.stdout.write("\r%s" % (message))
    sys.stdout.flush()


def scan_progress_message(done, fix, cycle, catalog, issues, chunk, log):
    '''Handle output to screen and logfile, remove output from scan_catalog logic'''

    # Logic for log file output messages based on done, issues
    if not done:
        log.debug("Scan of %s catalog is %2d%% complete" % (catalog, 2*chunk))
    else:
        if issues > 0:
            log.warning("Scanned %s - found %d issue(s)" % (catalog, issues))
        else:
            log.info("No issues found scanning: %s" % (catalog))
        log.debug("Scan of %s catalog is complete" % (catalog))

    # Logic for screen output messages based on done, issues, and fix
    if issues > 0:
        if fix:
            if not done:
                inline_print("[%s] Cleaning [%-50s] %3d%% [%d Issues Detected]" %
                             (time.strftime("%Y-%m-%d %H:%M:%S"), '='*chunk, 2*chunk, issues))
            else:
                inline_print("[%s] Clean #%2.0d [%-50s] %3.0d%% [%d Issues Detected]\n" %
                             (time.strftime("%Y-%m-%d %H:%M:%S"), cycle, '='*50, 100, issues))
        else:
            if not done:
                inline_print("[%s] Scanning [%-50s] %3d%% [%d Issues Detected]" %
                             (time.strftime("%Y-%m-%d %H:%M:%S"), '='*chunk, 2*chunk, issues))
            else:
                inline_print("[%s] WARNING [%-50s] %3.0d%% [%d Issues Detected]\n" %
                             (time.strftime("%Y-%m-%d %H:%M:%S"), '='*50, 100, issues))
    else:
        if not done:
            inline_print("[%s] Scanning [%-50s] %3d%% " %
                         (time.strftime("%Y-%m-%d %H:%M:%S"), '='*chunk, 2*chunk))
        else:
            inline_print("[%s] Verified [%-50s] %3.0d%%\n" %
                         (time.strftime("%Y-%m-%d %H:%M:%S"), '='*50, 100))


def scan_catalog(catalog_name, catalog_list, fix, max_cycles, dmd, log):
    """Scan through a catalog looking for broken references"""

    catalog = catalog_list[0]
    initial_catalog_size = catalog_list[1]

    print("[%s] Examining %-35s (%d Objects)" %
          (time.strftime("%Y-%m-%d %H:%M:%S"), catalog_name, initial_catalog_size))
    log.info("Examining %s catalog with %d objects" % (catalog_name, initial_catalog_size))

    number_of_issues = -1
    current_cycle = 0
    if not fix:
        max_cycles = 1

    while (current_cycle < max_cycles) and (number_of_issues != 0):
        number_of_issues = 0
        current_cycle += 1
        if fix:
            log.info("Beginning cycle %d for catalog %s" % (current_cycle, catalog_name))
        scanned_count = 0
        progress_bar_chunk_size = 1
        chunk_number = 0  # initialized up front so the final progress call can't hit a NameError

        # ZEN-12165: show progress bar immediately before 'for' time overhead, before loading catalog
        scan_progress_message(False, fix, current_cycle, catalog_name, 0, 0, log)

        try:
            brains = catalog()
            catalog_size = len(brains)
            if catalog_size > 50:
                progress_bar_chunk_size = (catalog_size//50) + 1
        except Exception:
            raise

        for brain in brains:
            scanned_count += 1
            if (scanned_count % progress_bar_chunk_size) == 0:
                chunk_number = scanned_count // progress_bar_chunk_size
                scan_progress_message(False, fix, current_cycle, catalog_name,
                                      number_of_issues, chunk_number, log)
            try:
                test_reference = brain.getObject()
                test_reference._p_deactivate()
            except Exception:
                number_of_issues += 1
                object_path_string = brain.getPath()
                log.error("Catalog %s contains broken object %s" % (catalog_name, object_path_string))
                if fix:
                    log.info("Attempting to uncatalog %s" % (object_path_string))
                    try:
                        transact(catalog.uncatalog_object)(object_path_string)
                    except Exception as e:
                        log.exception(e)

        # Final transaction.abort() to try and free up used memory
        log.debug("Calling transaction.abort() to minimize memory footprint")
        transaction.abort()

        scan_progress_message(True, fix, current_cycle, catalog_name,
                              number_of_issues, chunk_number, log)

    if number_of_issues > 0:
        return True
    return False


def build_catalog_dict(dmd, log):
    """Builds a list of catalogs present and > 0 objects"""

    catalogs_to_check = {
        'CiscoUCS.ucsSearchCatalog': 'dmd.Devices.CiscoUCS.ucsSearchCatalog',
        'CloudStack.HostCatalog': 'dmd.Devices.CloudStack.HostCatalog',
        'CloudStack.RouterVMCatalog': 'dmd.Devices.CloudStack.RouterVMCatalog',
        'CloudStack.SystemVMCatalog': 'dmd.Devices.CloudStack.SystemVMCatalog',
        'CloudStack.VirtualMachineCatalog': 'dmd.Devices.CloudStack.VirtualMachineCatalog',
        'Devices.deviceSearch': 'dmd.Devices.deviceSearch',
        'Devices.searchRRDTemplates': 'dmd.Devices.searchRRDTemplates',
        'Events.eventClassSearch': 'dmd.Events.eventClassSearch',
        'global_catalog': 'dmd.global_catalog',
        'HP.Proliant.deviceSearch': 'dmd.Devices.Server.HP.Proliant.deviceSearch',
        'IPv6Networks.ipSearch': 'dmd.IPv6Networks.ipSearch',
        'JobManager.job_catalog': 'dmd.JobManager.job_catalog',
        'Layer2.macs_catalog': 'dmd.Devices.macs_catalog',
        'maintenanceWindowSearch': 'dmd.maintenanceWindowSearch',
        'Manufacturers.productSearch': 'dmd.Manufacturers.productSearch',
        'Mibs.mibSearch': 'dmd.Mibs.mibSearch',
        'Networks.ipSearch': 'dmd.Networks.ipSearch',
        'Services.serviceSearch': 'dmd.Services.serviceSearch',
        'Storage.iqnCatalog': 'dmd.Devices.Storage.iqnCatalog',
        'Storage.wwnCatalog': 'dmd.Devices.Storage.wwnCatalog',
        'vCloud.vCloudVMSearch': 'dmd.Devices.vCloud.vCloudVMSearch',
        'VMware.vmwareGuestSearch': 'dmd.Devices.VMware.vmwareGuestSearch',
        'vSphere.lunCatalog': 'dmd.Devices.vSphere.lunCatalog',
        'vSphere.pnicCatalog': 'dmd.Devices.vSphere.pnicCatalog',
        'vSphere.vnicCatalog': 'dmd.Devices.vSphere.vnicCatalog',
        'XenServer.PIFCatalog': 'dmd.Devices.XenServer.PIFCatalog',
        'XenServer.VIFCatalog': 'dmd.Devices.XenServer.VIFCatalog',
        'XenServer.XenServerCatalog': 'dmd.Devices.XenServer.XenServerCatalog',
        'ZenLinkManager.layer2_catalog': 'dmd.ZenLinkManager.layer2_catalog',
        'ZenLinkManager.layer3_catalog': 'dmd.ZenLinkManager.layer3_catalog',
        'zenPackPersistence': 'dmd.zenPackPersistence'
    }

    log.debug("Checking %d supported catalogs for (presence, not empty)" % (len(catalogs_to_check)))
    intermediate_catalog_dict = {}

    for catalog in catalogs_to_check.keys():
        try:
            temp_brains = eval(catalogs_to_check[catalog])
            if len(temp_brains) > 0:
                log.debug("Catalog %s exists, has items - adding to list" % (catalog))
                intermediate_catalog_dict[catalog] = [eval(catalogs_to_check[catalog]), len(temp_brains)]
            else:
                log.debug("Skipping catalog %s - exists but has no items" % (catalog))
        except AttributeError:
            log.debug("Skipping catalog %s - catalog not found" % (catalog))
        except Exception as e:
            log.exception(e)

    return intermediate_catalog_dict


def parse_options():
    """Defines command-line options for script"""
    parser = argparse.ArgumentParser(version=scriptVersion,
                                     description="Scans catalogs for broken references. WARNING: Before using with --fix "
                                                 "you must first confirm zodbscan, findposkeyerror, and zenrelationscan return "
                                                 "clean. Documentation at "
                                                 "https://support.zenoss.com/hc/en-us/articles/203118075")
    parser.add_argument("-v10", "--debug", action="store_true", default=False,
                        help="verbose log output (debug logging)")
    parser.add_argument("-f", "--fix", action="store_true", default=False,
                        help="attempt to remove any invalid references")
    parser.add_argument("-n", "--cycles", action="store", default="12", type=int,
                        help="maximum times to cycle (with --fix)")
    parser.add_argument("-l", "--list", action="store_true", default=False,
                        help="output all supported catalogs")
    parser.add_argument("-c", "--catalog", action="store", default="",
                        help="only scan/fix specified catalog")
    return vars(parser.parse_args())


def main():
    '''Scans catalogs for broken references. Builds the list of available
    non-empty catalogs and, if --fix, attempts to remove broken references.'''

    execution_start = time.time()
    cli_options = parse_options()
    log = configure_logging('zencatalogscan')
    log.info("Command line options: %s" % (cli_options))
    if cli_options['debug']:
        log.setLevel(logging.DEBUG)

    # Attempt to get the zenoss.toolbox lock before any actions performed
    if not get_lock("zenoss.toolbox", log):
        sys.exit(1)

    # Obtain dmd ZenScriptBase connection
    dmd = ZenScriptBase(noopts=True, connect=True).dmd
    log.debug("ZenScriptBase connection obtained")

    any_issue = False
    unrecognized_catalog = False

    # Build list of catalogs, then process catalog(s)
    present_catalog_dict = build_catalog_dict(dmd, log)
    if cli_options['list']:
        # Output list of present catalogs to the UI, perform no further operations
        print("List of supported Zenoss catalogs to examine:\n")
        print("\n".join(present_catalog_dict.keys()))
        log.info("Zencatalogscan finished - list of supported catalogs output to CLI")
    else:
        # Scan through catalog(s) depending on --catalog parameter
        if cli_options['catalog']:
            if cli_options['catalog'] in present_catalog_dict.keys():
                # Catalog provided as parameter is present - scan just that catalog
                any_issue = scan_catalog(cli_options['catalog'],
                                         present_catalog_dict[cli_options['catalog']],
                                         cli_options['fix'], cli_options['cycles'], dmd, log)
            else:
                unrecognized_catalog = True
                print("Catalog '%s' unrecognized - unable to scan" % (cli_options['catalog']))
                log.error("CLI input '%s' doesn't match recognized catalogs" % (cli_options['catalog']))
        else:
            # Else scan for all catalogs in present_catalog_dict
            for catalog in present_catalog_dict.keys():
                any_issue = scan_catalog(catalog, present_catalog_dict[catalog],
                                         cli_options['fix'], cli_options['cycles'],
                                         dmd, log) or any_issue

    # Print final status summary, update log file with termination block
    print("\n[%s] Execution finished in %s\n" %
          (time.strftime("%Y-%m-%d %H:%M:%S"),
           datetime.timedelta(seconds=int(time.time() - execution_start))))
    log.info("zencatalogscan completed in %1.2f seconds" % (time.time() - execution_start))
    log.info("############################################################")

    if any_issue and not cli_options['fix']:
        print("** WARNING ** Issues were detected - Consult KB article at")
        print("      https://support.zenoss.com/hc/en-us/articles/203118075\n")
        sys.exit(1)
    else:
        sys.exit(0)


if __name__ == "__main__":
    main()
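
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original tool): the essential detection
# pattern above is small enough to show in isolation. A stale catalog entry is
# found by attempting to wake the object its brain points at; the function
# name below is a hypothetical stand-in, without the logging and progress
# machinery of scan_catalog().
def find_broken_paths(catalog):
    """Return paths of catalog entries whose objects can no longer be loaded.

    Assumes `catalog` is callable (returning brains) and each brain supports
    getObject()/getPath(), as with Zope's ZCatalog.
    """
    broken = []
    for brain in catalog():
        try:
            obj = brain.getObject()   # raises if the indexed object is gone
            obj._p_deactivate()       # ghost the object again to cap memory usage
        except Exception:
            broken.append(brain.getPath())
    return broken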
zenoss.toolbox
/zenoss.toolbox-0.5.2.tar.gz/zenoss.toolbox-0.5.2/src/zenoss/toolbox/zencatalogscan.py
zencatalogscan.py
# Zenpass

[![PyPI](https://img.shields.io/pypi/v/zenpass)](https://pypi.python.org/pypi/zenpass)
[![Pypi - License](https://img.shields.io/github/license/codesrg/zenpass)](https://github.com/codesrg/zenpass/blob/main/LICENSE)
[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/zenpass?color=red)](https://pypi.python.org/pypi/zenpass)

To generate random and strong passwords.

## Installation

`pip install -U zenpass`

## Usage

```
usage: zenpass [options]

optional arguments:
  -h, --help        show this help message and exit
  -v, --version     show version number and exit.

to customize Password:
  -l , --length     to set length to the password
  -n , --ignore     to ignore unwanted characters to the password
  -i , --include    to include characters to the password
  -o , --only       to create password only using wanted characters
  -s , --separator  the separator character
  -c , --seplen     the length of characters between separator
  --repeat          to repeat the characters in the password (default : False)
  --separation      to separate password characters using separator (default : False)
  --show            to show password (default : False)

keywords: [alphabets, uppercase, lowercase, numbers, symbols]
can be given as input for following params: ignore, include, only
```

### Python Script

To generate a random password.

```
from zenpass import PasswordGenerator

pg = PasswordGenerator()
pg.generate()
```

### Command Line

To generate a random password.

```
$ zenpass
Password copied to clipboard.
```

### To set the password length

Default password length is `8-16`.

```
$ zenpass -l 10 --show
Password: Q3m/vro|uR
Password copied to clipboard.
```

### Whether the characters in passwords repeat or not

Default value of `repeat` is `False`.

```
$ zenpass -r --show
Password: 96Ndl;1D$jQu4Z2
Password copied to clipboard.
```

### To include, ignore or use only `'alphabets'`, `'numbers'`, `'uppercase'`, `'lowercase'`, `'symbols'` and `random characters` in generating password.

### To ignore `numbers` in passwords.

```
$ zenpass -n numbers --show
Password: uyMXP‘$!ZSCYqzj
Password copied to clipboard.
```

### To ignore characters `a,b,c,d,e`

```
$ zenpass -n abcde --show
Password: ~}t"R‘jF'ksG8~E
Password copied to clipboard.
```

### To create a password only using `special characters`.

```
$ zenpass -o symbols -l 15 --show
Password: ?)".=-_^[_‘~{.)
Password copied to clipboard.
```

### To include `a,b,c,d,e` characters in a password.

```
$ zenpass -o numbers -i abcde -l 15 --show
Password: 78713d1e3d926a3
Password copied to clipboard.
```

### To separate characters in a password using separator.

```
$ zenpass -o uppercase --separation -l 16 --show
Password: YNQC-RKBF-DMAT-UVIP
Password copied to clipboard.
```

### To separate characters in a password using separator `_` with `5` characters between each separator.

```
$ zenpass -o uppercase --separation -l 15 -s _ -c 5 --show
Password: YNQCR_KBFDM_ATUVI
Password copied to clipboard.
```

## Issues:

If you encounter any problems, please file an [issue](https://github.com/codesrg/zenpass/issues) along with a detailed description.
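
### How separation works

The `--separation` behaviour shown above is easy to reason about: the generated password is cut into groups of `seplen` characters and rejoined with the separator. A rough sketch of that grouping step, inferred from the documented examples (an illustration only, not zenpass's actual implementation):

```
def separate(password, separator="-", seplen=4):
    chunks = [password[i:i + seplen] for i in range(0, len(password), seplen)]
    return separator.join(chunks)

print(separate("YNQCRKBFDMATUVIP"))  # YNQC-RKBF-DMAT-UVIP
```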
zenpass
/zenpass-1.0.5.tar.gz/zenpass-1.0.5/README.md
README.md
[![Build Status](https://travis-ci.org/facetoe/zenpy.svg?branch=master)](https://travis-ci.org/facetoe/zenpy)

# Zenpy

Zenpy is a Python wrapper for the Zendesk, Chat and HelpCentre APIs. The goal of the project is to make it easy to write clean, fast, Pythonic code when interacting with Zendesk programmatically. The wrapper tries to keep API calls to a minimum. Wherever it makes sense objects are cached, and attributes of objects that would trigger an API call are evaluated lazily.

Zenpy supports both Python2 and Python3.

Please report bugs!

* [Quickstart](#quickstart)
* [Examples](#examples)
    * Ticketing
        * [Creating a ticket with a different requester](#creating-a-ticket-with-a-different-requester)
        * [Commenting on a ticket](#commenting-on-a-ticket)
        * [Adding a HTML comment to a ticket](#adding-a-html-comment-to-a-ticket)
        * [Appending tags to a ticket](#appending-tags-to-a-ticket)
        * [Uploading an attachment](#uploading-an-attachment)
        * [Creating a ticket with a custom field set](#creating-a-ticket-with-a-custom-field-set)
        * [Updating a custom field on a ticket](#updating-a-custom-field-on-a-ticket)
        * [Applying a Macro to a ticket](#applying-a-macro-to-a-ticket)
    * Users
        * [Adding a photo to a user](#adding-a-photo-to-a-user)
    * Help center
        * [List all categories from help center](#list-all-categories-from-help-center)
        * [List all help center articles](#list-all-help-center-articles)
        * [List all help center articles in a section](#list-all-help-center-articles-in-a-section)
        * [Create new category in help center](#create-new-category-in-help-center)
        * [Create new section in help center](#create-new-section-in-help-center)
        * [Create new article in help center](#create-new-article-in-help-center)
    * Other
        * [Working with webhooks](#working-with-webhooks)
        * [Pagination](#pagination)
* [Documentation](#documentation)
* [Contributions](#contributions)

## Quickstart

```python
from zenpy import Zenpy
from zenpy.lib.api_objects import Ticket

# Create a Zenpy instance
zenpy_client = Zenpy(**credentials)

# Create a new ticket
zenpy_client.tickets.create(Ticket(subject="Important", description="Thing"))

# Perform a simple search
for ticket in zenpy_client.search('PC LOAD LETTER', type='ticket', assignee='facetoe'):
    # No need to mess around with ids, linked objects can be accessed directly.
    print(ticket.requester.name)

    # All objects can be converted to a Python dict.
    print(ticket.to_dict())

    # Or to JSON.
    print(ticket.to_json())
```

## Examples

##### Searching open and pending tickets for a specific user and sorting them in descending order

```python
zenpy_client.search(type='ticket', status_less_than='closed', assignee='[email protected]', sort_order='desc')
```

##### Searching only opened tickets

```python
zenpy_client.search(type='ticket', status='open')
```

##### Exporting all tickets matching the query

By default, the Search API has a limit of 1000 results in total. The Search Export API allows exporting an unlimited number of results, so if you'd like to export all results, use this method instead:

```python
for ticket in zenpy_client.search_export(type='ticket', status='open'):
    print(ticket)
```

Read more about these limitations:

[Search results limits](https://developer.zendesk.com/api-reference/ticketing/ticket-management/search/#results-limit)

[Search Export API release notes](https://support.zendesk.com/hc/en-us/articles/4408825120538-Support-API-Announcing-the-Export-Search-Results-endpoint-)

##### Creating a ticket with a different requester

```python
from zenpy.lib.api_objects import Ticket, User

zenpy_client.tickets.create(
    Ticket(description='Some description',
           requester=User(name='bob', email='[email protected]'))
)
```

##### Commenting on a ticket

```python
from zenpy.lib.api_objects import Comment

ticket = zenpy_client.tickets(id=some_ticket_id)
ticket.comment = Comment(body="Important private comment", public=False)
zenpy_client.tickets.update(ticket)
```

##### Adding a HTML comment to a ticket

```python
from zenpy.lib.api_objects import Ticket, Comment

zenpy_client.tickets.create(Ticket(
    subject='Html comment example',
    comment=Comment(body='The smoke is very colorful',
                    html_body='<h2>The smoke is <i>very</i> colourful</h2>'))
)
```

##### Appending tags to a ticket

```python
from zenpy.lib.api_objects import Ticket

ticket = zenpy_client.tickets(id=some_ticket_id)
ticket.tags.extend(['onetag', 'twotag', 'threetag', 'four'])
zenpy_client.tickets.update(ticket)
```

##### Uploading an attachment

```python
from zenpy.lib.api_objects import Comment

# Upload the file (or file-like object) to Zendesk and obtain an Upload instance
upload_instance = zenpy_client.attachments.upload('/tmp/awesome_file.txt')

ticket = zenpy_client.tickets(id=some_ticket_id)
ticket.comment = Comment(body='This comment has my file attached',
                         uploads=[upload_instance.token])
zenpy_client.tickets.update(ticket)
```

##### Creating a ticket with a custom field set

```python
from zenpy.lib.api_objects import CustomField, Ticket

ticket_audit = zenpy_client.tickets.create(Ticket(
    subject='Has custom field',
    description="Wow, such field",
    custom_fields=[CustomField(id=43528467, value=1337)]
))
```

##### Updating a custom field on a ticket

```python
from zenpy.lib.api_objects import CustomField

ticket = zenpy_client.tickets(id=some_ticket_id)
ticket.custom_fields.append(CustomField(id=43528467, value=1337))
zenpy_client.tickets.update(ticket)
```

##### Applying a Macro to a ticket

```python
# Execute the show_macro_effect() method which returns what the macro *would* do.
# The method accepts either Zenpy objects or ids.
macro_result = zenpy_client.tickets.show_macro_effect(ticket_id_or_object, macro_id_or_object)

# Update the ticket to actually change the ticket.
zenpy_client.tickets.update(macro_result.ticket)
```

##### Adding a photo to a user

```python
user = zenpy_client.users(id=user_id)
user.remote_photo_url = 'http://domain/example_photo.jpg'
zenpy_client.users.update(user)
```

##### List all categories from help center

```python
categories = zenpy_client.help_center.categories()
for category in categories:
    pass
```

##### List all help center articles

```python
articles = zenpy_client.help_center.articles(section=section)
for article in articles:
    pass
```

##### List all help center articles in a section

```python
section = zenpy_client.help_center.categories.sections(category_id=category.id)
articles = zenpy_client.help_center.sections.articles(section=section)
for article in articles:
    pass
```

##### Create new category in help center

```python
from datetime import datetime

from zenpy import Zenpy
from zenpy.lib.api_objects.help_centre_objects import Category

new_category = zenpy_client.help_center.categories.create(
    Category(
        name="Category name",
        description="Category description",
        locale="en-us",
        created_at=datetime.now(),
        updated_at=datetime.now()
    )
)
print(new_category.to_dict(serialize=True))
```

##### Create new section in help center

```python
from datetime import datetime

from zenpy import Zenpy
from zenpy.lib.api_objects.help_centre_objects import Section

new_section = zenpy_client.help_center.sections.create(
    Section(
        name="Section name",
        description="Section description",
        category_id=new_category.id,
        locale="en-us",
        created_at=datetime.now(),
        updated_at=datetime.now()
    )
)
print(new_section.to_dict(serialize=True))
```

##### Create new article in help center

```python
from datetime import datetime

from zenpy import Zenpy
from zenpy.lib.api_objects.help_centre_objects import Article

new_article = zenpy_client.help_center.articles.create(
    section=new_section.id,
    article=Article(
        name="Article Name",
        body="<p>Article html content body</p>",
        locale="en-us",
        title="Article title",
        section_id=new_section.id,
        created_at=datetime.now(),
        updated_at=datetime.now()
    ),
)
print(new_article.to_dict(serialize=True))
```

##### Working with webhooks

###### Show a webhook

```python
webhook = zenpy_client.webhooks(id=WEBHOOK_ID)
```

###### List webhooks

```python
# Just list all the webhooks
for webhook in zenpy_client.webhooks.list():
    pass  # Do something with it

# Filter the webhooks by a string in the name
for webhook in zenpy_client.webhooks.list(filter='some string'):
    pass  # Do something with it

# Using sorting and pagination according to
# https://developer.zendesk.com/api-reference/event-connectors/webhooks/webhooks/#list-webhooks
zenpy_client.webhooks.list(sort='name')
zenpy_client.webhooks.list(page_before=X, page_size=Y)
zenpy_client.webhooks.list(page_after=N, page_size=Y)
```

###### Creating a webhook that uses basic authentication

```python
from zenpy.lib.api_objects import Webhook

new_webhook = Webhook(
    authentication={
        "add_position": "header",
        "data": {
            "password": "hello_123",
            "username": "john_smith"
        },
        "type": "basic_auth"
    },
    endpoint="https://example.com/status/200",
    http_method="GET",
    name="Example Webhook",
    description="Webhook description",
    request_format="json",
    status="active",
    subscriptions=["conditional_ticket_events"],
)
zenpy_client.webhooks.create(new_webhook)
```

###### Creating a webhook that uses no authentication

```python
new_webhook = Webhook(
    endpoint="https://example.com/status/200",
    http_method="GET",
    name="Example Webhook",
    description="Webhook description",
    request_format="json",
    status="active",
    subscriptions=["conditional_ticket_events"],
)
zenpy_client.webhooks.create(new_webhook)
```

###### Creating a webhook that uses bearer token authentication

```python
new_webhook = Webhook(
    authentication={
        "add_position": "header",
        "data": {
            "token": "{{token}}"
        },
        "type": "bearer_token"
    },
    # other fields
)
zenpy_client.webhooks.create(new_webhook)
```

###### Updating a webhook

```python
from zenpy.lib.api_objects import Webhook

webhook = zenpy_client.webhooks(id=WEBHOOK_ID)

# Note: We need a brand new object because of API specific requirements for 'update'
# https://developer.zendesk.com/api-reference/event-connectors/webhooks/webhooks/#update-webhook
new_webhook = Webhook(
    name="New name",
    request_format="json",
    http_method="GET",
    endpoint="https://example.com/status/200",
    status="active",
    authentication={
        "add_position": "header",
        "data": {
            "password": "hello_123",  # As we can't get it back we need to pass it again from scratch
            "username": "john_smith"
        },
        "type": "basic_auth"
    },
)

response = zenpy_client.webhooks.update(webhook.id, new_webhook)
```

###### Partially updating (patching) a webhook

```python
webhook = zenpy_client.webhooks(id=WEBHOOK_ID)
webhook.name = 'A new name'
response = zenpy_client.webhooks.patch(webhook)
```

###### Cloning a webhook

```python
from zenpy.lib.api_objects import Webhook

an_existing_webhook = zenpy_client.webhooks(id=WEBHOOK_ID)
new_webhook = zenpy_client.webhooks.clone(an_existing_webhook)

# Or just
new_webhook = zenpy_client.webhooks.clone(WEBHOOK_ID)
```

###### Working with secrets

```python
secret = zenpy_client.webhooks.show_secret(webhook)
print(secret.secret)

secret = zenpy_client.webhooks.reset_secret(webhook)
print(secret.secret)
```

###### Testing webhooks

```python
# Testing an existing webhook "as is"
response = zenpy_client.webhooks.test(webhook)

# Testing an existing webhook with modifications
response = zenpy_client.webhooks.test(
    webhook,
    request=dict(
        endpoint='https://example.org/'
    )
)

# Sending a test request without creating a webhook
response = zenpy_client.webhooks.test(
    request=dict(
        endpoint="https://example.org",
        request_format="json",
        http_method="GET",
    )
)
```

##### Pagination

Please refer to the [official documentation](https://developer.zendesk.com/api-reference/introduction/pagination/) to get details. Also check this article: [Which endpoints are supported?](https://support.zendesk.com/hc/en-us/articles/4408846180634#h_01FF626TG8VD0W4JP9DBBSXESK)

```python
# The old style offset pagination, not recommended. Since August 15, 2023, it is limited to 100 pages.
fields = zenpy_client.ticket_fields()
# Or
fields = zenpy_client.ticket_fields(cursor_pagination=False)

# The new cursor-based pagination
fields = zenpy_client.ticket_fields(cursor_pagination=True)  # equal to 100 results per page
# Or
fields = zenpy_client.ticket_fields(cursor_pagination=50)  # 50 results per page
```

## Documentation

Check out the [documentation](http://docs.facetoe.com.au/) for more info.

### Contributions

Contributions are very welcome. I've written an explanation of the core ideas of the wrapper in the [Contributors Guide](https://github.com/facetoe/zenpy/wiki/Contributors-Guide).
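
##### Combining search and updates

The pieces above compose naturally. As a hedged sketch (the assignee address and tag name are made up), this bulk-tags every open ticket returned by a search, using only the `search` and `tickets.update` calls demonstrated earlier:

```python
from zenpy import Zenpy

zenpy_client = Zenpy(**credentials)

for ticket in zenpy_client.search(type='ticket', status='open', assignee='[email protected]'):
    ticket.tags.extend(['audited'])
    zenpy_client.tickets.update(ticket)
```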
zenpy
/zenpy-2.0.35.tar.gz/zenpy-2.0.35/README.md
README.md
import logging

import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3 import Retry

from zenpy.lib.api import (
    UserApi,
    Api,
    TicketApi,
    OrganizationApi,
    SuspendedTicketApi,
    EndUserApi,
    TicketImportAPI,
    RequestAPI,
    OrganizationMembershipApi,
    AttachmentApi,
    SharingAgreementAPI,
    SatisfactionRatingApi,
    MacroApi,
    GroupApi,
    ViewApi,
    SlaPolicyApi,
    ChatApi,
    GroupMembershipApi,
    HelpCentreApi,
    RecipientAddressApi,
    NpsApi,
    TicketFieldApi,
    TriggerApi,
    AutomationApi,
    DynamicContentApi,
    TargetApi,
    BrandApi,
    TicketFormApi,
    OrganizationFieldsApi,
    JiraLinkApi,
    SkipApi,
    TalkApi,
    TalkPEApi,
    CustomAgentRolesApi,
    SearchApi,
    SearchExportApi,
    UserFieldsApi,
    ZISApi,
    WebhooksApi,
    LocalesApi
)
from zenpy.lib.cache import ZenpyCache, ZenpyCacheManager
from zenpy.lib.endpoint import EndpointFactory
from zenpy.lib.exception import ZenpyException
from zenpy.lib.mapping import ZendeskObjectMapping

log = logging.getLogger()

__author__ = "facetoe"
__version__ = "2.0.10"


class Zenpy(object):
    """"""

    DEFAULT_TIMEOUT = 60.0

    def __init__(
        self,
        domain="zendesk.com",
        subdomain=None,
        email=None,
        token=None,
        oauth_token=None,
        password=None,
        session=None,
        anonymous=False,
        timeout=None,
        ratelimit_budget=None,
        proactive_ratelimit=None,
        proactive_ratelimit_request_interval=10,
        disable_cache=False,
    ):
        """
        Python Wrapper for the Zendesk API.

        There are several ways to authenticate with the Zendesk API:
            * Email and password
            * Email and Zendesk API token
            * Email and OAuth token
            * Existing authenticated Requests Session object.

        :param subdomain: your Zendesk subdomain
        :param email: email address
        :param token: Zendesk API token
        :param oauth_token: OAuth token
        :param password: Zendesk password
        :param session: existing Requests Session object
        :param timeout: global timeout on API requests.
        :param ratelimit_budget: maximum time to spend being rate limited
        :param proactive_ratelimit: user specified rate limit.
        :param proactive_ratelimit_request_interval: seconds to wait when over proactive_ratelimit.
        :param disable_cache: disable caching of objects
        """
        session = self._init_session(email, token, oauth_token, password, session, anonymous)
        timeout = timeout or self.DEFAULT_TIMEOUT
        self.cache = ZenpyCacheManager(disable_cache)
        config = dict(
            domain=domain,
            subdomain=subdomain,
            session=session,
            timeout=timeout,
            ratelimit=int(proactive_ratelimit) if proactive_ratelimit is not None else None,
            ratelimit_budget=int(ratelimit_budget) if ratelimit_budget is not None else None,
            ratelimit_request_interval=int(proactive_ratelimit_request_interval),
            cache=self.cache,
        )
        self.users = UserApi(config)
        self.user_fields = UserFieldsApi(config)
        self.groups = GroupApi(config)
        self.macros = MacroApi(config)
        self.organizations = OrganizationApi(config)
        self.organization_memberships = OrganizationMembershipApi(config)
        self.organization_fields = OrganizationFieldsApi(config)
        self.tickets = TicketApi(config)
        self.suspended_tickets = SuspendedTicketApi(
            config, object_type="suspended_ticket"
        )
        self.search = SearchApi(config)
        self.search_export = SearchExportApi(config)
        self.topics = Api(config, object_type="topic")
        self.attachments = AttachmentApi(config)
        self.brands = BrandApi(config, object_type="brand")
        self.job_status = Api(
            config, object_type="job_status", endpoint=EndpointFactory("job_statuses")
        )
        self.jira_links = JiraLinkApi(config)
        self.tags = Api(config, object_type="tag")
        self.satisfaction_ratings = SatisfactionRatingApi(config)
        self.sharing_agreements = SharingAgreementAPI(config)
        self.skips = SkipApi(config)
        self.activities = Api(config, object_type="activity")
        self.group_memberships = GroupMembershipApi(config)
        self.end_user = EndUserApi(config)
        self.ticket_metrics = Api(config, object_type="ticket_metric")
        self.ticket_metric_events = Api(config, object_type="ticket_metric_events")
        self.ticket_fields = TicketFieldApi(config)
        self.ticket_forms = TicketFormApi(config, object_type="ticket_form")
        self.ticket_import = TicketImportAPI(config)
        self.requests = RequestAPI(config)
        self.chats = ChatApi(config, endpoint=EndpointFactory("chats"))
        self.views = ViewApi(config)
        self.sla_policies = SlaPolicyApi(config)
        self.help_center = HelpCentreApi(config)
        self.recipient_addresses = RecipientAddressApi(config)
        self.nps = NpsApi(config)
        self.triggers = TriggerApi(config, object_type="trigger")
        self.automations = AutomationApi(config, object_type="automation")
        self.dynamic_content = DynamicContentApi(config)
        self.targets = TargetApi(config, object_type="target")
        self.talk = TalkApi(config)
        self.talk_pe = TalkPEApi(config)
        self.custom_agent_roles = CustomAgentRolesApi(
            config, object_type="custom_agent_role"
        )
        self.zis = ZISApi(config)
        self.webhooks = WebhooksApi(config)
        self.locales = LocalesApi(config)

    @staticmethod
    def http_adapter_kwargs():
        """
        Provides Zenpy's default HTTPAdapter args for those users providing their own adapter.
        """
        return dict(
            # Transparently retry requests that are safe to retry, with the exception of 429.
            # This is handled in the Api._call_api() method.
            max_retries=Retry(
                total=3,
                status_forcelist=[
                    r for r in Retry.RETRY_AFTER_STATUS_CODES if r != 429
                ],
                respect_retry_after_header=False,
            )
        )

    def _init_session(self, email, token, oath_token, password, session, anonymous):
        if not session:
            session = requests.Session()
            # Workaround for possible race condition - https://github.com/kennethreitz/requests/issues/3661
            session.mount("https://", HTTPAdapter(**self.http_adapter_kwargs()))

        if (not hasattr(session, "authorized") or not session.authorized) and not anonymous:
            # session is not an OAuth session that has been authorized, so authorize the session.
            if not password and not token and not oath_token:
                raise ZenpyException(
                    "password, token or oauth_token are required! {}".format(locals())
                )
            elif password and token:
                raise ZenpyException("Password and token are mutually exclusive!")

            if password:
                session.auth = (email, password)
            elif token:
                session.auth = ("%s/token" % email, token)
            elif oath_token:
                session.headers.update({"Authorization": "Bearer %s" % oath_token})
            else:
                raise ZenpyException("Invalid arguments to _init_session()!")

        # If a session with a custom user agent has been passed, don't clobber it
        if "User-Agent" in session.headers:
            user_agent = session.headers.get("User-Agent")
            if user_agent == requests.utils.default_user_agent():
                user_agent = "Zenpy/{}".format(__version__)
                session.headers.update({"User-Agent": user_agent})
        return session

    def get_cache_names(self):
        """ Returns a list of current caches """
        return self.cache.mapping.keys()

    def get_cache_max(self, cache_name):
        """ Returns the maxsize attribute of the named cache """
        return self._get_cache(cache_name).maxsize

    def set_cache_max(self, cache_name, maxsize, **kwargs):
        """ Sets the maxsize attribute of the named cache """
        cache = self._get_cache(cache_name)
        cache.set_maxsize(maxsize, **kwargs)

    def get_cache_impl_name(self, cache_name):
        """ Returns the name of the cache implementation for the named cache """
        return self._get_cache(cache_name).impl_name

    def set_cache_implementation(self, cache_name, impl_name, maxsize, **kwargs):
        """ Changes the cache implementation for the named cache """
        self._get_cache(cache_name).set_cache_impl(impl_name, maxsize, **kwargs)

    def add_cache(self, object_type, cache_impl_name, maxsize, **kwargs):
        """ Add a new cache for the named object type and cache implementation """
        if object_type not in ZendeskObjectMapping.class_mapping:
            raise ZenpyException("No such object type: %s" % object_type)
        self.cache.mapping[object_type] = ZenpyCache(cache_impl_name, maxsize, **kwargs)

    def delete_cache(self, cache_name):
        """ Deletes the named cache """
        del self.cache.mapping[cache_name]

    def purge_cache(self, cache_name):
        """ Purges the named cache. """
        self.cache.purge_cache(cache_name)

    def disable_caching(self):
        """ Disable caching of objects. """
        self.cache.disable()

    def enable_caching(self):
        """ Enable caching of objects. """
        self.cache.enable()

    def caching_status(self):
        """ Returns caching status. """
        return self.cache.status()

    def caching_engines(self):
        """ Returns available caching engines. """
        return self.cache.get_cache_engines()

    def _get_cache(self, cache_name):
        if cache_name not in self.cache.mapping:
            raise ZenpyException("No such cache - %s" % cache_name)
        else:
            return self.cache.mapping[cache_name]
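
# ---------------------------------------------------------------------------
# Usage sketch (illustrative; the subdomain, email and token are placeholders).
# API-token authentication is the middle branch of _init_session() above,
# which sets session.auth = ("<email>/token", "<token>"). Constructing the
# client performs no network calls.
if __name__ == "__main__":  # pragma: no cover - documentation example
    client = Zenpy(subdomain="example", email="me@example.com", token="not-a-real-token")
    print(client.get_cache_names())
    # Passing both `password` and `token` raises ZenpyException, as does a
    # non-anonymous session with no credentials at all (see _init_session).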
zenpycbp
/zenpycbp-2.0.27-py3-none-any.whl/zenpy/__init__.py
__init__.py
import calendar
import logging
import re

import pytz
from datetime import datetime, date

from zenpy.lib.proxy import ProxyDict, ProxyList

FIRST_CAP_REGEX = re.compile('(.)([A-Z][a-z]+)')
ALL_CAP_REGEX = re.compile('([a-z0-9])([A-Z])')

log = logging.getLogger(__name__)


def to_snake_case(name):
    """ Given a name in camelCase, return it in snake_case """
    s1 = FIRST_CAP_REGEX.sub(r'\1_\2', name)
    return ALL_CAP_REGEX.sub(r'\1_\2', s1).lower()


def to_unix_ts(start_time):
    """ Given a datetime object, return its value as a unix timestamp """
    if isinstance(start_time, datetime):
        if is_timezone_aware(start_time):
            start_time = start_time.astimezone(pytz.utc)
        else:
            log.warning(
                "Non timezone-aware datetime object passed to IncrementalEndpoint. "
                "The Zendesk API expects UTC time, if this is not the case results will be incorrect!"
            )
        unix_time = calendar.timegm(start_time.timetuple())
    else:
        unix_time = start_time
    return int(unix_time)


def get_object_type(zenpy_object):
    """ Given an instance of a Zenpy object, return its object type """
    return to_snake_case(zenpy_object.__class__.__name__)


def is_timezone_aware(datetime_obj):
    """ Determine whether or not a given datetime object is timezone aware. """
    return datetime_obj.tzinfo is not None and datetime_obj.tzinfo.utcoffset(
        datetime_obj) is not None


def is_iterable_but_not_string(obj):
    """ Determine whether or not obj is iterable but not a string (eg, a list, set, tuple etc). """
    return hasattr(obj, '__iter__') and not isinstance(
        obj, str) and not isinstance(obj, bytes)


def as_singular(result_key):
    """ Given a result key, return it in the singular form """
    if result_key.endswith('ies'):
        return re.sub('ies$', 'y', result_key)
    elif result_key.endswith('uses'):
        return re.sub("uses$", "us", result_key)
    elif result_key.endswith('addresses'):  # Special case for '*addresses'
        return result_key[:-2]
    elif result_key.endswith('s'):
        return result_key[:-1]
    else:
        return result_key


def as_plural(result_key):
    """ Given a result key, return it in the plural form. """
    # Not at all guaranteed to work in all cases...
    if result_key.endswith('y'):
        return re.sub("y$", "ies", result_key)
    elif result_key.endswith('address'):
        return result_key + 'es'
    elif result_key.endswith('us'):
        return re.sub("us$", "uses", result_key)
    elif not result_key.endswith('s'):
        return result_key + 's'
    else:
        return result_key


def get_endpoint_path(api, response):
    """ Return the section of the URL from 'api/v2' to the end. """
    return response.request.url.split(api.api_prefix)[-1]


def extract_id(*object_types):
    """ Decorator for extracting id from passed parameters for specific types. """
    def outer(func):
        def inner(*args, **kwargs):
            def id_of(x):
                return x.id if type(x) in object_types else x

            new_args = [id_of(arg) for arg in args]
            new_kwargs = {k: id_of(v) for k, v in kwargs.items()}
            return func(*new_args, **new_kwargs)

        return inner

    return outer


def json_encode_for_zendesk(obj):
    """ Only encode those attributes of Zenpy objects that have been modified. """
    return json_encode(obj, serialize=True)


def json_encode_for_printing(obj):
    """ Encode all attributes. """
    return json_encode(obj, serialize=False)


def json_encode(obj, serialize):
    """ Handle encoding complex types. """
    if hasattr(obj, 'to_dict'):
        return obj.to_dict(serialize=serialize)
    elif isinstance(obj, datetime):
        return obj.date().isoformat()
    elif isinstance(obj, date):
        return obj.isoformat()
    elif isinstance(obj, ProxyDict):
        return dict(obj)
    elif isinstance(obj, ProxyList):
        return list(obj)
    elif is_iterable_but_not_string(obj):
        return list(obj)


def all_are_none(*args):
    """ Check if all args are none. """
    return all(arg is None for arg in args)


def all_are_not_none(*args):
    """ Check if all args are not none. """
    return all(arg is not None for arg in args)
zenpycbp
/zenpycbp-2.0.27-py3-none-any.whl/zenpy/lib/util.py
util.py
from abc import abstractmethod

from zenpy.lib.exception import ZenpyException
from zenpy.lib.generator import (
    SearchResultGenerator,
    ZendeskResultGenerator,
    ChatResultGenerator,
    ViewResultGenerator,
    TicketCursorGenerator,
    ChatIncrementalResultGenerator,
    JiraLinkGenerator,
    SearchExportResultGenerator,
    WebhookInvocationsResultGenerator,
    WebhooksResultGenerator,
    GenericCursorResultsGenerator,
)
from zenpy.lib.util import as_singular, as_plural, get_endpoint_path
from six.moves.urllib.parse import urlparse


class ResponseHandler(object):
    """
    A ResponseHandler knows the type of response it can handle, how to deserialize it and
    also how to build the correct return type for the data received.

    Note: it is legal for multiple handlers to know how to process the same response. The
    handler that is ultimately chosen is determined by the order in the Api._response_handlers
    tuple. When adding a new handler, it is important to place the most general handlers last,
    and the most specific first.
    """

    def __init__(self, api, object_mapping=None):
        self.api = api
        self.object_mapping = object_mapping or api._object_mapping

    @staticmethod
    @abstractmethod
    def applies_to(api, response):
        """ Subclasses should return True if they know how to deal with this response. """

    @abstractmethod
    def deserialize(self, response_json):
        """ Subclasses should implement the necessary logic to deserialize the passed JSON and return the result. """

    @abstractmethod
    def build(self, response):
        """
        Subclasses should deserialize the objects here and return the correct type to the user.
        Usually this boils down to deciding whether or not we should return a ResultGenerator
        of a particular type, a list of objects or a single object.
        """


class GenericZendeskResponseHandler(ResponseHandler):
    """ The most generic handler for responses from the Zendesk API. """

    @staticmethod
    def applies_to(api, response):
        try:
            return api.base_url in response.request.url and response.json()
        except ValueError:
            return False

    def deserialize(self, response_json):
        """
        Locate and deserialize all objects in the returned JSON.

        Return a dict keyed by object_type. If the key is plural, the value will be a list,
        if it is singular, the value will be an object of that type.

        :param response_json:
        """
        response_objects = dict()
        if all((t in response_json for t in ('ticket', 'audit'))):
            response_objects["ticket_audit"] = self.object_mapping.object_from_json(
                "ticket_audit", response_json)

        # Locate and store the single objects.
        for zenpy_object_name in self.object_mapping.class_mapping:
            if zenpy_object_name in response_json:
                zenpy_object = self.object_mapping.object_from_json(
                    zenpy_object_name, response_json[zenpy_object_name])
                response_objects[zenpy_object_name] = zenpy_object

        # Locate and store the collections of objects.
        for key, value in response_json.items():
            if isinstance(value, list):
                zenpy_object_name = as_singular(key)
                if zenpy_object_name in self.object_mapping.class_mapping:
                    response_objects[key] = []
                    for object_json in response_json[key]:
                        zenpy_object = self.object_mapping.object_from_json(
                            zenpy_object_name, object_json)
                        response_objects[key].append(zenpy_object)
        return response_objects

    def build(self, response):
        """
        Deserialize the returned objects and return either a single Zenpy object, or a
        ResultGenerator in the case of multiple results.

        :param response: the requests Response object.
        """
        response_json = response.json()

        # Special case for incremental cursor based ticket audits export.
        if get_endpoint_path(self.api, response).startswith('/ticket_audits.json'):
            return TicketCursorGenerator(self, response_json, object_type="audit")

        # Special case for incremental cursor based tickets export.
        if get_endpoint_path(self.api, response).startswith('/incremental/tickets/cursor.json'):
            return TicketCursorGenerator(self, response_json, object_type="ticket")

        # Special case for Jira links.
        if get_endpoint_path(self.api, response).startswith('/services/jira/links'):
            return JiraLinkGenerator(self, response_json, response)

        zenpy_objects = self.deserialize(response_json)

        # Collection of objects (eg, users/tickets)
        plural_object_type = as_plural(self.api.object_type)
        if plural_object_type in zenpy_objects:
            meta = response_json.get('meta')
            if meta and meta.get('has_more') is not None:
                return GenericCursorResultsGenerator(
                    self,
                    response_json,
                    response_objects=zenpy_objects[plural_object_type])
            else:
                return ZendeskResultGenerator(
                    self,
                    response_json,
                    response_objects=zenpy_objects[plural_object_type])

        # Here the response matches the API object_type, seems legit.
        if self.api.object_type in zenpy_objects:
            return zenpy_objects[self.api.object_type]

        # Could be anything, if we know of this object then return it.
        for zenpy_object_name in self.object_mapping.class_mapping:
            if zenpy_object_name in zenpy_objects:
                return zenpy_objects[zenpy_object_name]

        # Maybe a collection of known objects?
        for zenpy_object_name in self.object_mapping.class_mapping:
            plural_zenpy_object_name = as_plural(zenpy_object_name)
            if plural_zenpy_object_name in zenpy_objects:
                return ZendeskResultGenerator(
                    self, response_json, object_type=plural_zenpy_object_name)

        # Bummer, bail out.
        raise ZenpyException("Unknown Response: " + str(response_json))


class HTTPOKResponseHandler(ResponseHandler):
    """ The name is on the box, handles 200 responses. """

    @staticmethod
    def applies_to(api, response):
        return response.status_code == 200

    def deserialize(self, response_json):
        raise NotImplementedError("HTTPOKResponseHandler cannot handle deserialization")

    def build(self, response):
        return response


class ViewResponseHandler(GenericZendeskResponseHandler):
    """ Handles the various responses returned by the View endpoint. """

    @staticmethod
    def applies_to(api, response):
        return get_endpoint_path(api, response).startswith('/views')

    def deserialize(self, response_json):
        deserialized_response = super(ViewResponseHandler, self).deserialize(response_json)
        if 'rows' in response_json:
            views = list()
            for row in response_json['rows']:
                views.append(self.object_mapping.object_from_json('view_row', row))
            return views
        elif 'views' in deserialized_response:
            return deserialized_response['views']
        elif 'tickets' in deserialized_response:
            return deserialized_response['tickets']
        elif 'view_counts' in deserialized_response:
            return deserialized_response['view_counts']
        elif 'view_count' in deserialized_response:
            return deserialized_response['view_count']
        elif 'export' in deserialized_response:
            return deserialized_response['export']
        else:
            return deserialized_response['view']

    def build(self, response):
        response_json = response.json()
        if any([key in response_json for key in ['rows', 'view_counts', 'tickets']]):
            return ViewResultGenerator(self, response_json)
        else:
            return self.deserialize(response_json)


class DeleteResponseHandler(GenericZendeskResponseHandler):
    """ Yup, handles 204 No Content. """

    @staticmethod
    def applies_to(api, response):
        return response.status_code == 204

    def deserialize(self, response_json):
        raise NotImplementedError("Deserialize is not implemented for DELETE")

    def build(self, response):
        return response


class SearchResponseHandler(GenericZendeskResponseHandler):
    """ Handles Zendesk search results. """

    @staticmethod
    def applies_to(api, response):
        result = urlparse(response.request.url)
        try:
            return result.path.endswith('search.json') and 'results' in response.json()
        except (KeyError, ValueError):
            return False

    def build(self, response):
        return SearchResultGenerator(self, response.json())


class SearchExportResponseHandler(GenericZendeskResponseHandler):
    """ Handles Zendesk search export results. """

    @staticmethod
    def applies_to(api, response):
        result = urlparse(response.request.url)
        try:
            return result.path.endswith('export.json') and 'results' in response.json()
        except (KeyError, ValueError):
            return False

    def build(self, response):
        return SearchExportResultGenerator(self, response.json())


class WebhooksResponseHandler(GenericZendeskResponseHandler):
    """ Handles webhooks list results. """

    @staticmethod
    def applies_to(api, response):
        result = urlparse(response.request.url)
        try:
            return result.path.endswith('webhooks') and 'webhooks' in response.json()
        except (KeyError, ValueError):
            return False

    def build(self, response):
        return WebhooksResultGenerator(self, response.json())


class WebhookInvocationsResponseHandler(GenericZendeskResponseHandler):
    """ Handles webhook invocations results. """

    @staticmethod
    def applies_to(api, response):
        result = urlparse(response.request.url)
        return result.path.endswith('invocations')

    def build(self, response):
        return WebhookInvocationsResultGenerator(self, response.json())


class WebhookInvocationAttemptsResponseHandler(GenericZendeskResponseHandler):
    """ Handles webhook invocation attempts results. """

    @staticmethod
    def applies_to(api, response):
        result = urlparse(response.request.url)
        try:
            return result.path.endswith('attempts') and 'attempts' in response.json()
        except (KeyError, ValueError):
            return False

    def deserialize(self, response_json):
        key = 'attempts'
        response_objects = []
        for object_json in response_json[key]:
            zenpy_object = self.object_mapping.object_from_json(
                'invocation_attempt', object_json)
            response_objects.append(zenpy_object)
        return response_objects

    def build(self, response):
        response_json = response.json()
        return self.deserialize(response_json)


class CountResponseHandler(GenericZendeskResponseHandler):
    """ Handles Zendesk search results counts. """

    @staticmethod
    def applies_to(api, response):
        try:
            response_json = response.json()
            return len(response_json) == 1 and 'count' in response_json
        except ValueError:
            return False

    def build(self, response):
        return response.json()['count']


class CombinationResponseHandler(GenericZendeskResponseHandler):
    """ Handles a few special cases where the return type is made up of two objects. """

    @staticmethod
    def applies_to(api, response):
        try:
            response_json = response.json()
            if 'job_status' in response_json:
                return True
            elif 'ticket' in response_json and 'audit' in response_json:
                return True
        except ValueError:
            return False

    def build(self, response):
        zenpy_objects = self.deserialize(response.json())

        # JobStatus responses also include a ticket key so treat it specially.
        if 'job_status' in zenpy_objects:
            return zenpy_objects['job_status']

        # TicketAudit responses are another special case containing both
        # a ticket and audit key.
        if 'ticket' in zenpy_objects and 'audit' in zenpy_objects:
            return zenpy_objects['ticket_audit']
        raise ZenpyException("Could not process response: {}".format(response))


class JobStatusesResponseHandler(GenericZendeskResponseHandler):
    @staticmethod
    def applies_to(api, response):
        try:
            response_json = response.json()
            if 'job_statuses' in response_json:
                return True
        except ValueError:
            return False

    def build(self, response):
        response_objects = {'job_statuses': []}
        for object_json in response.json()['job_statuses']:
            zenpy_object = self.object_mapping.object_from_json(
                'job_status', object_json)
            response_objects['job_statuses'].append(zenpy_object)
        return response_objects


class TagResponseHandler(ResponseHandler):
    """ Tags aint complicated, just return them. """

    @staticmethod
    def applies_to(api, response):
        result = urlparse(response.request.url)
        return result.path.endswith('tags.json')

    def deserialize(self, response_json):
        return response_json['tags']

    def build(self, response):
        return self.deserialize(response.json())


class SlaPolicyResponseHandler(GenericZendeskResponseHandler):
    @staticmethod
    def applies_to(api, response):
        return get_endpoint_path(api, response).startswith('/slas')

    def deserialize(self, response_json):
        if 'definitions' in response_json:
            definitions = self.object_mapping.object_from_json(
                'definitions', response_json['definitions'])
            return dict(definitions=definitions)
        return super(SlaPolicyResponseHandler, self).deserialize(response_json)

    def build(self, response):
        response_json = response.json()
        response_objects = self.deserialize(response_json)
        if 'sla_policies' in response_objects:
            return ZendeskResultGenerator(
                self,
                response.json(),
                response_objects=response_objects['sla_policies'])
        elif 'sla_policy' in response_objects:
            return response_objects['sla_policy']
        elif response_objects:
            return response_objects['definitions']
        raise ZenpyException("Could not handle response: {}".format(response_json))


class RequestCommentResponseHandler(GenericZendeskResponseHandler):
    @staticmethod
    def applies_to(api, response):
        endpoint_path = get_endpoint_path(api, response)
        return endpoint_path.startswith('/requests') and endpoint_path.endswith('comments.json')

    def deserialize(self, response_json):
        return super(RequestCommentResponseHandler, self).deserialize(response_json)

    def build(self, response):
        response_json = response.json()
        response_objects = self.deserialize(response_json)
        return ZendeskResultGenerator(self,
                                      response_json,
                                      response_objects['comments'],
                                      object_type='comment')


class ChatResponseHandler(ResponseHandler):
    """ Handles Chat responses. """

    @staticmethod
    def applies_to(api, response):
        path = get_endpoint_path(api, response)
        return path.startswith('/chats') or path.startswith('/incremental/chats')

    def deserialize(self, response_json):
        chats = list()
        if 'chats' in response_json:
            chat_list = response_json['chats']
        elif 'docs' in response_json:
            chat_list = response_json['docs'].values()
        else:
            raise ZenpyException("Unexpected response: {}".format(response_json))
        for chat in chat_list:
            chats.append(self.object_mapping.object_from_json('chat', chat))
        return chats

    def build(self, response):
        response_json = response.json()
        if 'chats' in response_json or 'docs' in response_json:
            if 'next_page' in response_json:
                return ChatIncrementalResultGenerator(self, response_json)
            else:
                return ChatResultGenerator(self, response_json)
        else:
            return self.object_mapping.object_from_json('chat', response_json)


class AccountResponseHandler(ResponseHandler):
    """ Handles Chat API Account responses. """

    @staticmethod
    def applies_to(api, response):
        _, endpoint_name = response.request.url.split(api.api_prefix)
        return endpoint_name.startswith('/account')

    def deserialize(self, response_json):
        return self.object_mapping.object_from_json('account', response_json)

    def build(self, response):
        return self.deserialize(response.json())


class ChatSearchResponseHandler(ResponseHandler):
    """ Yep, handles Chat API search responses. """

    @staticmethod
    def applies_to(api, response):
        return get_endpoint_path(api, response).startswith('/chats/search')

    def deserialize(self, response_json):
        search_results = list()
        for result in response_json['results']:
            search_results.append(
                self.object_mapping.object_from_json('search_result', result))
        return search_results

    def build(self, response):
        return ChatResultGenerator(self, response.json())


class ChatApiResponseHandler(ResponseHandler):
    """
    Base class for Chat API responses that follow the same pattern.
    Subclasses need only define object type and implement applies_to().
    """

    object_type = None

    def deserialize(self, response_json):
        agents = list()
        if isinstance(response_json, dict):
            return self.object_mapping.object_from_json(self.object_type, response_json)
        else:
            for agent in response_json:
                agents.append(
                    self.object_mapping.object_from_json(self.object_type, agent))
            return agents

    def build(self, response):
        return self.deserialize(response.json())


class AgentResponseHandler(ChatApiResponseHandler):
    object_type = 'agent'

    @staticmethod
    def applies_to(api, response):
        return get_endpoint_path(api, response).startswith('/agents')


class VisitorResponseHandler(ChatApiResponseHandler):
    object_type = 'visitor'

    @staticmethod
    def applies_to(api, response):
        return get_endpoint_path(api, response).startswith('/visitors')


class ShortcutResponseHandler(ChatApiResponseHandler):
    object_type = 'shortcut'

    @staticmethod
    def applies_to(api, response):
        return get_endpoint_path(api, response).startswith('/shortcuts')


class TriggerResponseHandler(ChatApiResponseHandler):
    object_type = 'trigger'

    @staticmethod
    def applies_to(api, response):
        return get_endpoint_path(api, response).startswith('/triggers')


class BanResponseHandler(ChatApiResponseHandler):
    object_type = 'ban'

    @staticmethod
    def applies_to(api, response):
        return get_endpoint_path(api, response).startswith('/bans')


class DepartmentResponseHandler(ChatApiResponseHandler):
    object_type = 'department'

    @staticmethod
    def applies_to(api, response):
        return get_endpoint_path(api, response).startswith('/departments')


class GoalResponseHandler(ChatApiResponseHandler):
    object_type = 'goal'

    @staticmethod
    def applies_to(api, response):
        return get_endpoint_path(api, response).startswith('/goals')


class ZISIntegrationResponseHandler(ResponseHandler):
    """ ZIS calls response handler. """

    @staticmethod
    def applies_to(api, response):
        result = urlparse(response.request.url)
        return result.path.startswith('/api/services/zis/registry/')

    def deserialize(self, response_json):
        return self.object_mapping.object_from_json('integration', response_json)

    def build(self, response):
        if response.text:
            return self.deserialize(response.json())
        else:
            return None


class MissingTranslationHandler(ResponseHandler):
    @staticmethod
    def applies_to(api, response):
        return 'translations/missing.json' in get_endpoint_path(api, response)

    def build(self, response):
        return self.deserialize(response.json())

    def deserialize(self, response_json):
        return response_json['locales']
zenpycbp
/zenpycbp-2.0.27-py3-none-any.whl/zenpy/lib/response.py
response.py
import logging
from threading import RLock

import cachetools

from zenpy.lib.api_objects import BaseObject
from zenpy.lib.exception import ZenpyCacheException
from zenpy.lib.util import get_object_type

__author__ = 'facetoe'
log = logging.getLogger(__name__)


class ZenpyCache(object):
    """
    Wrapper class for the various cachetools caches. Adds the ability to change cache
    implementations on the fly and change the maxsize setting.
    """

    AVAILABLE_CACHES = [
        c for c in dir(cachetools) if c.endswith('Cache') and c != 'Cache'
    ]

    def __init__(self, cache_impl, maxsize, **kwargs):
        self.cache = self._get_cache_impl(cache_impl, maxsize, **kwargs)
        self.purge_lock = RLock()

    def set_cache_impl(self, cache_impl, maxsize, **kwargs):
        """
        Change cache implementation. The contents of the old cache will be transferred to the new one.

        :param cache_impl: Name of cache implementation, must exist in AVAILABLE_CACHES
        """
        new_cache = self._get_cache_impl(cache_impl, maxsize, **kwargs)
        self._populate_new_cache(new_cache)
        self.cache = new_cache

    def pop(self, key, default=None):
        return self.cache.pop(key, default)

    def items(self):
        return self.cache.items()

    @property
    def impl_name(self):
        """ Name of current cache implementation """
        return self.cache.__class__.__name__

    @property
    def maxsize(self):
        """ Current max size """
        return self.cache.maxsize

    def set_maxsize(self, maxsize, **kwargs):
        """ Set maxsize. This involves creating a new cache and transferring the items. """
        new_cache = self._get_cache_impl(self.impl_name, maxsize, **kwargs)
        self._populate_new_cache(new_cache)
        self.cache = new_cache

    def purge(self):
        """ Purge the cache of all items. """
        with self.purge_lock:
            self.cache.clear()

    @property
    def currsize(self):
        return len(self.cache)

    def _populate_new_cache(self, new_cache):
        for key, value in self.cache.items():
            new_cache[key] = value

    def _get_cache_impl(self, cache_impl, maxsize, **kwargs):
        if cache_impl not in self.AVAILABLE_CACHES:
            raise ZenpyCacheException(
                "No such cache: %s, available caches: %s" %
                (cache_impl, str(self.AVAILABLE_CACHES)))
        return getattr(cachetools, cache_impl)(maxsize, **kwargs)

    def __iter__(self):
        return self.cache.__iter__()

    def __getitem__(self, item):
        return self.cache[item]

    def __setitem__(self, key, value):
        if not issubclass(type(value), BaseObject):
            raise ZenpyCacheException(
                "{} is not a subclass of BaseObject!".format(type(value)))
        self.cache[key] = value

    def __delitem__(self, key):
        del self.cache[key]

    def __contains__(self, item):
        return item in self.cache

    def __len__(self):
        return len(self.cache)


class ZenpyCacheManager:
    """
    Interface to the various caches.
    """

    def __init__(self, disabled=False):
        self.disabled = disabled
        self.mapping = {
            'user': ZenpyCache('LRUCache', maxsize=10000),
            'organization': ZenpyCache('LRUCache', maxsize=10000),
            'group': ZenpyCache('LRUCache', maxsize=10000),
            'brand': ZenpyCache('LRUCache', maxsize=10000),
            'ticket': ZenpyCache('TTLCache', maxsize=10000, ttl=30),
            'request': ZenpyCache('LRUCache', maxsize=10000),
            'ticket_field': ZenpyCache('LRUCache', maxsize=10000),
            'sharing_agreement': ZenpyCache('TTLCache', maxsize=10000, ttl=6000),
            'identity': ZenpyCache('LRUCache', maxsize=10000)
        }

    def add(self, zenpy_object):
        """
        Add a Zenpy object to the relevant cache. If no cache exists for this object nothing is done.
        """
        object_type = get_object_type(zenpy_object)
        if object_type not in self.mapping or self.disabled:
            return
        attr_name = self._cache_key_attribute(object_type)
        cache_key = getattr(zenpy_object, attr_name)
        log.debug("Caching: [{}({}={})]".format(
            zenpy_object.__class__.__name__, attr_name, cache_key))
        self.mapping[object_type][cache_key] = zenpy_object

    def delete(self, to_delete):
        """ Purge one or more items from the relevant caches """
        if not isinstance(to_delete, list):
            to_delete = [to_delete]
        for zenpy_object in to_delete:
            object_type = get_object_type(zenpy_object)
            object_cache = self.mapping.get(object_type, None)
            if object_cache:
                removed_object = object_cache.pop(zenpy_object.id, None)
                if removed_object:
                    log.debug("Cache RM: [%s %s]" %
                              (object_type.capitalize(), zenpy_object.id))

    def get(self, object_type, cache_key):
        """ Query the cache for a Zenpy object """
        if object_type not in self.mapping or self.disabled:
            return None
        cache = self.mapping[object_type]
        if cache_key in cache:
            log.debug("Cache HIT: [%s %s]" % (object_type.capitalize(), cache_key))
            return cache[cache_key]
        else:
            log.debug('Cache MISS: [%s %s]' % (object_type.capitalize(), cache_key))

    def query_cache_by_object(self, zenpy_object):
        """ Convenience method for testing. Given an object, return the cached version """
        object_type = get_object_type(zenpy_object)
        cache_key = self._cache_key_attribute(object_type)
        return self.get(object_type, getattr(zenpy_object, cache_key))

    def purge_cache(self, object_type):
        """ Purge the named cache of all values. If no cache exists for object_type, nothing is done """
        if object_type in self.mapping:
            cache = self.mapping[object_type]
            log.debug("Purging [{}] cache of {} values.".format(object_type, len(cache)))
            cache.purge()

    def in_cache(self, zenpy_object):
        """ Determine whether or not this object is in the cache """
        object_type = get_object_type(zenpy_object)
        cache_key_attr = self._cache_key_attribute(object_type)
        return self.get(object_type, getattr(zenpy_object, cache_key_attr)) is not None

    def should_cache(self, zenpy_object):
        """ Determine whether or not this object should be cached (ie, a cache exists for its object_type) """
        return get_object_type(zenpy_object) in self.mapping

    def disable(self):
        """ Disables cache """
        self.disabled = True

    def enable(self):
        """ Enables cache """
        self.disabled = False

    def status(self):
        """ Returns current cache status """
        return 'Cache disabled' if self.disabled else 'Cache enabled'

    def get_cache_engines(self):
        """ Returns list of caches available in cachetools """
        return ZenpyCache.AVAILABLE_CACHES

    def _cache_key_attribute(self, object_type):
        """ Return the attribute used as the cache_key for a particular object type. """
        # This function used to return the key for objects that are not referenced by id.
        # These objects are no longer cached (UserField, OrganizationalField) and so the
        # function has no purpose anymore. I'm leaving it here in case it comes in handy again.
        return 'id'
# ---------- source: zenpycbp /zenpycbp-2.0.27-py3-none-any.whl/zenpy/lib/cache.py ----------
import logging
from datetime import date
from datetime import datetime

from requests.utils import quote

from zenpy.lib.exception import ZenpyException
from zenpy.lib.util import is_iterable_but_not_string, to_unix_ts

__author__ = 'facetoe'

try:
    unicode = unicode
except NameError:
    # 'unicode' is undefined, must be Python 3
    str = str
    unicode = str
    bytes = bytes
    basestring = (str, bytes)
else:
    # 'unicode' exists, must be Python 2
    str = str
    unicode = unicode
    bytes = str
    basestring = basestring

try:
    from urllib import urlencode
    from urlparse import urlunsplit, SplitResult
except ImportError:
    from urllib.parse import urlencode
    from urllib.parse import urlunsplit, SplitResult

log = logging.getLogger(__name__)


class Url(object):
    def __init__(self, path, params=None, netloc=None):
        self.scheme = 'https'
        self.path = path
        self.params = params or {}
        self.netloc = netloc

    def build(self):
        query = "&".join(
            {"{}={}".format(k, v) for k, v in self.params.items()})
        return urlunsplit(
            SplitResult(scheme=self.scheme,
                        netloc=self.netloc,
                        path=self.path,
                        query=query,
                        fragment=None))

    def prefix_path(self, prefix):
        self.path = "{}/{}".format(prefix, self.path)

    def __str__(self):
        return "{}({})".format(
            type(self).__name__,
            ", ".join({"{}={}".format(k, v) for k, v in vars(self).items()}))


class BaseEndpoint(object):
    ISO_8601_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
    ZENDESK_DATE_FORMAT = "%Y-%m-%d"

    def __init__(self, endpoint):
        self.endpoint = endpoint


class PrimaryEndpoint(BaseEndpoint):
    """
    The PrimaryEndpoint handles the most common endpoint operations.
    """

    def __call__(self, **kwargs):
        parameters = {}
        path = self.endpoint
        for key, value in kwargs.items():
            if key == 'id':
                path += "/{}.json".format(value)
            elif key == 'ids':
                path += '/show_many.json'
                parameters[key] = ",".join(map(str, value))
            elif key == 'destroy_ids':
                path += '/destroy_many.json'
                parameters['ids'] = ",".join(map(str, value))
            elif key == 'create_many':
                path += '/create_many.json'
            elif key == 'create_or_update_many':
                # The endpoint already carries the full path.
                path = self.endpoint
            elif key == 'recover_ids':
                path += '/recover_many.json'
                parameters['ids'] = ",".join(map(str, value))
            elif key == 'update_many':
                path += '/update_many.json'
            elif key == 'count_many':
                path += '/count_many.json'
                parameters[key] = ",".join(map(str, value))
            elif key == 'external_id' and path == 'tickets':
                parameters[key] = value
            elif key in ('external_id', 'external_ids'):
                external_ids = [
                    value
                ] if not is_iterable_but_not_string(value) else value
                path += '/show_many.json'
                parameters['external_ids'] = ",".join(external_ids)
            elif key == 'update_many_external':
                path += '/update_many.json'
                parameters['external_ids'] = ",".join(map(str, value))
            elif key == 'destroy_many_external':
                path += '/destroy_many.json'
                parameters['external_ids'] = ",".join(map(str, value))
            elif key == 'label_names':
                parameters[key] = ",".join(value)
            elif key in (
                    'sort_by',
                    'sort_order',
                    'permission_set',
                    'page',
                    'limit',
                    'cursor',
                    'filter_by',
            ):
                parameters[key] = value
            elif key == 'since':
                parameters[key] = value.strftime(self.ISO_8601_FORMAT)
            elif key == 'async':
                parameters[key] = str(value).lower()
            elif key == 'include':
                if is_iterable_but_not_string(value):
                    parameters[key] = ",".join(value)
                elif value:
                    parameters[key] = value
            elif key in ('since_id', 'ticket_id', 'issue_id'):  # Jira integration
                parameters[key] = value
            # this is a bit of a hack
            elif key == 'role':
                if isinstance(value, basestring) or len(value) == 1:
                    parameters['role[]'] = value
                else:
                    parameters['role[]'] = value[0] + '&' + "&".join(
                        ('role[]={}'.format(role) for role in value[1:]))
            elif key.endswith('ids'):
                # if it looks like a type of unknown id, send it through as such
                parameters[key] = ",".join(map(str, value))
            elif key == 'cursor_pagination' and value:
                if value is True:
                    parameters['page[size]'] = 100
                else:
                    parameters['page[size]'] = value
        if path == self.endpoint and not path.endswith('.json'):
            path += '.json'
        return Url(path=path, params=parameters)


class SecondaryEndpoint(BaseEndpoint):
    def __call__(self, id, **kwargs):
        return Url(self.endpoint % dict(id=id), params=kwargs)


class MultipleIDEndpoint(BaseEndpoint):
    def __call__(self, *args):
        if not args or len(args) < 2:
            raise ZenpyException(
                "This endpoint requires at least two arguments!")
        return Url(self.endpoint.format(*args))


class IncrementalEndpoint(BaseEndpoint):
    """
    An IncrementalEndpoint takes a start_time parameter
    for querying the incremental api endpoint.

    Note: The Zendesk API expects UTC time. If a timezone aware datetime
    object is passed Zenpy will convert it to UTC, however if a naive object
    or unix timestamp is passed there is nothing Zenpy can do. It is
    recommended to always pass timezone aware objects to this endpoint.

    :param start_time: unix timestamp or datetime object
    :param include: list of items to sideload
    """

    def __call__(self, start_time=None, include=None, per_page=None):
        if start_time is None:
            raise ZenpyException(
                "Incremental Endpoint requires a start_time parameter!")
        elif isinstance(start_time, datetime):
            unix_time = to_unix_ts(start_time)
        else:
            unix_time = start_time

        params = dict(start_time=str(unix_time))
        if per_page:
            params["per_page"] = per_page
        if include is not None:
            if is_iterable_but_not_string(include):
                params.update(dict(include=",".join(include)))
            else:
                params.update(dict(include=include))
        return Url(self.endpoint, params=params)


class ChatIncrementalEndpoint(BaseEndpoint):
    """
    A ChatIncrementalEndpoint takes parameters
    for querying the chats incremental api endpoint.

    Note: The Zendesk API expects UTC time. If a timezone aware datetime
    object is passed Zenpy will convert it to UTC, however if a naive object
    or unix timestamp is passed there is nothing Zenpy can do. It is
    recommended to always pass timezone aware objects to this endpoint.

    :param start_time: unix timestamp or datetime object
    :param fields: list of chat fields to load without "chats(xxx)". Defaults to "*"
    """

    def __call__(self, start_time=None, **kwargs):
        if start_time is None:
            raise ZenpyException(
                "Incremental Endpoint requires a start_time parameter!")
        elif isinstance(start_time, datetime):
            unix_time = to_unix_ts(start_time)
        else:
            unix_time = start_time

        params = kwargs
        params.update(dict(start_time=str(unix_time)))
        if 'fields' in kwargs:
            if is_iterable_but_not_string(kwargs['fields']):
                f = ",".join(kwargs['fields'])
            else:
                f = kwargs['fields']
        else:
            f = "*"
        params.update(dict(fields="chats(" + f + ")"))
        return Url(self.endpoint, params=params)


class AttachmentEndpoint(BaseEndpoint):
    def __call__(self, **kwargs):
        return Url(self.endpoint,
                   params={k: v for k, v in kwargs.items() if v is not None})


class SearchEndpoint(BaseEndpoint):
    r"""
    The search endpoint accepts all the parameters defined in the Zendesk
    `Search Documentation <https://developer.zendesk.com/rest_api/docs/core/search>`_.
    Zenpy defines several keywords that are mapped to the Zendesk comparison operators:

    +-----------------+------------------+
    | **Keyword**     | **Operator**     |
    +-----------------+------------------+
    | keyword         | : (equality)     |
    +-----------------+------------------+
    | \*_greater_than | > (numeric|type) |
    +-----------------+------------------+
    | \*_less_than    | < (numeric|type) |
    +-----------------+------------------+
    | \*_after        | > (time|date)    |
    +-----------------+------------------+
    | \*_before       | < (time|date)    |
    +-----------------+------------------+
    | minus           | \- (negation)    |
    +-----------------+------------------+
    | \*_between      | > < (dates only) |
    +-----------------+------------------+

    For example the call:

    .. code:: python

        zenpy.search("zenpy", created_between=[yesterday, today], type='ticket', minus='negated')

    Would generate the following API call:

    ::

        /api/v2/search.json?query=zenpy+created>2015-08-29 created<2015-08-30+type:ticket+-negated
    """

    def __call__(self, query=None, **kwargs):
        renamed_kwargs = dict()
        modifiers = list()
        params = dict()
        for key, value in kwargs.items():
            if isinstance(value, datetime):
                kwargs[key] = value.strftime(self.ISO_8601_FORMAT)
            elif isinstance(value, date):
                kwargs[key] = value.strftime(self.ZENDESK_DATE_FORMAT)
            elif is_iterable_but_not_string(value) and key == 'ids':
                kwargs[key] = ", ".join(map(str, value))

            if key.endswith('_between'):
                modifiers.append(self.format_between(key, value))
            elif key in ('sort_by', 'sort_order'):
                params[key] = value
            elif key.endswith('_after'):
                renamed_kwargs[key.replace('_after', '>')] = kwargs[key]
            elif key.endswith('_before'):
                renamed_kwargs[key.replace('_before', '<')] = kwargs[key]
            elif key.endswith('_greater_than'):
                renamed_kwargs[key.replace('_greater_than', '>')] = kwargs[key]
            elif key.endswith('_less_than'):
                renamed_kwargs[key.replace('_less_than', '<')] = kwargs[key]
            elif key == 'minus':
                if is_iterable_but_not_string(value):
                    [modifiers.append("-%s" % v) for v in value]
                else:
                    modifiers.append("-%s" % value)
            elif key == 'type':
                if value in ('ticket', 'organization', 'user', 'group'):
                    params['filter[type]'] = value
                    renamed_kwargs.update({key + ':': '%s' % value})
                else:
                    raise ZenpyException(
                        "This endpoint supports only 'ticket', 'group', "
                        "'user' or 'organization' type filter")
            elif is_iterable_but_not_string(value):
                modifiers.append(self.format_or(key, value))
            else:
                if isinstance(value, str) and value.count(' ') > 0:
                    value = '"{}"'.format(value)
                renamed_kwargs.update({key + ':': '%s' % value})

        search_query = [
            '%s%s' % (key, value) for (key, value) in renamed_kwargs.items()
        ]
        search_query.extend(modifiers)
        if query is not None:
            search_query.insert(0, query)
        params['query'] = quote(' '.join(search_query))
        return Url(self.endpoint, params)

    def format_between(self, key, values):
        if not is_iterable_but_not_string(values):
            raise ValueError(
                "*_between requires an iterable (list, set, tuple etc)")
        elif not len(values) == 2:
            raise ZenpyException("*_between requires exactly 2 items!")
        for value in values:
            if not isinstance(value, datetime):
                raise ValueError("*_between only works with datetime objects!")
            elif value.tzinfo is not None and value.utcoffset().total_seconds() != 0:
                log.warning(
                    "search parameter '{}' requires UTC time, results likely incorrect."
                    .format(key))
        key = key.replace('_between', '')
        dates = [v.strftime(self.ISO_8601_FORMAT) for v in values]
        return "%s>%s %s<%s" % (key, dates[0], key, dates[1])

    def format_or(self, key, values):
        return " ".join(['%s:"%s"' % (key, v) for v in values])


class RequestSearchEndpoint(BaseEndpoint):
    def __call__(self, query, **kwargs):
        kwargs['query'] = query
        return Url(self.endpoint, params=kwargs)


class HelpDeskSearchEndpoint(BaseEndpoint):
    def __call__(self, query='', **kwargs):
        processed_kwargs = dict()
        for key, value in kwargs.items():
            if isinstance(value, datetime):
                processed_kwargs[key] = value.strftime(
                    self.ZENDESK_DATE_FORMAT)
            elif is_iterable_but_not_string(value):
                processed_kwargs[key] = ",".join(value)
            else:
                processed_kwargs[key] = value
        processed_kwargs['query'] = query
        return Url(self.endpoint, params=processed_kwargs)


class SatisfactionRatingEndpoint(BaseEndpoint):
    def __call__(self,
                 score=None,
                 sort_order=None,
                 start_time=None,
                 end_time=None):
        if sort_order and sort_order not in ('asc', 'desc'):
            raise ZenpyException("sort_order must be one of (asc, desc)")
        params = dict()
        if score:
            params['score'] = score
        if sort_order:
            params['sort_order'] = sort_order
        if start_time:
            params['start_time'] = to_unix_ts(start_time)
        if end_time:
            params['end_time'] = to_unix_ts(end_time)
        return Url(self.endpoint, params=params)


class MacroEndpoint(BaseEndpoint):
    def __call__(self, sort_order=None, sort_by=None, **kwargs):
        if sort_order and sort_order not in ('asc', 'desc'):
            raise ZenpyException("sort_order must be one of (asc, desc)")
        if sort_by and sort_by not in ('alphabetical', 'created_at',
                                       'updated_at', 'usage_1h', 'usage_24h',
                                       'usage_7d'):
            raise ZenpyException(
                "sort_by is invalid - https://developer.zendesk.com/rest_api/docs/core/macros#available-parameters"
            )
        if 'id' in kwargs:
            if len(kwargs) > 1:
                raise ZenpyException(
                    "When specifying an id it must be the only parameter")
        params = dict()
        path = self.endpoint
        for key, value in kwargs.items():
            if isinstance(value, bool):
                value = str(value).lower()
            if key == 'id':
                path += "/{}.json".format(value)
            else:
                params[key] = value
        if sort_order:
            params['sort_order'] = sort_order
        if sort_by:
            params['sort_by'] = sort_by
        if path == self.endpoint:
            path += '.json'
        return Url(path, params=params)


class ChatEndpoint(BaseEndpoint):
    def __call__(self, **kwargs):
        if len(kwargs) > 1:
            raise ZenpyException(
                "Only expect a single keyword to the ChatEndpoint")
        endpoint_path = self.endpoint
        params = dict()
        if 'ids' in kwargs:
            endpoint_path = self.endpoint
            params['ids'] = ','.join(kwargs['ids'])
        else:
            for key, value in kwargs.items():
                if key == 'email':
                    endpoint_path = '{}/email/{}'.format(self.endpoint, value)
                elif self.endpoint == 'departments' and key == 'name':
                    endpoint_path = '{}/name/{}'.format(self.endpoint, value)
                else:
                    endpoint_path = "{}/{}".format(self.endpoint, value)
                break
        return Url(endpoint_path, params=params)


class ChatSearchEndpoint(BaseEndpoint):
    def __call__(self, *args, **kwargs):
        conditions = list()
        if args:
            conditions.append(' '.join(args))
        conditions.extend(["{}:{}".format(k, v) for k, v in kwargs.items()])
        query = " AND ".join(conditions)
        return Url(self.endpoint, params=dict(q=query))


class ViewSearchEndpoint(BaseEndpoint):
    def __call__(self, query, **kwargs):
        kwargs['query'] = query
        return Url(self.endpoint, params=kwargs)


class WebhookEndpoint(BaseEndpoint):
    def __call__(self, **kwargs):
        path = self.endpoint
        params = {}
        for key, value in kwargs.items():
            if key == 'id':
                path += "/{}".format(value)
                params = {}
                break
            elif key == 'clone_webhook_id':
                params = {'clone_webhook_id': value}
                break
            elif key == 'test_webhook_id':
                params = {'webhook_id': value}
                break
            elif key == 'filter':
                params['filter[name_contains]'] = value
            elif key == 'page_after':
                params['page[after]'] = value
            elif key == 'page_before':
                params['page[before]'] = value
            elif key == 'page_size':
                params['page[size]'] = value
            elif key == 'sort':
                if value in ['name', 'status']:
                    params['sort'] = value
                else:
                    raise ZenpyException("sort must be one of (name, status)")
        return Url(path, params)


class EndpointFactory(object):
    """
    Provide access to the various endpoints.
    """

    activities = PrimaryEndpoint('activities')
    attachments = PrimaryEndpoint('attachments')
    attachments.upload = AttachmentEndpoint('uploads.json')
    automations = PrimaryEndpoint('automations')
    brands = PrimaryEndpoint('brands')
    chats = ChatEndpoint('chats')
    chats.account = ChatEndpoint('account')
    chats.agents = ChatEndpoint('agents')
    chats.agents.me = ChatEndpoint("agents/me")
    chats.bans = ChatEndpoint('bans')
    chats.departments = ChatEndpoint('departments')
    chats.goals = ChatEndpoint('goals')
    chats.triggers = ChatEndpoint('triggers')
    chats.shortcuts = ChatEndpoint('shortcuts')
    chats.visitors = ChatEndpoint('visitors')
    chats.search = ChatSearchEndpoint('chats/search')
    chats.stream = ChatSearchEndpoint('stream/chats')
    chats.incremental = ChatIncrementalEndpoint('incremental/chats')
    custom_agent_roles = PrimaryEndpoint('custom_roles')
    dynamic_contents = PrimaryEndpoint('dynamic_content/items')
    dynamic_contents.variants = SecondaryEndpoint(
        'dynamic_content/items/%(id)s/variants.json')
    dynamic_contents.variants.show = MultipleIDEndpoint(
        'dynamic_content/items/{}/variants/{}.json')
    dynamic_contents.variants.create = SecondaryEndpoint(
        'dynamic_content/items/%(id)s/variants.json')
    dynamic_contents.variants.create_many = SecondaryEndpoint(
        'dynamic_content/items/%(id)s/variants/create_many.json')
    dynamic_contents.variants.update = MultipleIDEndpoint(
        'dynamic_content/items/{}/variants/{}.json')
    dynamic_contents.variants.update_many = SecondaryEndpoint(
        'dynamic_content/items/%(id)s/variants/update_many.json')
    dynamic_contents.variants.delete = MultipleIDEndpoint(
        'dynamic_content/items/{}/variants/{}.json')
    end_user = SecondaryEndpoint('end_users/%(id)s.json')
    group_memberships = PrimaryEndpoint('group_memberships')
    group_memberships.assignable = PrimaryEndpoint(
        'group_memberships/assignable')
    group_memberships.make_default = MultipleIDEndpoint(
        'users/{}/group_memberships/{}/make_default.json')
    groups = PrimaryEndpoint('groups')
    groups.assignable = PrimaryEndpoint('groups/assignable')
    groups.memberships = SecondaryEndpoint('groups/%(id)s/memberships.json')
    groups.memberships_assignable = SecondaryEndpoint(
        'groups/%(id)s/memberships/assignable.json')
    groups.users = SecondaryEndpoint('groups/%(id)s/users.json')
    job_statuses = PrimaryEndpoint('job_statuses')
    locales = PrimaryEndpoint('locales')
    locales.agent = PrimaryEndpoint('locales/agent')
    locales.public = PrimaryEndpoint('locales/public')
    locales.current = PrimaryEndpoint('locales/current')
    links = PrimaryEndpoint('services/jira/links')
    macros = MacroEndpoint('macros')
    macros.apply = SecondaryEndpoint('macros/%(id)s/apply.json')
    nps = PrimaryEndpoint('nps')
    nps.recipients_incremental = IncrementalEndpoint(
        'nps/incremental/recipients.json')
    nps.responses_incremental = IncrementalEndpoint(
        'nps/incremental/responses.json')
    organization_memberships = PrimaryEndpoint('organization_memberships')
    organization_fields = PrimaryEndpoint('organization_fields')
    organization_fields.reorder = PrimaryEndpoint(
        'organization_fields/reorder.json')
    organizations = PrimaryEndpoint('organizations')
    organizations.external = SecondaryEndpoint(
        'organizations/search.json?external_id=%(id)s')
    organizations.incremental = IncrementalEndpoint(
        'incremental/organizations.json')
    organizations.organization_fields = PrimaryEndpoint('organization_fields')
    organizations.organization_memberships = SecondaryEndpoint(
        'organizations/%(id)s/organization_memberships.json')
    organizations.requests = SecondaryEndpoint(
        'organizations/%(id)s/requests.json')
    organizations.tags = SecondaryEndpoint('organizations/%(id)s/tags.json')
    organizations.create_or_update = PrimaryEndpoint(
        'organizations/create_or_update')
    organizations.users = SecondaryEndpoint('organizations/%(id)s/users.json')
    requests = PrimaryEndpoint('requests')
    requests.ccd = PrimaryEndpoint("requests/ccd")
    requests.comments = SecondaryEndpoint('requests/%(id)s/comments.json')
    requests.open = PrimaryEndpoint("requests/open")
    requests.search = RequestSearchEndpoint('requests/search.json')
    requests.solved = PrimaryEndpoint("requests/solved")
    satisfaction_ratings = SatisfactionRatingEndpoint('satisfaction_ratings')
    satisfaction_ratings.create = SecondaryEndpoint(
        'tickets/%(id)s/satisfaction_rating.json')
    schedules = PrimaryEndpoint('business_hours/schedules')
    search = SearchEndpoint('search.json')
    search.count = SearchEndpoint('search/count.json')
    search_export = SearchEndpoint('search/export.json')
    sharing_agreements = PrimaryEndpoint('sharing_agreements')
    sla_policies = PrimaryEndpoint('slas/policies')
    sla_policies.definitions = PrimaryEndpoint('slas/policies/definitions')
    skips = PrimaryEndpoint('skips')
    suspended_tickets = PrimaryEndpoint('suspended_tickets')
    suspended_tickets.recover = SecondaryEndpoint(
        'suspended_tickets/%(id)s/recover.json')
    tags = PrimaryEndpoint('tags')
    targets = PrimaryEndpoint('targets')
    ticket_fields = PrimaryEndpoint('ticket_fields')
    ticket_field_options = SecondaryEndpoint(
        'ticket_fields/%(id)s/options.json')
    ticket_field_options.show = MultipleIDEndpoint(
        'ticket_fields/{}/options/{}.json')
    ticket_field_options.update = SecondaryEndpoint(
        'ticket_fields/%(id)s/options.json')
    ticket_field_options.delete = MultipleIDEndpoint(
        'ticket_fields/{}/options/{}.json')
    ticket_forms = PrimaryEndpoint('ticket_forms')
    ticket_import = PrimaryEndpoint('imports/tickets')
    ticket_metrics = PrimaryEndpoint('ticket_metrics')
    ticket_metric_events = IncrementalEndpoint(
        'incremental/ticket_metric_events.json')
    tickets = PrimaryEndpoint('tickets')
    tickets.audits = SecondaryEndpoint('tickets/%(id)s/audits.json')
    tickets.audits.cursor = PrimaryEndpoint('ticket_audits')
    tickets.comments = SecondaryEndpoint('tickets/%(id)s/comments.json')
    tickets.comments.redact = MultipleIDEndpoint(
        'tickets/{0}/comments/{1}/redact.json')
    tickets.deleted = PrimaryEndpoint('deleted_tickets')
    tickets.events = IncrementalEndpoint('incremental/ticket_events.json')
    tickets.incidents = SecondaryEndpoint('tickets/%(id)s/incidents.json')
    tickets.incremental = IncrementalEndpoint('incremental/tickets.json')
    tickets.incremental.cursor = PrimaryEndpoint(
        'incremental/tickets/cursor.json')
    tickets.incremental.cursor_start = IncrementalEndpoint(
        'incremental/tickets/cursor.json')
    tickets.metrics = SecondaryEndpoint('tickets/%(id)s/metrics.json')
    tickets.metrics.incremental = IncrementalEndpoint(
        'incremental/ticket_metric_events.json')
    tickets.organizations = SecondaryEndpoint(
        'organizations/%(id)s/tickets.json')
    tickets.recent = SecondaryEndpoint('tickets/recent.json')
    tickets.tags = SecondaryEndpoint('tickets/%(id)s/tags.json')
    tickets.macro = MultipleIDEndpoint('tickets/{0}/macros/{1}/apply.json')
    tickets.merge = SecondaryEndpoint('tickets/%(id)s/merge.json')
    tickets.skips = SecondaryEndpoint('tickets/%(id)s/skips.json')
    topics = PrimaryEndpoint('topics')
    topics.tags = SecondaryEndpoint('topics/%(id)s/tags.json')
    triggers = PrimaryEndpoint('triggers')
    user_fields = PrimaryEndpoint('user_fields')
    users = PrimaryEndpoint('users')
    users.assigned = SecondaryEndpoint('users/%(id)s/tickets/assigned.json')
    users.cced = SecondaryEndpoint('users/%(id)s/tickets/ccd.json')
    users.create_or_update = PrimaryEndpoint('users/create_or_update')
    users.create_or_update_many = PrimaryEndpoint(
        'users/create_or_update_many.json')
    users.group_memberships = SecondaryEndpoint(
        'users/%(id)s/group_memberships.json')
    users.deleted = PrimaryEndpoint("deleted_users")
    users.groups = SecondaryEndpoint('users/%(id)s/groups.json')
    users.incremental = IncrementalEndpoint('incremental/users.json')
    users.me = PrimaryEndpoint('users/me')
    users.merge = SecondaryEndpoint('users/%(id)s/merge.json')
    users.organization_memberships = SecondaryEndpoint(
        'users/%(id)s/organization_memberships.json')
    users.organizations = SecondaryEndpoint('users/%(id)s/organizations.json')
    users.related = SecondaryEndpoint('users/%(id)s/related.json')
    users.requested = SecondaryEndpoint('users/%(id)s/tickets/requested.json')
    users.requests = SecondaryEndpoint('users/%(id)s/requests.json')
    users.tags = SecondaryEndpoint('users/%(id)s/tags.json')
    users.set_password = SecondaryEndpoint('users/%(id)s/password.json')
    users.identities = SecondaryEndpoint('users/%(id)s/identities.json')
    users.identities.show = MultipleIDEndpoint('users/{0}/identities/{1}.json')
    users.identities.update = MultipleIDEndpoint(
        'users/{0}/identities/{1}.json')
    users.identities.make_primary = MultipleIDEndpoint(
        'users/{0}/identities/{1}/make_primary')
    users.identities.verify = MultipleIDEndpoint(
        'users/{0}/identities/{1}/verify')
    users.identities.request_verification = MultipleIDEndpoint(
        'users/{0}/identities/{1}/request_verification.json')
    users.identities.delete = MultipleIDEndpoint(
        'users/{0}/identities/{1}.json')
    users.skips = SecondaryEndpoint('users/%(id)s/skips.json')
    users.search = PrimaryEndpoint('users/search.json')
    views = PrimaryEndpoint('views')
    views.active = PrimaryEndpoint('views/active')
    views.compact = PrimaryEndpoint('views/compact')
    views.count = SecondaryEndpoint('views/%(id)s/count.json')
    views.tickets = SecondaryEndpoint('views/%(id)s/tickets')
    views.execute = SecondaryEndpoint('views/%(id)s/execute.json')
    views.export = SecondaryEndpoint('views/%(id)s/export.json')
    views.search = ViewSearchEndpoint('views/search.json')
    recipient_addresses = PrimaryEndpoint('recipient_addresses')

    class Dummy(object):
        pass

    talk = Dummy()
    talk.calls = Dummy()
    talk.calls.incremental = IncrementalEndpoint(
        'channels/voice/stats/incremental/calls.json')
    talk.current_queue_activity = PrimaryEndpoint(
        'channels/voice/stats/current_queue_activity')
    talk.agents_activity = PrimaryEndpoint(
        'channels/voice/stats/agents_activity')
    talk.availability = SecondaryEndpoint(
        'channels/voice/availabilities/%(id)s.json')
    talk.account_overview = PrimaryEndpoint(
        'channels/voice/stats/account_overview')
    talk.agents_overview = PrimaryEndpoint(
        'channels/voice/stats/agents_overview')
    talk.phone_numbers = PrimaryEndpoint('channels/voice/phone_numbers.json')
    talk.legs = Dummy()
    talk.legs.incremental = IncrementalEndpoint(
        'channels/voice/stats/incremental/legs.json')
    talk_pe = Dummy()
    talk_pe.display_user = MultipleIDEndpoint(
        'channels/voice/agents/{}/users/{}/display.json')
    talk_pe.display_ticket = MultipleIDEndpoint(
        'channels/voice/agents/{}/tickets/{}/display.json')
    talk_pe.create_ticket = PrimaryEndpoint('channels/voice/tickets.json')
    help_centre = Dummy()
    help_centre.articles = PrimaryEndpoint('help_center/articles')
    help_centre.articles.create = SecondaryEndpoint(
        'help_center/sections/%(id)s/articles.json')
    help_centre.articles.comments = SecondaryEndpoint(
        'help_center/articles/%(id)s/comments.json')
    help_centre.articles.comments_update = MultipleIDEndpoint(
        'help_center/articles/{}/comments/{}.json')
    help_centre.articles.comments_delete = MultipleIDEndpoint(
        'help_center/articles/{}/comments/{}.json')
    help_centre.articles.comment_show = MultipleIDEndpoint(
        'help_center/articles/{}/comments/{}.json')
    help_centre.articles.user_comments = SecondaryEndpoint(
        'help_center/users/%(id)s/comments.json')
    help_centre.articles.labels = SecondaryEndpoint(
        'help_center/articles/%(id)s/labels.json')
    help_centre.articles.translations = SecondaryEndpoint(
        'help_center/articles/%(id)s/translations.json')
    help_centre.articles.create_translation = SecondaryEndpoint(
        'help_center/articles/%(id)s/translations.json')
    help_centre.articles.missing_translations = SecondaryEndpoint(
        'help_center/articles/%(id)s/translations/missing.json')
    help_centre.articles.update_translation = MultipleIDEndpoint(
        'help_center/articles/{}/translations/{}.json')
    help_centre.articles.show_translation = MultipleIDEndpoint(
        'help_center/articles/{}/translations/{}.json')
    help_centre.articles.delete_translation = SecondaryEndpoint(
        'help_center/translations/%(id)s.json')
    help_centre.articles.search = HelpDeskSearchEndpoint(
        'help_center/articles/search.json')
    help_centre.articles.subscriptions = SecondaryEndpoint(
        'help_center/articles/%(id)s/subscriptions.json')
    help_centre.articles.subscriptions_delete = MultipleIDEndpoint(
        'help_center/articles/{}/subscriptions/{}.json')
    help_centre.articles.votes = SecondaryEndpoint(
        'help_center/articles/%(id)s/votes.json')
    help_centre.articles.votes.up = SecondaryEndpoint(
        'help_center/articles/%(id)s/up.json')
    help_centre.articles.votes.down = SecondaryEndpoint(
        'help_center/articles/%(id)s/down.json')
    help_centre.articles.comment_votes = MultipleIDEndpoint(
        'help_center/articles/{}/comments/{}/votes.json')
    help_centre.articles.comment_votes.up = MultipleIDEndpoint(
        'help_center/articles/{}/comments/{}/up.json')
    help_centre.articles.comment_votes.down = MultipleIDEndpoint(
        'help_center/articles/{}/comments/{}/down.json')
    help_centre.articles.incremental = IncrementalEndpoint(
        'help_center/incremental/articles.json')
    help_centre.labels = PrimaryEndpoint('help_center/articles/labels')
    help_centre.labels.create = SecondaryEndpoint(
        'help_center/articles/%(id)s/labels.json')
    help_centre.labels.delete = MultipleIDEndpoint(
        'help_center/articles/{}/labels/{}.json')
    help_centre.attachments = SecondaryEndpoint(
        'help_center/articles/%(id)s/attachments.json')
    help_centre.attachments.inline = SecondaryEndpoint(
        'help_center/articles/%(id)s/attachments/inline.json')
    help_centre.attachments.block = SecondaryEndpoint(
        'help_center/articles/%(id)s/attachments/block.json')
    help_centre.attachments.create = SecondaryEndpoint(
        'help_center/articles/%(id)s/attachments.json')
    help_centre.attachments.create_unassociated = PrimaryEndpoint(
        'help_center/articles/attachments')
    help_centre.attachments.delete = SecondaryEndpoint(
        'help_center/articles/attachments/%(id)s.json')
    help_centre.attachments.bulk_attachments = SecondaryEndpoint(
        'help_center/articles/%(id)s/bulk_attachments.json')
    help_centre.categories = PrimaryEndpoint('help_center/categories')
    help_centre.categories.articles = SecondaryEndpoint(
        'help_center/categories/%(id)s/articles.json')
    help_centre.categories.sections = SecondaryEndpoint(
        'help_center/categories/%(id)s/sections.json')
    help_centre.categories.translations = SecondaryEndpoint(
        'help_center/categories/%(id)s/translations.json')
    help_centre.categories.create_translation = SecondaryEndpoint(
        'help_center/categories/%(id)s/translations.json')
    help_centre.categories.missing_translations = SecondaryEndpoint(
        'help_center/categories/%(id)s/translations/missing.json')
    help_centre.categories.update_translation = MultipleIDEndpoint(
        'help_center/categories/{}/translations/{}.json')
    help_centre.categories.delete_translation = SecondaryEndpoint(
        'help_center/translations/%(id)s.json')
    help_centre.sections = PrimaryEndpoint('help_center/sections')
    help_centre.sections.create = SecondaryEndpoint(
        'help_center/categories/%(id)s/sections.json')
    help_centre.sections.articles = SecondaryEndpoint(
        'help_center/sections/%(id)s/articles.json')
    help_centre.sections.translations = SecondaryEndpoint(
        'help_center/sections/%(id)s/translations.json')
    help_centre.sections.create_translation = SecondaryEndpoint(
        'help_center/sections/%(id)s/translations.json')
    help_centre.sections.missing_translations = SecondaryEndpoint(
        'help_center/sections/%(id)s/translations/missing.json')
    help_centre.sections.update_translation = MultipleIDEndpoint(
        'help_center/sections/{}/translations/{}.json')
    help_centre.sections.delete_translation = SecondaryEndpoint(
        'help_center/translations/%(id)s.json')
    help_centre.sections.subscriptions = SecondaryEndpoint(
        'help_center/sections/%(id)s/subscriptions.json')
    help_centre.sections.subscriptions_delete = MultipleIDEndpoint(
        'help_center/sections/{}/subscriptions/{}.json')
    help_centre.sections.access_policies = SecondaryEndpoint(
        'help_center/sections/%(id)s/access_policy.json')
    help_centre.topics = PrimaryEndpoint("community/topics")
    help_centre.topics.posts = SecondaryEndpoint(
        'community/topics/%(id)s/posts.json')
    help_centre.topics.subscriptions = SecondaryEndpoint(
        'community/topics/%(id)s/subscriptions.json')
    help_centre.topics.subscriptions_delete = MultipleIDEndpoint(
        'community/topics/{}/subscriptions/{}.json')
    help_centre.topics.access_policies = SecondaryEndpoint(
        'community/topics/%(id)s/access_policy.json')
    help_centre.posts = PrimaryEndpoint('community/posts')
    help_centre.posts.subscriptions = SecondaryEndpoint(
        'community/posts/%(id)s/subscriptions.json')
    help_centre.posts.subscriptions_delete = MultipleIDEndpoint(
        'community/posts/{}/subscriptions/{}.json')
    help_centre.posts.comments = SecondaryEndpoint(
        'community/posts/%(id)s/comments.json')
    help_centre.posts.comments.delete = MultipleIDEndpoint(
        'community/posts/{}/comments/{}.json')
    help_centre.posts.comments.update = MultipleIDEndpoint(
        'community/posts/{}/comments/{}.json')
    help_centre.posts.votes = SecondaryEndpoint(
        'community/posts/%(id)s/votes.json')
    help_centre.posts.votes.up = SecondaryEndpoint(
        'community/posts/%(id)s/up.json')
    help_centre.posts.votes.down = SecondaryEndpoint(
        'community/posts/%(id)s/down.json')
    help_centre.posts.comments.comment_votes = MultipleIDEndpoint(
        'community/posts/{}/comments/{}/votes.json')
    help_centre.posts.comments.comment_votes.up = MultipleIDEndpoint(
        'community/posts/{}/comments/{}/up.json')
    help_centre.posts.comments.comment_votes.down = MultipleIDEndpoint(
        'community/posts/{}/comments/{}/down.json')
    help_centre.user_segments = PrimaryEndpoint('help_center/user_segments')
    help_centre.user_segments.applicable = PrimaryEndpoint(
        'help_center/user_segments/applicable')
    help_centre.user_segments.sections = SecondaryEndpoint(
        'help_center/user_segments/%(id)s/sections.json')
    help_centre.user_segments.topics = SecondaryEndpoint(
        'help_center/user_segments/%(id)s/topics.json')
    # Note the use of "guide" instead of "help_center" in the API endpoint
    help_centre.permission_groups = PrimaryEndpoint('guide/permission_groups')
    zis = Dummy()
    zis.registry = Dummy()
    zis.registry.create_integration = SecondaryEndpoint('%(id)s')
    zis.registry.upload_bundle = SecondaryEndpoint('%(id)s/bundles')
    zis.registry.install = MultipleIDEndpoint(
        'job_specs/install?job_spec_name=zis:{}:job_spec:{}')
    webhooks = WebhookEndpoint('webhooks')
    webhooks.invocations = SecondaryEndpoint('webhooks/%(id)s/invocations')
    webhooks.invocation_attempts = MultipleIDEndpoint(
        'webhooks/{}/invocations/{}/attempts')
    webhooks.test = WebhookEndpoint('webhooks/test')
    webhooks.secret = SecondaryEndpoint('webhooks/%(id)s/signing_secret')

    def __new__(cls, endpoint_name):
        return getattr(cls, endpoint_name)
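
# --- Editor-added usage sketch (not part of the library) ---------------------
# How an endpoint definition becomes a concrete URL. The subdomain is invented;
# in real use the Api object supplies the netloc and the api/v2 prefix.
if __name__ == '__main__':
    url = EndpointFactory('tickets')(ids=[1, 2, 3])
    url.prefix_path('api/v2')
    url.netloc = 'example.zendesk.com'
    print(url.build())
    # https://example.zendesk.com/api/v2/tickets/show_many.json?ids=1,2,3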
# ---------- source: zenpycbp /zenpycbp-2.0.27-py3-none-any.whl/zenpy/lib/endpoint.py ----------
class ProxyDict(dict):
    """
    Proxy for dict, records when the dictionary has been modified.
    """

    def __init__(self, *args, **kwargs):
        self.dirty_callback = kwargs.pop('dirty_callback', None)
        super(dict, self).__init__()
        dict.update(self, *args, **kwargs)
        self._sentinel = object()
        self._dirty = False

    def update(self, *args, **kwargs):
        dict.update(self, *args, **kwargs)
        self._set_dirty()

    def pop(self, key, default=None):
        # Return the popped value, matching dict.pop semantics.
        r = dict.pop(self, key, default)
        self._set_dirty()
        return r

    def popitem(self):
        r = dict.popitem(self)
        self._set_dirty()
        return r

    def clear(self):
        dict.clear(self)
        self._set_dirty()

    def _clean_dirty(self):
        self._dirty = False

    def _set_dirty(self):
        if self.dirty_callback is not None:
            self.dirty_callback()
        self._dirty = True

    def __getitem__(self, k):
        element = dict.__getitem__(self, k)
        wrapped = self._wrap_element(element)
        dict.__setitem__(self, k, wrapped)
        return wrapped

    def __delitem__(self, k):
        dict.__delitem__(self, k)
        self._set_dirty()

    def __setitem__(self, k, v):
        dict.__setitem__(self, k, v)
        self._set_dirty()

    def _wrap_element(self, element):
        """
        We want to know if an item stored in this dict is modified. If the
        element is a list or dict, we wrap it in a ProxyList or ProxyDict, and
        if it is modified execute a callback that updates this instance. If it
        is a ZenpyObject, then the callback updates the parent object.
        """

        def dirty_callback():
            self._set_dirty()

        if isinstance(element, list):
            element = ProxyList(element, dirty_callback=dirty_callback)
        elif isinstance(element, dict):
            element = ProxyDict(element, dirty_callback=dirty_callback)
        # If it is a Zenpy object this will either return None or the previous wrapper.
        elif getattr(element, '_dirty_callback',
                     self._sentinel) is not self._sentinel:
            # Don't set callback if already set.
            if not callable(element._dirty_callback):
                element._dirty_callback = dirty_callback
        return element


class ProxyList(list):
    """
    Proxy for list, records when the list has been modified.
    """

    def __init__(self, iterable=None, dirty_callback=None):
        list.__init__(self, iterable or [])
        self.dirty_callback = dirty_callback
        self._dirty = False
        self._sentinel = object()

        # list.clear doesn't exist in Python 2.7.
        if hasattr(list, 'clear'):
            def clear():
                list.clear(self)
                self._set_dirty()

            self.clear = clear

    def _clean_dirty(self):
        self._dirty = False

    def _set_dirty(self):
        if self.dirty_callback is not None:
            self.dirty_callback()
        self._dirty = True

    def append(self, item):
        list.append(self, item)
        self._set_dirty()

    def extend(self, iterable):
        list.extend(self, iterable)
        self._set_dirty()

    def insert(self, index, item):
        list.insert(self, index, item)
        self._set_dirty()

    def remove(self, item):
        list.remove(self, item)
        self._set_dirty()

    def __getitem__(self, item):
        element = list.__getitem__(self, item)
        wrapped = self._wrap_element(element)
        self[item] = wrapped
        return wrapped

    def __iter__(self):
        for index, element in enumerate(list.__iter__(self), start=0):
            wrapped = self._wrap_element(element)
            list.__setitem__(self, index, wrapped)
            yield wrapped

    def pop(self, index=-1):
        r = list.pop(self, index)
        self._set_dirty()
        return r

    def __delitem__(self, key):
        list.__delitem__(self, key)
        self._set_dirty()

    def __setitem__(self, key, value):
        list.__setitem__(self, key, value)
        self._set_dirty()

    def __iadd__(self, other):
        r = list.__iadd__(self, other)
        self._set_dirty()
        return r

    def __imul__(self, other):
        r = list.__imul__(self, other)
        self._set_dirty()
        return r

    def _wrap_element(self, element):
        """
        We want to know if an item stored in this list is modified. If the
        element is a list or dict, we wrap it in a ProxyList or ProxyDict, and
        if it is modified execute a callback that updates this instance. If it
        is a ZenpyObject, then the callback updates the parent object.
        """

        def dirty_callback():
            self._set_dirty()

        if isinstance(element, list):
            element = ProxyList(element, dirty_callback=dirty_callback)
        elif isinstance(element, dict):
            element = ProxyDict(element, dirty_callback=dirty_callback)
        # If it is a Zenpy object this will either return None or the previous wrapper.
        elif getattr(element, '_dirty_callback',
                     self._sentinel) is not self._sentinel:
            # Don't set callback if already set.
            if not callable(element._dirty_callback):
                element._dirty_callback = dirty_callback
        return element
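
# --- Editor-added usage sketch (not part of the library) ---------------------
# Demonstrates the dirty tracking above: reading a nested list wraps it in a
# ProxyList, so mutating it fires the parent's callback and marks both dirty.
if __name__ == '__main__':
    changes = []
    d = ProxyDict({'tags': ['a', 'b']},
                  dirty_callback=lambda: changes.append('modified'))
    d['tags'].append('c')                  # nested write propagates upwards
    assert d._dirty and changes == ['modified']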
# ---------- source: zenpycbp /zenpycbp-2.0.27-py3-none-any.whl/zenpy/lib/proxy.py ----------
import logging

import zenpy
from zenpy.lib.api_objects import *
from zenpy.lib.api_objects.chat_objects import *
from zenpy.lib.api_objects.help_centre_objects import Article, Category, Section, Label, Translation, Topic, Post, \
    Subscription, Vote, AccessPolicy, UserSegment, ManagementPermissionGroup
from zenpy.lib.api_objects.talk_objects import *
from zenpy.lib.api_objects.zis_objects import *
from zenpy.lib.exception import ZenpyException
from zenpy.lib.proxy import ProxyDict, ProxyList
from zenpy.lib.util import as_singular, get_object_type

log = logging.getLogger(__name__)

__author__ = 'facetoe'


class ZendeskObjectMapping(object):
    """
    Handle converting Zendesk Support JSON objects to Python ones.
    """
    class_mapping = {
        'ticket': Ticket,
        'deleted_ticket': Ticket,
        'user': User,
        'deleted_user': User,
        'organization': Organization,
        'group': Group,
        'brand': Brand,
        'topic': Topic,
        'comment': Comment,
        'attachment': Attachment,
        'thumbnail': Thumbnail,
        'metadata': Metadata,
        'system': System,
        'create': CreateEvent,
        'change': ChangeEvent,
        'notification': NotificationEvent,
        'voicecomment': VoiceCommentEvent,
        'commentprivacychange': CommentPrivacyChangeEvent,
        'satisfactionrating': SatisfactionRatingEvent,
        'ticketsharingevent': TicketSharingEvent,
        'organizationactivity': OrganizationActivityEvent,
        'error': ErrorEvent,
        'tweet': TweetEvent,
        'facebookevent': FacebookEvent,
        'facebookcomment': FacebookCommentEvent,
        'external': ExternalEvent,
        'logmeintranscript': LogmeinTranscriptEvent,
        'push': PushEvent,
        'cc': CcEvent,
        'via': Via,
        'source': Source,
        'job_status': JobStatus,
        'audit': Audit,
        'ticket_event': TicketEvent,
        'tag': Tag,
        'suspended_ticket': SuspendedTicket,
        'ticket_audit': TicketAudit,
        'satisfaction_rating': SatisfactionRating,
        'activity': Activity,
        'group_membership': GroupMembership,
        'ticket_metric': TicketMetric,
        'ticket_metric_event': TicketMetricEvent,
        'status': Status,
        'ticket_metric_item': TicketMetricItem,
        'user_field': UserField,
        'organization_field': OrganizationField,
        'ticket_field': TicketField,
        'ticket_form': TicketForm,
        'request': Request,
        'user_related': UserRelated,
        'organization_membership': OrganizationMembership,
        'upload': Upload,
        'sharing_agreement': SharingAgreement,
        'macro': Macro,
        'result': MacroResult,
        'job_status_result': JobStatusResult,
        'agentmacroreference': AgentMacroReference,
        'identity': Identity,
        'view': View,
        'conditions': Conditions,
        'view_row': ViewRow,
        'view_count': ViewCount,
        'export': Export,
        'sla_policy': SlaPolicy,
        'policy_metric': PolicyMetric,
        'definitions': Definitions,
        'recipient_address': RecipientAddress,
        'recipient': Recipient,
        'response': Response,
        'trigger': zenpy.lib.api_objects.Trigger,
        'automation': Automation,
        'item': Item,
        'target': Target,
        'locale': Locale,
        'custom_field_option': CustomFieldOption,
        'variant': Variant,
        'link': Link,
        'skip': Skip,
        'schedule': Schedule,
        'custom_role': CustomAgentRole,
        'integration': Integration,
        'webhook': Webhook,
        'invocation': Invocation,
        'invocation_attempt': InvocationAttempt,
        'signing_secret': WebhookSecret,
    }

    skip_attrs = []
    always_dirty = {}

    def __init__(self, api):
        self.api = api
        self.skip_attrs = ['user_fields', 'organization_fields']
        self.always_dirty = dict(conditions=('all', 'any'),
                                 organization_field=('custom_field_options', ),
                                 ticket_field=('custom_field_options', ),
                                 user=('name', ))

    def object_from_json(self, object_type, object_json, parent=None):
        """
        Given a blob of JSON representing a Zenpy object, recursively
        deserialize it and any nested objects it contains. This method also
        adds the deserialized object to the relevant cache if applicable.
        """
        if not isinstance(object_json, dict):
            return object_json
        obj = self.instantiate_object(object_type, parent)
        for key, value in object_json.items():
            if key not in self.skip_attrs:
                key, value = self._deserialize(key, obj, value)
            if isinstance(value, dict):
                value = ProxyDict(value,
                                  dirty_callback=getattr(
                                      obj, '_dirty_callback', None))
            elif isinstance(value, list):
                value = ProxyList(value,
                                  dirty_callback=getattr(
                                      obj, '_dirty_callback', None))
            setattr(obj, key, value)
        if hasattr(obj, '_clean_dirty'):
            obj._clean_dirty()
        self.api.cache.add(obj)
        return obj

    def instantiate_object(self, object_type, parent):
        """
        Instantiate a Zenpy object. If this object has a parent, add a
        callback to call the parent if it is modified. This is so the parent
        object is correctly marked as dirty when a child is modified, eg:

            view.conditions.all.append(<something>)

        Also, some attributes need to be sent to Zendesk together if either is
        modified. For example, Condition objects need to send both "all" and
        "any", even if only one has changed. If we have such values configured,
        add them. They will be inspected in the object's to_dict method on
        serialization.
        """
        ZenpyClass = self.class_for_type(object_type)
        obj = ZenpyClass(api=self.api)
        if parent:
            def dirty_callback():
                parent._dirty = True
                obj._dirty = True

            obj._dirty_callback = dirty_callback
        obj._always_dirty.update(self.always_dirty.get(object_type, []))
        return obj

    def _deserialize(self, key, obj, value):
        if isinstance(value, dict):
            key = self.format_key(key, parent=obj)
            if key in self.class_mapping:
                value = self.object_from_json(key, value, parent=obj)
            elif as_singular(key) in self.class_mapping:
                value = self.object_from_json(as_singular(key),
                                              value,
                                              parent=obj)
        elif isinstance(value, list) and self.format_key(
                as_singular(key), parent=obj) in self.class_mapping:
            zenpy_objects = list()
            for item in value:
                object_type = self.format_key(as_singular(key), parent=obj)
                zenpy_objects.append(
                    self.object_from_json(object_type, item, parent=obj))
            value = zenpy_objects
        return key, value

    def class_for_type(self, object_type):
        """ Given an object_type return the class associated with it. """
        if object_type not in self.class_mapping:
            raise ZenpyException("Unknown object_type: " + str(object_type))
        else:
            return self.class_mapping[object_type]

    def format_key(self, key, parent):
        if key == 'result':
            key = "{}_result".format(get_object_type(parent))
        elif key in ('from',):
            key = '{}_'.format(key)
        return key


class ChatObjectMapping(ZendeskObjectMapping):
    """
    Handle converting Chat API objects to Python ones. This class exists
    to prevent namespace collisions between APIs.
    """
    class_mapping = {
        'chat': Chat,
        'offline_msg': OfflineMessage,
        'session': Session,
        'response_time': ResponseTime,
        'visitor': Visitor,
        'webpath': Webpath,
        'count': Count,
        'shortcut': Shortcut,
        'trigger': zenpy.lib.api_objects.chat_objects.Trigger,
        'ban': Ban,
        'account': Account,
        'plan': Plan,
        'billing': Billing,
        'agent': Agent,
        'roles': Roles,
        'search_result': SearchResult,
        'ip_address': IpAddress,
        'department': Department,
        'goal': Goal
    }


class HelpCentreObjectMapping(ZendeskObjectMapping):
    """
    Handle converting Help Centre API objects to Python ones. This class
    exists to prevent namespace collisions between APIs.
    """
    class_mapping = {
        'article': Article,
        'category': Category,
        'section': Section,
        'comment': zenpy.lib.api_objects.help_centre_objects.Comment,
        'article_attachment':
        zenpy.lib.api_objects.help_centre_objects.ArticleAttachment,
        'label': Label,
        'translation': Translation,
        'topic': zenpy.lib.api_objects.help_centre_objects.Topic,
        'post': Post,
        'subscription': Subscription,
        'vote': Vote,
        'access_policy': AccessPolicy,
        'user_segment': UserSegment,
        'permission_group': ManagementPermissionGroup
    }


class TalkObjectMapping(ZendeskObjectMapping):
    """
    Handle converting Talk API objects to Python ones. This class exists
    to prevent namespace collisions between APIs.
    """
    class_mapping = {
        'call': Call,
        'account_overview': AccountOverview,
        'agents_activity': AgentsActivity,
        'agents_overview': AgentsOverview,
        'current_queue_activity': CurrentQueueActivity,
        'phone_numbers': PhoneNumbers,
        'availability': ShowAvailability,
        'leg': Leg
    }
# ---------- source: zenpycbp /zenpycbp-2.0.27-py3-none-any.whl/zenpy/lib/mapping.py ----------
from __future__ import division import re from abc import abstractmethod from datetime import datetime, timedelta from zenpy.lib.util import as_plural from zenpy.lib.exception import SearchResponseLimitExceeded try: from collections.abc import Iterable except ImportError: from collections import Iterable import six from math import ceil __author__ = 'facetoe' import logging log = logging.getLogger(__name__) class BaseResultGenerator(Iterable): """ Base class for result generators. Subclasses should implement process_page() and return a list of results. """ def __init__(self, response_handler, response_json): self.response_handler = response_handler self._response_json = response_json self.values = None self.position = 0 self.update_attrs() self._has_sliced = False self.next_page_attr = 'next_page' @abstractmethod def process_page(self): """ Subclasses should do whatever processing is necessary and return a list of the results. """ def next(self): if self.values is None: self.values = self.process_page() if self.position >= len(self.values): self.handle_pagination() if len(self.values) < 1: raise StopIteration() zenpy_object = self.values[self.position] self.position += 1 return zenpy_object def handle_pagination(self, page_num=None, page_size=None): """ Handle retrieving and processing the next page of results. """ self._response_json = self.get_next_page(page_num=page_num, page_size=page_size) self.update_attrs() self.position = 0 self.values = self.process_page() def update_attrs(self): """ Add attributes such as count/end_time that can be present """ for key, value in self._response_json.items(): if key != 'results' and type(value) not in (list, dict): setattr(self, key, value) def get_next_page(self, page_num, page_size): """ Retrieve the next page of results. """ url = self._response_json.get(self.next_page_attr, None) if url is None: raise StopIteration() params, url = self.process_url(page_num, page_size, url) response = self.response_handler.api._get(url, raw_response=True, params=params) return response.json() def process_url(self, page_num, page_size, url): """ When slicing, remove the per_page and page parameters and pass to requests in the params dict """ params = dict() if page_num is not None: url = re.sub(r'page=\d+', '', url) params['page'] = page_num if page_size is not None: url = re.sub(r'per_page=\d+', '', url) params['per_page'] = page_size return params, url def __getitem__(self, item): if isinstance(item, slice): return self._handle_slice(item) raise TypeError("only slices are supported!") def _handle_slice(self, slice_object): if self._has_sliced: raise NotImplementedError( "the current slice implementation does not support multiple accesses!" ) start, stop, page_size = slice_object.start or 0, \ slice_object.stop or len(self), \ slice_object.step or 100 if any((val < 0 for val in (start, stop, page_size))): raise ValueError( "negative values not supported in slice operations!") next_page = self._response_json.get("next_page") if next_page and 'incremental' in next_page: raise NotImplementedError( "the current slice implementation does not support incremental APIs!" 
) if self._response_json.get("before_cursor", None): raise NotImplementedError( "cursor based pagination cannot be sliced!") if self.values is None: self.values = self.process_page() values_length = len(self.values) if start > values_length or stop > values_length: result = self._retrieve_slice(start, stop, page_size) else: result = self.values[start:stop] self._has_sliced = True return result def _retrieve_slice(self, start, stop, page_size): # Calculate our range of pages. min_page = ceil(start / page_size) max_page = ceil(stop / page_size) + 1 if six.PY2: min_page = int(min_page) max_page = int(max_page) # Calculate the lower and upper bounds for the final slice. padding = ((max_page - min_page) - 1) * page_size lower = start % page_size or page_size upper = (stop % page_size or page_size) + padding # If we can use these objects, use them. consume_first_page = False if start <= len(self.values): consume_first_page = True # Gather all the objects in the range we want. to_slice = list() for i, page_num in enumerate(range(min_page, max_page)): if i == 0 and consume_first_page: to_slice.extend(self.values) else: self.handle_pagination(page_num=page_num, page_size=page_size) to_slice.extend(self.values) # Finally return the range of objects the user requested. return to_slice[lower:upper] def __iter__(self): return self def __len__(self): if hasattr(self, 'count'): return self.count elif self.values is not None: return len(self.values) else: return 0 def __next__(self): return self.next() class ZendeskResultGenerator(BaseResultGenerator): """ Generic result generator for offset pagination. """ def __init__(self, response_handler, response_json, response_objects=None, object_type=None): super(ZendeskResultGenerator, self).__init__(response_handler, response_json) self.object_type = object_type or self.response_handler.api.object_type self.values = response_objects or None def process_page(self): response_objects = self.response_handler.deserialize( self._response_json) return response_objects[as_plural(self.object_type)] def get_next_page(self, page_num=None, page_size=None): end_time = self._response_json.get('end_time', None) # If we are calling an incremental API, make sure to honour the restrictions if end_time: # We can't request updates from an incremental api if the # start_time value is less than 5 minutes in the future. if (datetime.fromtimestamp(int(end_time)) + timedelta(minutes=5)) > datetime.now(): raise StopIteration # No more pages to request if self._response_json.get("end_of_stream") is True: raise StopIteration return super(ZendeskResultGenerator, self).get_next_page(page_num, page_size) class SearchResultGenerator(BaseResultGenerator): """ Result generator for search queries. """ def process_page(self): search_results = list() for object_json in self._response_json['results']: object_type = object_json.pop('result_type') search_results.append( self.response_handler.api._object_mapping.object_from_json( object_type, object_json)) return search_results def get_next_page(self, page_num, page_size): try: return super(SearchResultGenerator, self).get_next_page(page_num, page_size) except SearchResponseLimitExceeded: log.error( 'This search has resulted in more results than zendesk allows. We got what we could.' ) raise StopIteration() class CursorResultsGenerator(BaseResultGenerator): """ Generator for iterable endpoint results with cursor """ def get_next_page(self): """ Retrieve the next page of results. 
""" meta = self._response_json.get('meta') if meta and meta.get('has_more'): url = self._response_json.get('links').get('next') log.debug('There are more results via url={}, retrieving'.format(url)) response = self.response_handler.api._get(url, raw_response=True) new_json = response.json() if hasattr(self, 'object_type') and len(new_json.get(as_plural(self.object_type))) == 0: """ Probably a bug: when the total amount is a multiple of the page size, the very last page comes empty. """ log.debug('Empty page has got, stopping iteration') raise StopIteration() else: return new_json else: log.debug('No more results available, stopping iteration') raise StopIteration() def handle_pagination(self): """ Handle retrieving and processing the next page of results. """ self._response_json = self.get_next_page() self.values.extend(self.process_page()) class GenericCursorResultsGenerator(CursorResultsGenerator): """ Generic result generator for cursor pagination. """ def __init__(self, response_handler, response_json, response_objects=None, object_type=None): super(GenericCursorResultsGenerator, self).__init__(response_handler, response_json) self.object_type = object_type or self.response_handler.api.object_type self.values = response_objects or None def process_page(self): response_objects = self.response_handler.deserialize( self._response_json) return response_objects[as_plural(self.object_type)] class SearchExportResultGenerator(CursorResultsGenerator): """ Generator for Search Export endpoint results """ def process_page(self): search_results = list() for object_json in self._response_json['results']: object_type = object_json.pop('result_type') search_results.append( self.response_handler.api._object_mapping.object_from_json( object_type, object_json)) return search_results class WebhookInvocationsResultGenerator(CursorResultsGenerator): """ Generator for Webhook Invocations endpoint """ def process_page(self): search_results = list() for object_json in self._response_json['invocations']: search_results.append( self.response_handler.api._object_mapping.object_from_json( 'invocation', object_json)) return search_results class WebhooksResultGenerator(CursorResultsGenerator): """ Generator for Webhooks list """ def process_page(self): search_results = list() for object_json in self._response_json['webhooks']: search_results.append( self.response_handler.api._object_mapping.object_from_json( 'webhook', object_json)) return search_results class TicketCursorGenerator(ZendeskResultGenerator): """ Generator for cursor based incremental export endpoints for ticket and ticket_audit objects. """ def __init__(self, response_handler, response_json, object_type): super(TicketCursorGenerator, self).__init__(response_handler, response_json, response_objects=None, object_type=object_type) self.next_page_attr = 'after_url' def __reversed__(self): # Flip the direction we grab pages. self.next_page_attr = 'before_url' if self.next_page_attr == 'after_url' else 'after_url' # Special case for when the generator is reversed before consuming any values. if self.values is None: self.values = list(self.process_page()) # Not all values were consumed, begin returning items at position -1. elif self.position != 0: self.values = list(self.values[:self.position - 2]) self.position = 0 else: self.handle_pagination() return iter(self) class JiraLinkGenerator(ZendeskResultGenerator): def __init__(self, response_handler, response_json, response): # The Jira links API does not provide a next_page in the JSON response. 
        # Save the raw requests response to support filtering (e.g. ticket_id
        # or issue_id) during pagination.
        self.response = response
        super(JiraLinkGenerator, self).__init__(response_handler,
                                                response_json,
                                                response_objects=None,
                                                object_type='links')
        self.next_page_attr = 'since_id'

    def get_next_page(self, page_num=None, page_size=None):
        if self._response_json.get('total', 0) < 1:
            raise StopIteration()

        url = self.response.url

        # The since_id param is exclusive. Use the last id of the current page
        # as the since_id for the next page.
        since_id = str(self._response_json['links'][-1]['id'])

        if 'since_id' in url:
            # Replace the previous since_id parameter.
            url = re.sub(r'since_id=\d+', 'since_id={}'.format(since_id), url)
        else:
            if len(url.split('?')) > 1:
                # Add since_id to existing query parameters
                url += '&since_id={}'.format(since_id)
            else:
                # Add since_id as the first and only query parameter
                url += '?since_id={}'.format(since_id)

        # Save the raw requests response again.
        self.response = self.response_handler.api._get(url, raw_response=True)
        return self.response.json()

    def _handle_slice(self, slice_object):
        raise NotImplementedError(
            "the current Jira Links implementation does not support slicing!")


class ChatResultGenerator(BaseResultGenerator):
    """
    Generator for ChatApi objects
    """
    def __init__(self, response_handler, response_json):
        super(ChatResultGenerator, self).__init__(response_handler,
                                                  response_json)
        self.next_page_attr = 'next_url'

    def process_page(self):
        return self.response_handler.deserialize(self._response_json)


class ChatIncrementalResultGenerator(BaseResultGenerator):
    """
    Generator for Chat Incremental Api objects
    """
    def __init__(self, response_handler, response_json):
        super(ChatIncrementalResultGenerator,
              self).__init__(response_handler, response_json)
        self.next_page_attr = 'next_page'

    def process_page(self):
        return self.response_handler.deserialize(self._response_json)


class ViewResultGenerator(BaseResultGenerator):
    def process_page(self):
        return self.response_handler.deserialize(self._response_json)
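
# A minimal usage sketch (hypothetical, not part of the library): the
# generators above are all consumed the same way. Assuming a configured Zenpy
# client named `zenpy_client`:
#
#     # Lazy iteration; pages are fetched on demand via get_next_page().
#     for ticket in zenpy_client.tickets():
#         print(ticket.subject)
#
#     # Slicing; delegates to _handle_slice()/_retrieve_slice() above.
#     first_25 = zenpy_client.tickets()[:25]
#
#     # Cursor based generators (e.g. TicketCursorGenerator) additionally
#     # support changing direction with reversed(), see __reversed__() above.
#     for audit in reversed(zenpy_client.tickets.audits()):
#         print(audit.id)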
/zenpycbp-2.0.27-py3-none-any.whl/zenpy/lib/generator.py
from io import BytesIO

import json
import logging
import os  # used by _build_url/base_url (ZENPY_FORCE_* overrides) and AttachmentApi.download

from time import sleep, time

from zenpy.lib.api_objects import (User, Macro, Identity, View, Organization,
                                   Group, GroupMembership, OrganizationField,
                                   TicketField, Comment as TicketComment,
                                   CustomFieldOption, Item, Variant, Ticket,
                                   Webhook, BaseObject)
from zenpy.lib.api_objects.help_centre_objects import (
    Section, Article, Comment, ArticleAttachment, Label, Category,
    Translation, Topic, Post, Subscription)
from zenpy.lib.api_objects.talk_objects import (Call, CurrentQueueActivity,
                                                PhoneNumbers,
                                                ShowAvailability,
                                                AgentsOverview,
                                                AccountOverview,
                                                AgentsActivity, Leg)
from zenpy.lib.exception import *
from zenpy.lib.mapping import ZendeskObjectMapping, ChatObjectMapping, HelpCentreObjectMapping, TalkObjectMapping
from zenpy.lib.request import *
from zenpy.lib.response import *
from zenpy.lib.util import as_plural, extract_id, is_iterable_but_not_string, json_encode_for_zendesk, all_are_none, \
    all_are_not_none, json_encode_for_printing

try:
    from collections.abc import Iterable
except ImportError:
    from collections import Iterable

__author__ = 'facetoe'
log = logging.getLogger(__name__)


class BaseApi(object):
    """
    Base class for API. Responsible for submitting requests to Zendesk,
    controlling rate limiting and deserializing responses.
    """
    def __init__(self, subdomain, session, timeout, ratelimit,
                 ratelimit_budget, ratelimit_request_interval, cache, domain):
        self.domain = domain
        self.subdomain = subdomain
        self.session = session
        self.timeout = timeout
        self.ratelimit = ratelimit
        self.ratelimit_budget = ratelimit_budget
        self.cache = cache
        self.protocol = 'https'
        self.api_prefix = 'api/v2'
        self._url_template = "%(protocol)s://%(subdomain)s.%(domain)s/%(api_prefix)s"
        self.callsafety = {'lastcalltime': None, 'lastlimitremaining': None}
        self.ratelimit_request_interval = ratelimit_request_interval
        self._response_handlers = (
            CountResponseHandler,
            DeleteResponseHandler,
            TagResponseHandler,
            SearchExportResponseHandler,
            SearchResponseHandler,
            JobStatusesResponseHandler,
            CombinationResponseHandler,
            ViewResponseHandler,
            SlaPolicyResponseHandler,
            RequestCommentResponseHandler,
            ZISIntegrationResponseHandler,
            WebhookInvocationsResponseHandler,
            WebhookInvocationAttemptsResponseHandler,
            WebhooksResponseHandler,
            GenericZendeskResponseHandler,
            HTTPOKResponseHandler,
        )

        # An object is considered dirty when it has modifications. We want to
        # ensure that it is successfully accepted by Zendesk before cleaning
        # its dirty attributes, so we store it here until the response is
        # successfully processed, and then call the object's _clean_dirty()
        # method.
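        # (For reference: _serialize() records the outgoing object here, and
        # _process_response() calls _clean_dirty_objects() once Zendesk has
        # accepted it.)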
        self._dirty_object = None

    def _post(self, url, payload, content_type=None, **kwargs):
        if 'data' in kwargs:
            if content_type:
                headers = {'Content-Type': content_type}
            else:
                headers = {'Content-Type': 'application/octet-stream'}
        else:
            headers = None
        response = self._call_api(self.session.post,
                                  url,
                                  json=self._serialize(payload),
                                  timeout=self.timeout,
                                  headers=headers,
                                  **kwargs)
        return self._process_response(response)

    def _put(self, url, payload):
        response = self._call_api(self.session.put,
                                  url,
                                  json=self._serialize(payload),
                                  timeout=self.timeout)
        return self._process_response(response)

    def _patch(self, url, payload):
        response = self._call_api(self.session.patch,
                                  url,
                                  json=self._serialize(payload),
                                  timeout=self.timeout)
        return self._process_response(response)

    def _delete(self, url, payload=None):
        response = self._call_api(self.session.delete,
                                  url,
                                  json=payload,
                                  timeout=self.timeout)
        return self._process_response(response)

    def _get(self, url, raw_response=False, **kwargs):
        response = self._call_api(self.session.get,
                                  url,
                                  timeout=self.timeout,
                                  **kwargs)
        if raw_response:
            return response
        else:
            return self._process_response(response)

    def _call_api(self, http_method, url, **kwargs):
        """
        Execute a call to the Zendesk API. Handles rate limiting, checking the
        response from Zendesk and deserialization of the Zendesk response. All
        communication with Zendesk should go through this method.

        :param http_method: The requests method to call (eg post, put, get).
        :param url: The url to pass to the requests method.
        :param kwargs: Any additional kwargs to pass on to requests.
        """
        log.debug("{}: {} - {}".format(http_method.__name__.upper(), url,
                                       kwargs))
        if self.ratelimit is not None:
            # This path indicates we're taking a proactive approach to not hit the rate limit
            response = self._ratelimit(http_method=http_method,
                                       url=url,
                                       **kwargs)
        else:
            response = http_method(url, **kwargs)

        # If we are being rate-limited, wait the required period before trying again.
        if response.status_code == 429:
            while 'retry-after' in response.headers and int(
                    response.headers['retry-after']) > 0:
                retry_after_seconds = int(response.headers['retry-after'])
                log.warning(
                    "Waiting for requested retry-after period: %s seconds" %
                    retry_after_seconds)
                while retry_after_seconds > 0:
                    retry_after_seconds -= 1
                    self.check_ratelimit_budget(1)
                    log.debug("    -> sleeping: %s more seconds" %
                              retry_after_seconds)
                    sleep(1)
                response = http_method(url, **kwargs)

        self._check_response(response)
        self._update_callsafety(response)
        return response

    def check_ratelimit_budget(self, seconds_waited):
        """ If we have a ratelimit_budget, ensure it is not exceeded. """
        if self.ratelimit_budget is not None:
            self.ratelimit_budget -= seconds_waited
            if self.ratelimit_budget < 1:
                raise RatelimitBudgetExceeded("Rate limit budget exceeded!")

    def _ratelimit(self, http_method, url, **kwargs):
        """ Ensure we do not hit the rate limit. """
        def time_since_last_call():
            if self.callsafety['lastcalltime'] is not None:
                return int(time() - self.callsafety['lastcalltime'])
            else:
                return None

        lastlimitremaining = self.callsafety['lastlimitremaining']

        if time_since_last_call() is None or time_since_last_call() >= self.ratelimit_request_interval or \
                lastlimitremaining >= self.ratelimit:
            response = http_method(url, **kwargs)
        else:
            # We hit our limit floor and aren't quite at the
            # ratelimit_request_interval value in seconds yet.
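            # (In other words: the last X-Rate-Limit-Remaining we saw is below
            # the configured floor, so wait out the remainder of
            # ratelimit_request_interval before calling again.)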
            log.warning(
                "Safety limit of %s remaining calls reached, and time since last call is under %s seconds"
                % (self.ratelimit, self.ratelimit_request_interval))
            while time_since_last_call() < self.ratelimit_request_interval:
                remaining_sleep = int(self.ratelimit_request_interval -
                                      time_since_last_call())
                log.debug("    -> sleeping: %s more seconds" % remaining_sleep)
                self.check_ratelimit_budget(1)
                sleep(1)
            response = http_method(url, **kwargs)

        self.callsafety['lastcalltime'] = time()
        self.callsafety['lastlimitremaining'] = int(
            response.headers.get('X-Rate-Limit-Remaining', 0))
        return response

    def _update_callsafety(self, response):
        """ Update the callsafety data structure """
        if self.ratelimit is not None:
            self.callsafety['lastcalltime'] = time()
            self.callsafety['lastlimitremaining'] = int(
                response.headers.get('X-Rate-Limit-Remaining', 0))

    def _process_response(self, response, object_mapping=None):
        """
        Attempt to find a ResponseHandler that knows how to process this
        response. If no handler can be found, raise an Exception.
        """
        try:
            pretty_response = response.json()
        except ValueError:
            pretty_response = response
        for handler in self._response_handlers:
            if handler.applies_to(self, response):
                log.debug("{} matched: {}".format(handler.__name__,
                                                  pretty_response))
                r = handler(self, object_mapping).build(response)
                self._clean_dirty_objects()
                return r
        raise ZenpyException(
            "Could not handle response: {}".format(pretty_response))

    def _clean_dirty_objects(self):
        """
        Clear all dirty attributes for the last object or list of objects
        successfully submitted to Zendesk.
        """
        if self._dirty_object is None:
            return
        if not is_iterable_but_not_string(self._dirty_object):
            self._dirty_object = [self._dirty_object]

        log.debug("Cleaning objects: {}".format(self._dirty_object))
        for o in self._dirty_object:
            if isinstance(o, BaseObject):
                o._clean_dirty()
        self._dirty_object = None

    def _serialize(self, zenpy_object):
        """ Serialize a Zenpy object to JSON """
        # If it's a dict this object has already been serialized.
        if not type(zenpy_object) == dict:
            log.debug("Setting dirty object: {}".format(zenpy_object))
            self._dirty_object = zenpy_object
        return json.loads(
            json.dumps(zenpy_object, default=json_encode_for_zendesk))

    def _query_zendesk(self, endpoint, object_type, *endpoint_args,
                       **endpoint_kwargs):
        """
        Query Zendesk for items. If an id or list of ids are passed, attempt to
        locate these items in the relevant cache. If they cannot be found, or
        no ids are passed, execute a call to Zendesk to retrieve the items.

        :param endpoint: target endpoint.
        :param object_type: object type we are expecting.
        :param endpoint_args: args for endpoint
        :param endpoint_kwargs: kwargs for endpoint

        :return: either a ResultGenerator or a Zenpy object.
        """
        _id = endpoint_kwargs.get('id', None)
        if _id:
            item = self.cache.get(object_type, _id)
            if item:
                return item
            else:
                return self._get(url=self._build_url(
                    endpoint(*endpoint_args, **endpoint_kwargs)))
        elif 'ids' in endpoint_kwargs:
            cached_objects = []
            # Check to see if we have all objects in the cache.
            # If we are missing even one we request them all again.
            # This could be optimized to only request the missing objects.
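            # (Hypothetical sketch of that optimization, not implemented here:
            #      missing = [i for i in endpoint_kwargs['ids']
            #                 if not self.cache.get(object_type, i)]
            #  then request only `missing` from the API and merge with the
            #  cached objects.)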
for _id in endpoint_kwargs['ids']: obj = self.cache.get(object_type, _id) if not obj: return self._get( self._build_url(endpoint=endpoint( *endpoint_args, **endpoint_kwargs))) cached_objects.append(obj) return ZendeskResultGenerator(self, {}, response_objects=cached_objects, object_type=object_type) else: return self._get( self._build_url( endpoint=endpoint(*endpoint_args, **endpoint_kwargs))) def _check_response(self, response): """ Check the response code returned by Zendesk. If it is outside the 200 range, raise an exception of the correct type. :param response: requests Response object. """ if response.status_code > 299 or response.status_code < 200: log.debug("Received response code [%s] - headers: %s" % (response.status_code, str(response.headers))) try: _json = response.json() err_type = _json.get("error", '') if err_type == 'RecordNotFound': raise RecordNotFoundException(json.dumps(_json), response=response) elif err_type == "TooManyValues": raise TooManyValuesException(json.dumps(_json), response=response) elif err_type == "invalid" and response.status_code == 422: raise SearchResponseLimitExceeded(json.dumps(_json), response=response) else: raise APIException(json.dumps(_json), response=response) except ValueError: response.raise_for_status() def _build_url(self, endpoint, api_prefix=None): """ Build complete URL """ if not issubclass(type(self), ChatApiBase) and not self.subdomain: raise ZenpyException( "subdomain is required when accessing the Zendesk API!") endpoint.scheme = os.environ.get("ZENPY_FORCE_SCHEME", self.protocol) endpoint.netloc = self.base_url endpoint.prefix_path(api_prefix or self.api_prefix) return endpoint.build() @property def base_url(self): override = os.environ.get("ZENPY_FORCE_NETLOC") if override: return override # This is for Zendesk APIs as they require a subdomain elif self.subdomain: return '{}.{}'.format(self.subdomain, self.domain) # Chat APIs do not require a subdomain (it is always zopim.com) else: return self.domain class Api(BaseApi): """ Most general API class. It is callable, and is suitable for basic API endpoints. This class also contains many methods for retrieving specific objects or collections of objects. These methods are called by the classes found in zenpy.lib.api_objects. 
""" def __init__(self, config, object_type, endpoint=None): self.object_type = object_type self.endpoint = endpoint or EndpointFactory(as_plural(object_type)) super(Api, self).__init__(**config) self._object_mapping = ZendeskObjectMapping(self) def __call__(self, *args, **kwargs): return self._query_zendesk(self.endpoint, self.object_type, *args, **kwargs) def _get_user(self, user_id): if int(user_id) < 0: return None return self._query_zendesk(EndpointFactory('users'), 'user', id=user_id) def _get_users(self, user_ids): return self._query_zendesk(endpoint=EndpointFactory('users'), object_type='user', ids=user_ids) def _get_comment(self, comment_id): return self._query_zendesk( endpoint=EndpointFactory('tickets').comments, object_type='comment', id=comment_id) def _get_organization(self, organization_id): return self._query_zendesk(endpoint=EndpointFactory('organizations'), object_type='organization', id=organization_id) def _get_group(self, group_id): return self._query_zendesk(endpoint=EndpointFactory('groups'), object_type='group', id=group_id) def _get_brand(self, brand_id): return self._query_zendesk(endpoint=EndpointFactory('brands'), object_type='brand', id=brand_id) def _get_ticket(self, ticket_id): return self._query_zendesk(endpoint=EndpointFactory('tickets'), object_type='ticket', id=ticket_id) def _get_sharing_agreements(self, sharing_agreement_ids): sharing_agreements = [] for _id in sharing_agreement_ids: sharing_agreement = self._query_zendesk( endpoint=EndpointFactory('sharing_agreements'), object_type='sharing_agreement', id=_id) if sharing_agreement: sharing_agreements.append(sharing_agreement) return sharing_agreements def _get_problem(self, problem_id): return self._query_zendesk(EndpointFactory('tickets'), 'ticket', id=problem_id) # This will be deprecated soon - https://developer.zendesk.com/rest_api/docs/web-portal/forums def _get_forum(self, forum_id): return forum_id def _get_restricted_brands(self, brand_ids): for brand_id in brand_ids: yield self._query_zendesk(EndpointFactory('brands'), 'brand', id=brand_id) def _get_restricted_organizations(self, organization_ids): for org_id in organization_ids: yield self._query_zendesk(EndpointFactory("organizations"), 'organization', id=org_id) def _get_ticket_fields(self, ticket_field_ids): for field_id in ticket_field_ids: yield self._query_zendesk(EndpointFactory('ticket_fields'), 'ticket_field', id=field_id) def _get_view(self, view_id): return self._query_zendesk(EndpointFactory('views'), 'view', id=view_id) def _get_topic(self, forum_topic_id): return self._query_zendesk(EndpointFactory('help_centre').topics, 'topic', id=forum_topic_id) def _get_category(self, category_id): return self._query_zendesk(EndpointFactory('help_centre').categories, 'category', id=category_id) def _get_macro(self, macro_id): return self._query_zendesk(EndpointFactory('macros'), 'macro', id=macro_id) def _get_sla(self, sla_id): return self._query_zendesk(EndpointFactory('sla_policies'), 'sla_policy', id=sla_id) def _get_department(self, department_id): return self._query_zendesk(EndpointFactory('chats').departments, 'department', id=department_id) def _get_zendesk_ticket(self, ticket_id): return self._query_zendesk(EndpointFactory('tickets'), 'ticket', id=ticket_id) def _get_user_segment(self, user_segment_id): return self._query_zendesk( EndpointFactory('help_centre').user_segments, 'segment', id=user_segment_id) def _get_section(self, section_id): return self._query_zendesk(EndpointFactory('help_centre').sections, 'section', id=section_id) 
    def _get_article(self, article_id):
        return self._query_zendesk(EndpointFactory('help_centre').articles,
                                   'article',
                                   id=article_id)

    def _get_custom_role(self, custom_role_id):
        return self._query_zendesk(EndpointFactory('custom_agent_roles'),
                                   'custom_role',
                                   id=custom_role_id)

    # TODO: Implement these methods when the NPS API is done
    def _get_delivery(self, delivery_id):
        pass

    def _get_survey(self, survey_id):
        pass

    def _get_permission_group(self, permission_group_id):
        return self._query_zendesk(
            EndpointFactory('help_centre').permission_groups,
            'permission_group',
            id=permission_group_id)

    def _get_default_locale(self, locale_id):
        return self._query_zendesk(EndpointFactory('locales'),
                                   'locale',
                                   id=locale_id)

    # There are no methods to get an invocation by its id, so just fake it to pass nosetests
    def _get_invocation(self, invocation_id):
        return None


class CRUDApi(Api):
    """
    CRUDApi supports create/update/delete operations
    """
    def create(self, api_objects, **kwargs):
        """
        Create (POST) one or more API objects. Before being submitted to
        Zendesk the object or objects will be serialized to JSON.

        :param api_objects: object or objects to create
        """
        return CRUDRequest(self).post(api_objects, **kwargs)

    def update(self, api_objects, **kwargs):
        """
        Update (PUT) one or more API objects. Before being submitted to
        Zendesk the object or objects will be serialized to JSON.

        :param api_objects: object or objects to update
        """
        return CRUDRequest(self).put(api_objects)

    def delete(self, api_objects, **kwargs):
        """
        Delete (DELETE) one or more API objects. After successfully deleting
        the objects from the API they will also be removed from the relevant
        Zenpy caches.

        :param api_objects: object or objects to delete
        """
        return CRUDRequest(self).delete(api_objects)


class CRUDExternalApi(CRUDApi):
    """
    The CRUDExternalApi exposes some extra methods for operating on external ids.
    """
    def update_by_external_id(self, api_objects):
        """
        Update (PUT) one or more API objects by external_id.

        :param api_objects:
        """
        if not isinstance(api_objects, Iterable):
            api_objects = [api_objects]
        return CRUDRequest(self).put(api_objects, update_many_external=True)

    def delete_by_external_id(self, api_objects):
        """
        Delete (DELETE) one or more API objects by external_id.

        :param api_objects:
        """
        if not isinstance(api_objects, Iterable):
            api_objects = [api_objects]
        return CRUDRequest(self).delete(api_objects,
                                        destroy_many_external=True)


class SuspendedTicketApi(Api):
    """
    The SuspendedTicketApi adds some SuspendedTicket specific functionality
    """
    def recover(self, tickets):
        """
        Recover (PUT) one or more SuspendedTickets.

        :param tickets: one or more SuspendedTickets to recover
        """
        return SuspendedTicketRequest(self).put(tickets)

    def delete(self, tickets):
        """
        Delete (DELETE) one or more SuspendedTickets.

        :param tickets: one or more SuspendedTickets to delete
        """
        return SuspendedTicketRequest(self).delete(tickets)


class TaggableApi(Api):
    """
    TaggableApi supports getting, setting, adding and deleting tags.
    """
    def add_tags(self, id, tags):
        """
        Add (PUT) one or more tags.

        :param id: the id of the object to tag
        :param tags: array of tags to apply to object
        """
        return TagRequest(self).put(tags, id)

    def set_tags(self, id, tags):
        """
        Set (POST) one or more tags.

        :param id: the id of the object to tag
        :param tags: array of tags to apply to object
        """
        return TagRequest(self).post(tags, id)

    def delete_tags(self, id, tags):
        """
        Delete (DELETE) one or more tags.
        :param id: the id of the object to delete tag from
        :param tags: array of tags to delete from object
        """
        return TagRequest(self).delete(tags, id)

    def tags(self, ticket_id):
        """ Lists the most popular recent tags in decreasing popularity from a specific ticket. """
        return self._query_zendesk(self.endpoint.tags, 'tag', id=ticket_id)


# noinspection PyShadowingBuiltins
class RateableApi(Api):
    """
    Supports rating with a SatisfactionRating
    """
    def rate(self, id, rating):
        """
        Add (POST) a satisfaction rating.

        :param id: id of object to rate
        :param rating: SatisfactionRating
        """
        return RateRequest(self).post(rating, id)


class IncrementalApi(Api):
    """
    IncrementalApi supports the incremental endpoint.
    """
    def incremental(self, start_time, include=None, per_page=None):
        """
        Retrieve bulk data from the incremental API.

        :param include: list of objects to sideload. `Side-loading API Docs
            <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__.
        :param start_time: The time of the oldest object you are interested in.
        """
        return self._query_zendesk(self.endpoint.incremental,
                                   self.object_type,
                                   start_time=start_time,
                                   include=include,
                                   per_page=per_page)


class ChatIncrementalApi(Api):
    """
    ChatIncrementalApi supports the chat incremental endpoint.
    """
    def incremental(self, start_time, **kwargs):
        """
        Retrieve bulk data from the chat incremental API.

        :param fields: list of fields to retrieve. `Chat API Docs
            <https://developer.zendesk.com/rest_api/docs/chat/incremental_export#usage-notes-resource-expansion>`__.
        :param start_time: The time of the oldest object you are interested in.
        """
        return self._query_zendesk(self.endpoint.incremental,
                                   self.object_type,
                                   start_time=start_time,
                                   **kwargs)


class UserIdentityApi(Api):
    def __init__(self, config):
        super(UserIdentityApi,
              self).__init__(config,
                             object_type='identity',
                             endpoint=EndpointFactory('users').identities)

    @extract_id(User, Identity)
    def show(self, user, identity):
        """
        Show the specified identity for the specified user.

        :param user: user id or User object
        :param identity: Identity object or id
        :return: Identity
        """
        url = self._build_url(self.endpoint.show(user, identity))
        return self._get(url)

    @extract_id(User)
    def create(self, user, identity):
        """
        Create an additional identity for the specified user

        :param user: User id or object
        :param identity: Identity object to be created
        """
        return UserIdentityRequest(self).post(user_id=user, identity=identity)

    @extract_id(User)
    def update(self, user, identity):
        """
        Update specified identity for the specified user

        :param user: User object or id
        :param identity: Identity object to be updated.
        :return: The updated Identity
        """
        return UserIdentityRequest(self).put(self.endpoint.update,
                                             user_id=user,
                                             identity_id=identity.id,
                                             identity=identity)

    @extract_id(User, Identity)
    def make_primary(self, user, identity):
        """
        Set the specified identity as primary for the specified user.

        :param user: User object or id
        :param identity: Identity object or id
        :return: list of user's Identities
        """
        return UserIdentityRequest(self).put(self.endpoint.make_primary,
                                             user_id=user,
                                             identity_id=identity)

    @extract_id(User, Identity)
    def request_verification(self, user, identity):
        """
        Sends the user a verification email with a link to verify ownership
        of the email address.
:param user: User id or object :param identity: Identity id or object :return: requests Response object """ return UserIdentityRequest(self).put( self.endpoint.request_verification, user_id=user, identity_id=identity) @extract_id(User, Identity) def verify(self, user, identity): """ Verify an identity for a user :param user: User id or object :param identity: Identity id or object :return: the verified Identity """ return UserIdentityRequest(self).put(self.endpoint.verify, user_id=user, identity_id=identity) @extract_id(User, Identity) def delete(self, user, identity): """ Deletes the identity for a given user :param user: User id or object :param identity: Identity id or object :return: requests Response object """ return UserIdentityRequest(self).delete(user, identity) class UserSearchApi(Api): def __init__(self, config): super(UserSearchApi, self).__init__(config, object_type='user', endpoint=EndpointFactory('users').search) def __call__(self, query=None, external_id=None): """ Exposes: GET /api/v2/users/search.json?query={query} GET /api/v2/users/search.json?external_id={external_id} For more info see: https://developer.zendesk.com/rest_api/docs/support/users#search-users :param query: str of some user property like email :param external_id: external_id of resource """ try: assert query or external_id assert not (query and external_id) except AssertionError: raise ZenpyException( "Must provide either `query` or `external_id` arg to search. Not Both." ) if query: params = dict(query=query) if external_id: params = dict(external_id=external_id) url = self._build_url(self.endpoint()) return self._get(url, params=params) class UserApi(IncrementalApi, CRUDExternalApi, TaggableApi): """ The UserApi adds some User specific functionality """ def __init__(self, config): super(UserApi, self).__init__(config, object_type='user') self.identities = UserIdentityApi(config) self.search = UserSearchApi(config) @extract_id(User) def groups(self, user, include=None): """ Retrieve the groups for this user. :param include: list of objects to sideload. `Side-loading API Docs <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__. :param user: User object or id """ return self._query_zendesk(self.endpoint.groups, 'group', id=user, include=include) @extract_id(User) def organizations(self, user, include=None): """ Retrieve the organizations for this user. :param include: list of objects to sideload. `Side-loading API Docs <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__. :param user: User object or id """ return self._query_zendesk(self.endpoint.organizations, 'organization', id=user, include=include) @extract_id(User) def requested(self, user, include=None): """ Retrieve the requested tickets for this user. :param include: list of objects to sideload. `Side-loading API Docs <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__. :param user: User object or id """ return self._query_zendesk(self.endpoint.requested, 'ticket', id=user, include=include) @extract_id(User) def cced(self, user, include=None): """ Retrieve the tickets this user is cc'd into. :param include: list of objects to sideload. `Side-loading API Docs <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__. :param user: User object or id """ return self._query_zendesk(self.endpoint.cced, 'ticket', id=user, include=include) @extract_id(User) def assigned(self, user, include=None): """ Retrieve the assigned tickets for this user. :param include: list of objects to sideload. 
`Side-loading API Docs <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__. :param user: User object or id """ return self._query_zendesk(self.endpoint.assigned, 'ticket', id=user, include=include) @extract_id(User) def group_memberships(self, user, include=None): """ Retrieve the group memberships for this user. :param include: list of objects to sideload. `Side-loading API Docs <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__. :param user: User object or id """ return self._query_zendesk(self.endpoint.group_memberships, 'group_membership', id=user, include=include) def requests(self, **kwargs): return self._query_zendesk(self.endpoint.requests, 'request', **kwargs) @extract_id(User) def related(self, user): """ Returns the UserRelated information for the requested User :param user: User object or id :return: UserRelated """ return self._query_zendesk(self.endpoint.related, 'user_related', id=user) def me(self, include=None): """ Return the logged in user :param include: list of objects to sideload. `Side-loading API Docs <https://developer.zendesk.com/rest_api/docs/core/side_loading#abilities>`__. """ return self._query_zendesk(self.endpoint.me, 'user', include=include) @extract_id(User) def merge(self, source_user, dest_user): """ Merge the user provided in source_user into dest_user :param source_user: User object or id of user to be merged :param dest_user: User object or id to merge into :return: The merged User """ return UserMergeRequest(self).put(source_user, dest_user) @extract_id(User) def user_fields(self, user): """ Retrieve the user fields for this user. :param user: User object or id """ return self._query_zendesk(self.endpoint.user_fields, 'user_field', id=user) @extract_id(User) def organization_memberships(self, user): """ Retrieve the organization memberships for this user. :param user: User object or id """ return self._query_zendesk(self.endpoint.organization_memberships, 'organization_membership', id=user) def create_or_update(self, users): """ Creates a user (POST) if the user does not already exist, or updates an existing user identified by e-mail address or external ID. :param users: User object or list of User objects :return: the created/updated User or a JobStatus object if a list was passed """ return CRUDRequest(self).post(users, create_or_update=True) @extract_id(User) def permanently_delete(self, user): """ Permanently delete user. User should be softly deleted first. Zendesk API `Reference <https://developer.zendesk.com/rest_api/docs/core/users#permanently-delete-user>`__. Note: This endpoint does not support multiple ids or list of `User` objects. :param user: User object or id. :return: User object with `permanently_deleted` status """ url = self._build_url(self.endpoint.deleted(id=user)) deleted_user = self._delete(url) self.cache.delete(deleted_user) return deleted_user def deleted(self): """ List Deleted Users. These are users that have been deleted but not permanently yet. Zendesk API `Reference <https://developer.zendesk.com/rest_api/docs/core/users#permanently-delete-user>`__. :return: """ return self._get(self._build_url(self.endpoint.deleted())) @extract_id(User) def skips(self, user): """ Skips for user. Zendesk API `Reference <https://developer.zendesk.com/rest_api/docs/core/ticket_skips>`__. """ return self._get(self._build_url(self.endpoint.skips(id=user))) @extract_id(User) def set_password(self, user, password): """ Sets the password for the passed user. 
        Zendesk API `Reference <https://developer.zendesk.com/rest_api/docs/support/users#set-a-users-password>`__.

        :param user: User object or id
        :param password: new password
        """
        url = self._build_url(self.endpoint.set_password(id=user))
        return self._post(url, payload=dict(password=password))


class AttachmentApi(Api):
    def __init__(self, config):
        super(AttachmentApi, self).__init__(config, object_type='attachment')

    def __call__(self, *args, **kwargs):
        if 'id' not in kwargs:
            raise ZenpyException("Attachment endpoint requires an id")
        return Api.__call__(self, **kwargs)

    def upload(self, fp, token=None, target_name=None, content_type=None):
        """
        Upload a file to Zendesk.

        :param fp: file object, StringIO instance, content, or file path to be
                   uploaded
        :param token: upload token for uploading multiple files
        :param target_name: name of the file inside Zendesk
        :return: :class:`Upload` object containing a token and other information, see
                 Zendesk API `Reference <https://developer.zendesk.com/rest_api/docs/core/attachments#uploading-files>`__.
        """
        return UploadRequest(self).post(fp,
                                        token=token,
                                        target_name=target_name,
                                        content_type=content_type)

    def download(self, attachment_id, destination=None):
        """
        Download an attachment from Zendesk.

        :param attachment_id: id of the attachment to download
        :param destination: destination path. If a directory, the file will be
                            placed in the directory with the filename from the
                            Attachment object. If None, write to a BytesIO object.
        :return: the path the file was written to or the BytesIO object
        """
        attachment = self(id=attachment_id)
        if not destination:
            return self._write_to_stream(attachment.content_url, BytesIO())
        if os.path.isdir(destination):
            destination = os.path.join(destination, attachment.file_name)
        with open(destination, 'wb') as f:
            self._write_to_stream(attachment.content_url, f)
        return destination

    def _write_to_stream(self, source_url, stream):
        r = self.session.get(source_url, stream=True)
        for chunk in r.iter_content(chunk_size=None):
            if chunk:
                stream.write(chunk)
        return stream


class EndUserApi(CRUDApi):
    """
    EndUsers can only update.
    """
    def __init__(self, config):
        super(EndUserApi, self).__init__(config,
                                         object_type='user',
                                         endpoint=EndpointFactory('end_user'))

    def __call__(self, *args, **kwargs):
        raise ZenpyException("EndUserApi is not callable!")

    @extract_id(User)
    def show(self, user):
        return self._query_zendesk(self.endpoint, object_type='user', id=user)

    def delete(self, api_objects, **kwargs):
        raise ZenpyException("EndUsers cannot delete!")

    def create(self, api_objects, **kwargs):
        raise ZenpyException("EndUsers cannot create!")


class OrganizationApi(TaggableApi, IncrementalApi, CRUDExternalApi):
    def __init__(self, config):
        super(OrganizationApi, self).__init__(config,
                                              object_type='organization')

    @extract_id(Organization)
    def users(self, organization, include=None):
        return self._get(
            self._build_url(
                self.endpoint.users(id=organization, include=include)))

    @extract_id(Organization)
    def organization_fields(self, organization):
        """
        Retrieve the organization fields for this organization.

        :param organization: Organization object or id
        """
        return self._query_zendesk(self.endpoint.organization_fields,
                                   'organization_field',
                                   id=organization)

    @extract_id(Organization)
    def organization_memberships(self, organization):
        """
        Retrieve the organization memberships for this organization.
        :param organization: Organization object or id
        """
        return self._query_zendesk(self.endpoint.organization_memberships,
                                   'organization_membership',
                                   id=organization)

    def external(self, external_id, include=None):
        """
        Locate an Organization by its external_id attribute.

        :param include: list of objects to sideload. `Side-loading API Docs
            <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__.
        :param external_id: external id of organization
        """
        return self._query_zendesk(self.endpoint.external,
                                   'organization',
                                   id=external_id,
                                   include=include)

    def requests(self, **kwargs):
        return self._query_zendesk(self.endpoint.requests, 'request',
                                   **kwargs)

    def create_or_update(self, organization):
        """
        Creates an organization if it doesn't already exist, or updates an
        existing organization identified by ID or external ID

        :param organization: Organization object
        :return: the created/updated Organization
        """
        return CRUDRequest(self).post(organization, create_or_update=True)


class OrganizationMembershipApi(CRUDApi):
    """
    The OrganizationMembershipApi allows the creation and deletion of
    Organization Memberships
    """
    def __init__(self, config):
        super(OrganizationMembershipApi,
              self).__init__(config, object_type='organization_membership')

    def update(self, items, **kwargs):
        raise ZenpyException("You cannot update Organization Memberships!")


class OrganizationFieldsApi(CRUDApi):
    def __init__(self, config):
        super(OrganizationFieldsApi,
              self).__init__(config, object_type='organization_field')

    @extract_id(OrganizationField)
    def reorder(self, organization_fields):
        """
        Reorder organization fields.

        :param organization_fields: list of OrganizationField objects or ids
            in the desired order.
        """
        return OrganizationFieldReorderRequest(self).put(organization_fields)


class SatisfactionRatingApi(Api):
    def __init__(self, config):
        super(SatisfactionRatingApi,
              self).__init__(config, object_type='satisfaction_rating')

    @extract_id(Ticket)
    def create(self, ticket, satisfaction_rating):
        """
        Create/update a Satisfaction Rating for a ticket.

        :param ticket: Ticket object or id
        :param satisfaction_rating: SatisfactionRating object.
        """
        return SatisfactionRatingRequest(self).post(ticket,
                                                    satisfaction_rating)


class MacroApi(CRUDApi):
    def __init__(self, config):
        super(MacroApi, self).__init__(config, object_type='macro')

    @extract_id(Macro)
    def apply(self, macro):
        """
        Show what a macro would do
        Zendesk API `Reference <https://developer.zendesk.com/rest_api/docs/core/macros#show-changes-to-ticket>`__.

        :param macro: Macro object or id.
        """
        return self._query_zendesk(self.endpoint.apply, 'result', id=macro)


class TicketApi(RateableApi, TaggableApi, IncrementalApi, CRUDApi):
    """
    The TicketApi adds some Ticket specific functionality
    """
    def __init__(self, config):
        super(TicketApi, self).__init__(config, object_type='ticket')

    @extract_id(Organization)
    def organizations(self, organization, include=None):
        """
        Retrieve the tickets for this organization.

        :param include: list of objects to sideload. `Side-loading API Docs
            <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__.
        :param organization: Organization object or id
        """
        return self._query_zendesk(self.endpoint.organizations,
                                   'ticket',
                                   id=organization,
                                   include=include)

    def recent(self, include=None):
        """
        Retrieve the most recent tickets
        """
        return self._query_zendesk(self.endpoint.recent,
                                   'ticket',
                                   id=None,
                                   include=include)

    @extract_id(Ticket)
    def comments(self, ticket, include_inline_images=False):
        """
        Retrieve the comments for a ticket.
        :param ticket: Ticket object or id
        :param include_inline_images: Boolean. If `True`, inline image
            attachments will be returned in each comment's `attachments` field
            alongside non-inline attachments
        """
        return self._query_zendesk(
            self.endpoint.comments,
            'comment',
            id=ticket,
            include_inline_images=repr(include_inline_images).lower())

    @extract_id(Ticket, TicketComment)
    def comment_redact(self, ticket, comment, text):
        """
        Redact text from ticket comment. `See Zendesk API docs <https://developer.zendesk.com/rest_api/docs/support/ticket_comments#redact-string-in-comment>`_

        :param ticket: Ticket object or id
        :param comment: Comment object or id
        :param text: Text to be redacted from comment
        :return Comment: Ticket Comment object
        """
        return self._put(
            self._build_url(self.endpoint.comments.redact(ticket, comment)),
            {'text': text})

    def permanently_delete(self, tickets):
        """
        Permanently delete ticket. `See Zendesk API docs <https://developer.zendesk.com/rest_api/docs/support/tickets#delete-ticket-permanently>`_

        Ticket should be softly deleted first with regular `delete` method.

        :param tickets: Ticket object or list of tickets objects
        :return: JobStatus object
        """
        endpoint_kwargs = dict()
        if isinstance(tickets, Iterable):
            endpoint_kwargs['destroy_ids'] = [i.id for i in tickets]
        else:
            endpoint_kwargs['id'] = tickets.id
        url = self._build_url(self.endpoint.deleted(**endpoint_kwargs))
        deleted_ticket_job_id = self._delete(url)
        self.cache.delete(tickets)
        return deleted_ticket_job_id

    def deleted(self):
        """
        List Deleted Tickets.

        These are tickets that have been deleted but not yet permanently.
        See Permanently delete ticket in `Zendesk API docs <https://developer.zendesk.com/rest_api/docs/support/tickets#delete-ticket-permanently>`_

        :return: ResultGenerator of Ticket objects, of length 0 if no deleted
            tickets exist.
        """
        return self._get(self._build_url(self.endpoint.deleted()))

    def events(self, start_time, include=None, per_page=None):
        """
        Retrieve TicketEvents

        :param include: list of objects to sideload. `Side-loading API Docs
            <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__.
        :param start_time: time to retrieve events from.
        """
        return self._query_zendesk(self.endpoint.events,
                                   'ticket_event',
                                   start_time=start_time,
                                   include=include,
                                   per_page=per_page)

    @extract_id(Ticket)
    def audits(self, ticket=None, include=None, **kwargs):
        """
        Retrieve TicketAudits. If ticket is passed, return the audits for that
        specific ticket. If ticket is None, a TicketCursorGenerator is returned
        to handle pagination. This generator differs from the other Zenpy
        generators as it is cursor based, allowing you to change the direction
        that you are consuming objects. This is done with the reversed() python
        method. For example:

        .. code-block:: python

            for audit in reversed(zenpy_client.tickets.audits()):
                print(audit)

        See the `Zendesk docs <https://developer.zendesk.com/rest_api/docs/core/ticket_audits#pagination>`__ for information on additional parameters.

        :param include: list of objects to sideload. `Side-loading API Docs
            <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__.
        :param ticket: Ticket object or id
        """
        if ticket is not None:
            return self._query_zendesk(self.endpoint.audits,
                                       'ticket_audit',
                                       id=ticket,
                                       include=include)
        else:
            return self._query_zendesk(self.endpoint.audits.cursor,
                                       'ticket_audit',
                                       include=include,
                                       **kwargs)

    @extract_id(Ticket)
    def incidents(self, ticket):
        """
        Retrieve incidents related to Ticket.
:param ticket: Ticket object or id """ return self._query_zendesk(self.endpoint.incidents, 'ticket', id=ticket) @extract_id(Ticket) def metrics(self, ticket): """ Retrieve TicketMetric. :param ticket: Ticket object or id """ return self._query_zendesk(self.endpoint.metrics, 'ticket_metric', id=ticket) def metrics_incremental(self, start_time): """ Retrieve TicketMetric incremental :param start_time: time to retrieve events from. """ return self._query_zendesk(self.endpoint.metrics.incremental, 'ticket_metric_events', start_time=start_time) @extract_id(Ticket, Macro) def show_macro_effect(self, ticket, macro): """ Apply macro to ticket. Returns what it *would* do, does not alter the ticket. :param ticket: Ticket or ticket id to target :param macro: Macro or macro id to use """ url = self._build_url(self.endpoint.macro(ticket, macro)) macro_effect = self._get(url) macro_effect._set_dirty() return macro_effect @extract_id(Ticket) def merge(self, target, source, target_comment=None, source_comment=None): """ Merge the ticket(s) or ticket ID(s) in source into the target ticket. :param target: ticket id or object to merge tickets into :param source: ticket id, object or list of tickets or ids to merge into target :param source_comment: optional comment for the source ticket(s) :param target_comment: optional comment for the target ticket :return: a JobStatus object """ return TicketMergeRequest(self).post(target, source, target_comment=target_comment, source_comment=source_comment) @extract_id(Ticket) def skips(self, ticket): """ Skips for ticket See Zendesk API `Reference <https://developer.zendesk.com/rest_api/docs/core/ticket_skips>`__. """ return self._get(self._build_url(self.endpoint.skips(id=ticket))) def incremental(self, start_time=None, paginate_by_time=True, cursor=None, include=None, per_page=None): """ Incrementally retrieve Tickets. If paginate_by_time is True, a ZendeskResultGenerator is returned to handle time based pagination. This is defaulted to True for backwards compatibility but is not recommended by Zendesk. If paginate_by_time is False, a TicketCursorGenerator is returned to handle cursor based pagination. This is recommended by Zendesk. The TicketCursorGenerator allows you to change the direction that you are consuming objects. This is done with the reversed() python method. For example: .. code-block:: python for ticket in reversed(zenpy_client.tickets.incremental(cursor='xxx')): print(ticket) See the `Zendesk docs <https://developer.zendesk.com/rest_api/docs/support/incremental_export#cursor-based-incremental-exports>`__ for information on additional parameters. :param start_time: the time of the oldest object you are interested in, applies to both time/cursor based pagination. :param paginate_by_time: True to use time based pagination, False to use cursor based pagination. :param cursor: cursor value of the page you are interested in, can't be set with start_time. :param include: list of objects to sideload. `Side-loading API Docs <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__. 
        :param per_page: number of results per page, up to max 1000
        """
        if (all_are_none(start_time, cursor)
                or all_are_not_none(start_time, cursor)):
            raise ValueError(
                'You must set either start_time or cursor but not both')
        if start_time and paginate_by_time is True:
            return super(TicketApi, self).incremental(start_time=start_time,
                                                      include=include,
                                                      per_page=per_page)
        elif start_time and paginate_by_time is False:
            return self._query_zendesk(self.endpoint.incremental.cursor_start,
                                       self.object_type,
                                       start_time=start_time,
                                       include=include,
                                       per_page=per_page)
        elif cursor and paginate_by_time is False:
            return self._query_zendesk(self.endpoint.incremental.cursor,
                                       self.object_type,
                                       cursor=cursor,
                                       include=include,
                                       per_page=per_page)
        else:
            raise ValueError(
                "Can't set cursor param and paginate_by_time=True")


class SkipApi(CRUDApi):
    def __init__(self, config):
        super(SkipApi, self).__init__(config,
                                      object_type='skip',
                                      endpoint=EndpointFactory('skips'))

    def delete(self, api_objects, **kwargs):
        raise NotImplementedError("Cannot delete Skip objects")

    def update(self, api_objects, **kwargs):
        raise NotImplementedError("Cannot update Skip objects")


class TicketImportAPI(CRUDApi):
    def __init__(self, config):
        super(TicketImportAPI,
              self).__init__(config,
                             object_type='ticket',
                             endpoint=EndpointFactory('ticket_import'))

    def __call__(self, *args, **kwargs):
        raise ZenpyException("This endpoint cannot be called directly!")

    def update(self, items, **kwargs):
        raise ZenpyException(
            "You cannot update objects using ticket_import endpoint!")

    def delete(self, api_objects, **kwargs):
        raise ZenpyException(
            "You cannot delete objects using the ticket_import endpoint!")


class TicketCustomFieldOptionApi(Api):
    def __init__(self, config):
        super(TicketCustomFieldOptionApi,
              self).__init__(config,
                             object_type='custom_field_option',
                             endpoint=EndpointFactory('ticket_field_options'))

    @extract_id(TicketField, CustomFieldOption)
    def show(self, ticket_field, custom_field_option):
        """
        Return CustomFieldOption

        :param ticket_field: TicketField object or id
        :param custom_field_option: CustomFieldOption or id
        """
        return self._query_zendesk(self.endpoint.show, 'custom_field_option',
                                   ticket_field, custom_field_option)

    @extract_id(TicketField)
    def create_or_update(self, ticket_field, custom_field_option):
        """
        Create or update a CustomFieldOption for a TicketField. If the passed
        CustomFieldOption has no id, a new option will be created, otherwise
        it is updated - See: Zendesk API `Reference
        <https://developer.zendesk.com/rest_api/docs/core/ticket_fields#create-or-update-a-ticket-field-option>`__.

        :param ticket_field: TicketField object or id
        :param custom_field_option: CustomFieldOption object
        """
        return TicketFieldOptionRequest(self).post(ticket_field,
                                                   custom_field_option)

    @extract_id(TicketField, CustomFieldOption)
    def delete(self, ticket_field, custom_field_option):
        """
        Delete a CustomFieldOption.

        :param ticket_field: TicketField object or id.
        :param custom_field_option: CustomFieldOption
        """
        return TicketFieldOptionRequest(self).delete(ticket_field,
                                                     custom_field_option)


class TicketFieldApi(CRUDApi):
    def __init__(self, config):
        super(TicketFieldApi, self).__init__(config, 'ticket_field')
        self.options = TicketCustomFieldOptionApi(config)


class VariantApi(Api):
    def __init__(self, config, endpoint):
        super(VariantApi, self).__init__(config,
                                         object_type='variant',
                                         endpoint=endpoint)

    @extract_id(Item, Variant)
    def show(self, item, variant):
        """
        Show a variant.
:param item: Item object or id :param variant: Variant object or id :return: """ url = self._build_url(self.endpoint.show(item, variant)) return self._get(url) @extract_id(Item) def create(self, item, variant): """ Create one or more variants. :param item: Item object or id :param variant: Variant object or list of objects """ return VariantRequest(self).post(item, variant) @extract_id(Item) def update(self, item, variant): """ Update one or more variants. :param item: Item object or id :param variant: Variant object or list of objects """ return VariantRequest(self).put(item, variant) @extract_id(Item, Variant) def delete(self, item, variant): """ Delete a variant. :param item: Item object or id :param variant: Variant object or id """ return VariantRequest(self).delete(item, variant) class DynamicContentApi(CRUDApi): def __init__(self, config): super(DynamicContentApi, self).__init__(config, object_type='item', endpoint=EndpointFactory('dynamic_contents')) self.variants = VariantApi(config, endpoint=self.endpoint.variants) class TriggerApi(CRUDApi): pass class AutomationApi(CRUDApi): pass class TargetApi(CRUDApi): pass class BrandApi(CRUDApi): pass class TicketFormApi(CRUDApi): pass class RequestAPI(CRUDApi): def __init__(self, config): super(RequestAPI, self).__init__(config, object_type='request') def open(self): """ Return all open requests """ return self._query_zendesk(self.endpoint.open, 'request') def solved(self): """ Return all solved requests """ return self._query_zendesk(self.endpoint.solved, 'request') def ccd(self): """ Return all ccd requests """ return self._query_zendesk(self.endpoint.ccd, 'request') def comments(self, request_id): """ Return comments for request """ return self._query_zendesk(self.endpoint.comments, 'comment', id=request_id) def delete(self, api_objects, **kwargs): raise ZenpyException("You cannot delete requests!") def search(self, *args, **kwargs): """ Search for requests. See the `Zendesk docs <https://developer.zendesk.com/rest_api/docs/core/requests#searching-requests>`__ for more information on the syntax. """ return self._query_zendesk(self.endpoint.search, 'request', *args, **kwargs) class SharingAgreementAPI(CRUDApi): def __init__(self, config): super(SharingAgreementAPI, self).__init__(config, object_type='sharing_agreement') class GroupApi(CRUDApi): def __init__(self, config): super(GroupApi, self).__init__(config, object_type='group') @extract_id(Group) def users(self, group, include=None): return self._get( self._build_url(self.endpoint.users(id=group, include=include))) @extract_id(Group) def memberships(self, group, include=None): """ Return the GroupMemberships for this group. :param include: list of objects to sideload. `Side-loading API Docs <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__. :param group: Group object or id """ return self._get( self._build_url( self.endpoint.memberships(id=group, include=include))) def assignable(self): """ Return Groups that are assignable. """ return self._get(self._build_url(self.endpoint.assignable())) @extract_id(Group) def memberships_assignable(self, group, include=None): """ Return memberships that are assignable for this group. :param include: list of objects to sideload. `Side-loading API Docs <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__. 
        :param group: Group object or id
        """
        return self._get(
            self._build_url(
                self.endpoint.memberships_assignable(id=group,
                                                     include=include)))


class ViewApi(CRUDApi):
    def __init__(self, config):
        super(ViewApi, self).__init__(config, object_type='view')

    def active(self, include=None):
        """
        Return all active views.
        """
        return self._get(self._build_url(
            self.endpoint.active(include=include)))

    def compact(self, include=None):
        """
        Return compact views - See: Zendesk API `Reference
        <https://developer.zendesk.com/rest_api/docs/core/views#list-views---compact>`__
        """
        return self._get(
            self._build_url(self.endpoint.compact(include=include)))

    @extract_id(View)
    def execute(self, view, include=None):
        """
        Execute a view.

        :param include: list of objects to sideload. `Side-loading API Docs
            <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__.
        :param view: View or view id
        """
        return self._get(
            self._build_url(self.endpoint.execute(id=view, include=include)))

    @extract_id(View)
    def tickets(self, view, include=None):
        """
        Return the tickets in a view.

        :param include: list of objects to sideload. `Side-loading API Docs
            <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__.
        :param view: View or view id
        """
        return self._get(
            self._build_url(self.endpoint.tickets(id=view, include=include)))

    @extract_id(View)
    def count(self, view, include=None):
        """
        Return a ViewCount for a view.

        :param include: list of objects to sideload. `Side-loading API Docs
            <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__.
        :param view: View or view id
        """
        return self._get(
            self._build_url(self.endpoint.count(id=view, include=include)))

    @extract_id(View)
    def count_many(self, views, include=None):
        """
        Return many ViewCounts.

        :param include: list of objects to sideload. `Side-loading API Docs
            <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__.
        :param views: iterable of View or view ids
        """
        return self._get(
            self._build_url(self.endpoint(count_many=views, include=include)))

    @extract_id(View)
    def export(self, view, include=None):
        """
        Export a view. Returns an Export object.

        :param include: list of objects to sideload. `Side-loading API Docs
            <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__.
        :param view: View or view id
        :return:
        """
        return self._get(
            self._build_url(self.endpoint.export(id=view, include=include)))

    def search(self, *args, **kwargs):
        """
        Search views. See Zendesk API `Reference
        <https://developer.zendesk.com/rest_api/docs/core/views#search-views>`__.

        :param args: query is the only accepted arg.
        :param kwargs: search parameters
        """
        return self._get(self._build_url(self.endpoint.search(*args,
                                                              **kwargs)))

    # TODO: https://github.com/facetoe/zenpy/issues/123
    def _get_sla(self, sla_id):
        pass


class GroupMembershipApi(CRUDApi):
    def __init__(self, config):
        super(GroupMembershipApi,
              self).__init__(config, object_type='group_membership')

    def update(self, api_objects, **kwargs):
        raise ZenpyException("Cannot update GroupMemberships")

    def assignable(self):
        """
        Return GroupMemberships that are assignable.
        """
        return self._get(self._build_url(self.endpoint.assignable()))

    @extract_id(User, GroupMembership)
    def make_default(self, user, group_membership):
        """
        Set the passed GroupMembership as default for the specified user.
        :param user: User object or id
        :param group_membership: GroupMembership object or id
        """
        return self._put(self._build_url(
            self.endpoint.make_default(user, group_membership)),
            payload={})


class JiraLinkApi(CRUDApi):
    def __init__(self, config):
        super(JiraLinkApi, self).__init__(config, object_type='link')
        self.api_prefix = "api"

    def delete(self, link):
        url = self._build_url(self.endpoint(id=link.id), delete=True)
        deleted_user = self._delete(url)
        self.cache.delete(deleted_user)
        return deleted_user

    def update(self, api_objects, **kwargs):
        raise ZenpyException("Cannot update Jira Links!")

    def _build_url(self, endpoint, delete=False):
        if delete:
            return super(JiraLinkApi,
                         self)._build_url(endpoint).replace(".json", "")
        elif not endpoint.path == 'services/jira/links.json':
            return super(JiraLinkApi, self)._build_url(endpoint, 'api/v2')
        else:
            return super(JiraLinkApi, self)._build_url(endpoint)


class SlaPolicyApi(CRUDApi):
    def __init__(self, config):
        super(SlaPolicyApi, self).__init__(config, object_type='sla_policy')

    def create(self, api_objects, **kwargs):
        if isinstance(api_objects, Iterable):
            raise ZenpyException("Cannot create multiple sla policies!")
        super(SlaPolicyApi, self).create(api_objects, **kwargs)

    def update(self, api_objects, **kwargs):
        if isinstance(api_objects, Iterable):
            raise ZenpyException("Cannot update multiple sla policies!")
        super(SlaPolicyApi, self).update(api_objects, **kwargs)

    def definitions(self):
        url = self._build_url(self.endpoint.definitions())
        return self._get(url)


class RecipientAddressApi(CRUDApi):
    def __init__(self, config):
        super(RecipientAddressApi,
              self).__init__(config, object_type='recipient_address')


class ChatApiBase(Api):
    """
    Implements most generic ChatApi functionality. Most of the actual work is
    delegated to Request and Response handlers.
""" def __init__(self, config, endpoint, request_handler=None): super(ChatApiBase, self).__init__(config, object_type='chat', endpoint=endpoint) self.domain = 'www.zopim.com' self.subdomain = '' self._request_handler = request_handler or ChatApiRequest self._object_mapping = ChatObjectMapping(self) self._response_handlers = (DeleteResponseHandler, ChatSearchResponseHandler, ChatResponseHandler, AccountResponseHandler, AgentResponseHandler, VisitorResponseHandler, ShortcutResponseHandler, TriggerResponseHandler, BanResponseHandler, DepartmentResponseHandler, GoalResponseHandler) def create(self, *args, **kwargs): return self._request_handler(self).post(*args, **kwargs) def update(self, *args, **kwargs): return self._request_handler(self).put(*args, **kwargs) def delete(self, *args, **kwargs): return self._request_handler(self).delete(*args, **kwargs) def _get_ip_address(self, ips): for ip in ips: yield self._object_mapping.object_from_json('ip_address', ip) class AgentApi(ChatApiBase): def __init__(self, config, endpoint): super(AgentApi, self).__init__(config, endpoint=endpoint, request_handler=AgentRequest) def me(self): return self._get(self._build_url(self.endpoint.me())) class ChatApi(ChatApiBase, ChatIncrementalApi): def __init__(self, config, endpoint): super(ChatApi, self).__init__(config, endpoint=endpoint) self.accounts = ChatApiBase(config, endpoint.account, request_handler=AccountRequest) self.agents = AgentApi(config, endpoint.agents) self.visitors = ChatApiBase(config, endpoint.visitors, request_handler=VisitorRequest) self.shortcuts = ChatApiBase(config, endpoint.shortcuts) self.triggers = ChatApiBase(config, endpoint.triggers) self.bans = ChatApiBase(config, endpoint.bans) self.departments = ChatApiBase(config, endpoint.departments) self.goals = ChatApiBase(config, endpoint.goals) self.stream = ChatApiBase(config, endpoint.stream) def search(self, *args, **kwargs): url = self._build_url(self.endpoint.search(*args, **kwargs)) return self._get(url) class HelpCentreApiBase(Api): def __init__(self, config, endpoint, object_type): super(HelpCentreApiBase, self).__init__(config, object_type=object_type, endpoint=endpoint) self._response_handlers = ( MissingTranslationHandler,) + self._response_handlers self._object_mapping = HelpCentreObjectMapping(self) self.locale = '' def _process_response(self, response, object_mapping=None): endpoint_path = get_endpoint_path(self, response) if (endpoint_path.startswith('/help_center') or endpoint_path.startswith('/community') or endpoint_path.startswith('/guide')): object_mapping = self._object_mapping else: object_mapping = ZendeskObjectMapping(self) return super(HelpCentreApiBase, self)._process_response(response, object_mapping) def _build_url(self, endpoint): return super(HelpCentreApiBase, self)._build_url(endpoint) class TranslationApi(Api): @extract_id(Article, Section, Category) def translations(self, help_centre_object): return self._query_zendesk(self.endpoint.translations, object_type='translation', id=help_centre_object) @extract_id(Article, Section, Category) def missing_translations(self, help_centre_object): return self._query_zendesk(self.endpoint.missing_translations, object_type='translation', id=help_centre_object) @extract_id(Article, Section, Category) def create_translation(self, help_centre_object, translation): return TranslationRequest(self).post(self.endpoint.create_translation, help_centre_object, translation) @extract_id(Article, Section, Category) def update_translation(self, help_centre_object, translation): 
return TranslationRequest(self).put(self.endpoint.update_translation, help_centre_object, translation) @extract_id(Translation) def delete_translation(self, translation): return TranslationRequest(self).delete( self.endpoint.delete_translation, translation) class SubscriptionApi(Api): @extract_id(Article, Section, Post, Topic) def subscriptions(self, help_centre_object): return self._query_zendesk(self.endpoint.subscriptions, object_type='subscriptions', id=help_centre_object) @extract_id(Article, Section, Post, Topic) def create_subscription(self, help_centre_object, subscription): return SubscriptionRequest(self).post(self.endpoint.subscriptions, help_centre_object, subscription) @extract_id(Article, Section, Post, Topic, Subscription) def delete_subscription(self, help_centre_object, subscription): return SubscriptionRequest(self).delete( self.endpoint.subscriptions_delete, help_centre_object, subscription) class VoteApi(Api): @extract_id(Article, Post, Comment) def votes(self, help_centre_object): url = self._build_url(self.endpoint.votes(id=help_centre_object)) return self._get(url) @extract_id(Article, Post, Comment) def vote_up(self, help_centre_object): url = self._build_url(self.endpoint.votes.up(id=help_centre_object)) return self._post(url, payload={}) @extract_id(Article, Post, Comment) def vote_down(self, help_centre_object): url = self._build_url(self.endpoint.votes.down(id=help_centre_object)) return self._post(url, payload={}) class VoteCommentApi(Api): @extract_id(Article, Post, Comment) def comment_votes(self, help_centre_object, comment): url = self._build_url( self.endpoint.comment_votes(help_centre_object, comment)) return self._get(url) @extract_id(Article, Post, Comment) def vote_comment_up(self, help_centre_object, comment): url = self._build_url( self.endpoint.comment_votes.up(help_centre_object, comment)) return self._post(url, payload={}) @extract_id(Article, Post, Comment) def vote_comment_down(self, help_centre_object, comment): url = self._build_url( self.endpoint.comment_votes.down(help_centre_object, comment)) return self._post(url, payload={}) class ArticleApi(HelpCentreApiBase, TranslationApi, SubscriptionApi, VoteApi, VoteCommentApi, IncrementalApi): @extract_id(Section) def create(self, section, article): """ Create (POST) an Article - See: Zendesk API `Reference <https://developer.zendesk.com/rest_api/docs/help_center/articles#create-article>`__. :param section: Section ID or object :param article: Article to create """ return CRUDRequest(self).post(article, create=True, id=section) def update(self, article): """ Update (PUT) an Article - See: Zendesk API `Reference <https://developer.zendesk.com/rest_api/docs/help_center/articles#update-article>`__. :param article: Article to update """ return CRUDRequest(self).put(article) def archive(self, article): """ Archive (DELETE) an Article - See: Zendesk API `Reference <https://developer.zendesk.com/rest_api/docs/help_center/articles#archive-article>`__.
:param article: Article to archive """ return CRUDRequest(self).delete(article) @extract_id(Article) def comments(self, article): """ Retrieve comments for an article :param article: Article ID or object """ return self._query_zendesk(self.endpoint.comments, object_type='comment', id=article) @extract_id(Article) def labels(self, article): return self._query_zendesk(self.endpoint.labels, object_type='label', id=article) @extract_id(Article) def show_translation(self, article, locale): url = self._build_url(self.endpoint.show_translation(article, locale)) return self._get(url) def search(self, *args, **kwargs): url = self._build_url(self.endpoint.search(*args, **kwargs)) return self._get(url) class CommentApi(HelpCentreApiBase): def __call__(self, *args, **kwargs): raise ZenpyException("You cannot directly call this Api!") @extract_id(Article, Comment) def show(self, article, comment): url = self._build_url(self.endpoint.comment_show(article, comment)) return self._get(url) @extract_id(Article) def create(self, article, comment): if comment.locale is None: raise ZenpyException( "locale is required when creating comments - " "https://developer.zendesk.com/rest_api/docs/help_center/comments#create-comment" ) return HelpdeskCommentRequest(self).post(self.endpoint.comments, article, comment) @extract_id(Article) def update(self, article, comment): return HelpdeskCommentRequest(self).put(self.endpoint.comments_update, article, comment) @extract_id(Article, Comment) def delete(self, article, comment): return HelpdeskCommentRequest(self).delete( self.endpoint.comments_delete, article, comment) @extract_id(User) def user_comments(self, user): return self._query_zendesk(self.endpoint.user_comments, object_type='comment', id=user) class CategoryApi(HelpCentreApiBase, CRUDApi, TranslationApi): def articles(self, category_id): return self._query_zendesk(self.endpoint.articles, 'article', id=category_id) def sections(self, category_id): return self._query_zendesk(self.endpoint.sections, 'section', id=category_id) class AccessPolicyApi(Api): @extract_id(Topic, Section) def access_policies(self, help_centre_object): return self._query_zendesk(self.endpoint.access_policies, 'access_policy', id=help_centre_object) @extract_id(Topic, Section) def update_access_policy(self, help_centre_object, access_policy): return AccessPolicyRequest(self).put(self.endpoint.access_policies, help_centre_object, access_policy) class SectionApi(HelpCentreApiBase, CRUDApi, TranslationApi, SubscriptionApi, AccessPolicyApi): @extract_id(Section) def articles(self, section, locale='en-us'): return self._query_zendesk(self.endpoint.articles, 'article', id=section, locale=locale) def create(self, section): return CRUDRequest(self).post(section, create=True, id=section.category_id) class ArticleAttachmentApi(HelpCentreApiBase, SubscriptionApi): @extract_id(Article) def __call__(self, article): """ Returns all attachments associated with the article, both ``inline=True`` and ``inline=False``. :param article: Numeric article id or :class:`Article` object. :return: Generator with all associated article attachments. """ return self._query_zendesk(self.endpoint, 'article_attachment', id=article) @extract_id(Article) def inline(self, article): """ Returns all inline attachments associated with the article (attachments with the ``inline=True`` flag). Inline attachments and their URLs can be referenced in the HTML body of the article. :param article: Numeric article id or :class:`Article` object.
:return: Generator with all associated inline attachments. """ return self._query_zendesk(self.endpoint.inline, 'article_attachment', id=article) @extract_id(Article) def block(self, article): """ Returns all block attachments associated with the article (attachments with ``inline=False``). Block attachments are displayed as separate files attached to the article. :param article: Numeric article id or :class:`Article` object. :return: Generator with all associated block attachments. """ return self._query_zendesk(self.endpoint.block, 'article_attachment', id=article) @extract_id(ArticleAttachment) def show(self, attachment): return self._query_zendesk(self.endpoint, 'article_attachment', id=attachment) @extract_id(Article) def create(self, article, attachment, inline=False, file_name=None, content_type=None): """ Creates an attachment associated with an article. :param article: Numeric article id or :class:`Article` object. :param attachment: File object or os path to file :param inline: If true, the attached file is shown in the dedicated admin UI for inline attachments and its url can be referenced in the HTML body of the article. If false, the attachment is listed in the list of attachments. Default is `false` :param file_name: optional file name to use for the upload. :param content_type: The content type of the file, e.g. `image/png`; Zendesk may ignore it. :return: :class:`ArticleAttachment` object """ return HelpdeskAttachmentRequest(self).post(self.endpoint.create, article=article, attachments=attachment, inline=inline, file_name=file_name, content_type=content_type) def create_unassociated(self, attachment, inline=False, file_name=None, content_type=None): """ You can use this endpoint for bulk imports. It lets you upload a file without associating it to an article until later. Check Zendesk documentation `important notes <https://developer.zendesk.com/rest_api/docs/help_center/article_attachments#create-unassociated-attachment>`__. :param attachment: File object or os path to file :param inline: If true, the attached file is shown in the dedicated admin UI for inline attachments and its url can be referenced in the HTML body of the article. If false, the attachment is listed in the list of attachments. Default is `false` :param file_name: optional file name to use for the upload. :param content_type: The content type of the file, e.g. `image/png`; Zendesk may ignore it. :return: :class:`ArticleAttachment` object """ return HelpdeskAttachmentRequest(self).post( self.endpoint.create_unassociated, attachments=attachment, inline=inline, file_name=file_name, content_type=content_type) @extract_id(ArticleAttachment) def delete(self, article_attachment): """ Permanently deletes an attachment from a Help Center article. :param article_attachment: :class:`ArticleAttachment` object or numeric article attachment id. :return: status_code == 204 on success """ return HelpdeskAttachmentRequest(self).delete(self.endpoint.delete, article_attachment) @extract_id(Article) def bulk_attachments(self, article, attachments): """ Associates previously unassociated attachments with an article after the article has been created. :param article: Article id or :class:`Article` object :param attachments: :class:`ArticleAttachment` object, or list of :class:`ArticleAttachment` objects, up to 20 supported. `Zendesk documentation.
<https://developer.zendesk.com/rest_api/docs/help_center/articles#associate-attachments-in-bulk-to-article>`__ :return: """ return HelpdeskAttachmentRequest(self).post( self.endpoint.bulk_attachments, article=article, attachments=attachments) class LabelApi(HelpCentreApiBase): @extract_id(Article) def create(self, article, label): return HelpCentreRequest(self).post(self.endpoint.create, article, label) @extract_id(Article, Label) def delete(self, article, label): return HelpCentreRequest(self).delete(self.endpoint.delete, article, label) class TopicApi(HelpCentreApiBase, CRUDApi, SubscriptionApi): @extract_id(Topic) def posts(self, topic): url = self._build_url(self.endpoint.posts(id=topic)) return self._get(url) class PostCommentApi(HelpCentreApiBase, VoteCommentApi): @extract_id(Post) def __call__(self, post): return super(PostCommentApi, self).__call__(id=post) @extract_id(Post) def create(self, post, comment): return PostCommentRequest(self).post(self.endpoint, post, comment) @extract_id(Post) def update(self, post, comment): return PostCommentRequest(self).put(self.endpoint.update, post, comment) @extract_id(Post, Comment) def delete(self, post, comment): return PostCommentRequest(self).delete(self.endpoint.delete, post, comment) class PostApi(HelpCentreApiBase, CRUDApi, SubscriptionApi, VoteApi): def __init__(self, config, endpoint, object_type): super(PostApi, self).__init__(config, endpoint, object_type) self.comments = PostCommentApi(config, endpoint.comments, 'post') class UserSegmentApi(HelpCentreApiBase, CRUDApi): def applicable(self): return self._query_zendesk(self.endpoint.applicable, object_type='user_segment') @extract_id(Section) def sections(self, section): return self._query_zendesk(self.endpoint.sections, object_type='section', id=section) @extract_id(Topic) def topics(self, topic): return self._query_zendesk(self.endpoint.topics, object_type='topic', id=topic) class PermissionGroupApi(HelpCentreApiBase, CRUDApi): pass class HelpCentreApi(HelpCentreApiBase): def __init__(self, config): super(HelpCentreApi, self).__init__(config, endpoint=EndpointFactory('help_centre'), object_type='help_centre') self.articles = ArticleApi(config, self.endpoint.articles, object_type='article') self.comments = CommentApi(config, self.endpoint.articles, object_type='comment') self.sections = SectionApi(config, self.endpoint.sections, object_type='section') self.categories = CategoryApi(config, self.endpoint.categories, object_type='category') self.attachments = ArticleAttachmentApi( config, self.endpoint.attachments, object_type='article_attachment') self.labels = LabelApi(config, self.endpoint.labels, object_type='label') self.topics = TopicApi(config, self.endpoint.topics, object_type='topic') self.posts = PostApi(config, self.endpoint.posts, object_type='post') self.user_segments = UserSegmentApi(config, self.endpoint.user_segments, object_type='user_segment') self.permission_groups = PermissionGroupApi( config, self.endpoint.permission_groups, object_type='permission_group') def __call__(self, *args, **kwargs): raise NotImplementedError("Cannot directly call the HelpCentreApi!") class NpsApi(Api): def __init__(self, config): super(NpsApi, self).__init__(config, object_type='nps') def __call__(self, *args, **kwargs): raise ZenpyException("You cannot call this endpoint directly!") def recipients_incremental(self, start_time): """ Retrieve NPS Recipients incremental :param start_time: time to retrieve events from. 
""" return self._query_zendesk(self.endpoint.recipients_incremental, 'recipients', start_time=start_time) def responses_incremental(self, start_time): """ Retrieve NPS Responses incremental :param start_time: time to retrieve events from. """ return self._query_zendesk(self.endpoint.responses_incremental, 'responses', start_time=start_time) class TalkApiBase(Api): def __init__(self, config, endpoint, object_type): super(TalkApiBase, self).__init__(config, object_type=object_type, endpoint=endpoint) self._object_mapping = TalkObjectMapping(self) def _build_url(self, endpoint): return super(TalkApiBase, self)._build_url(endpoint) class TalkApi(TalkApiBase): def __init__(self, config): super(TalkApi, self).__init__(config, endpoint=EndpointFactory('talk'), object_type='talk') self.calls = CallApi(config, self.endpoint.calls, object_type='call') self.current_queue_activity = StatsApi( config, self.endpoint.current_queue_activity, object_type='current_queue_activity') self.agents_activity = StatsApi(config, self.endpoint.agents_activity, object_type='agents_activity') self.availability = AvailabilitiesApi(config, self.endpoint.availability, object_type='availability') self.account_overview = StatsApi(config, self.endpoint.account_overview, object_type='account_overview') self.phone_numbers = PhoneNumbersApi(config, self.endpoint.phone_numbers, object_type='phone_numbers') self.agents_overview = StatsApi(config, self.endpoint.agents_overview, object_type='agents_overview') self.legs = LegApi(config, self.endpoint.legs, object_type='leg') def __call__(self, *args, **kwargs): raise NotImplementedError("Cannot directly call the TalkApi!") class CallApi(TalkApiBase, IncrementalApi): def __init__(self, config, endpoint, object_type): super(CallApi, self).__init__(config, object_type=object_type, endpoint=endpoint) class LegApi(TalkApiBase, IncrementalApi): def __init__(self, config, endpoint, object_type): super(LegApi, self).__init__(config, object_type=object_type, endpoint=endpoint) class StatsApi(TalkApiBase): def __init__(self, config, endpoint, object_type): super(StatsApi, self).__init__(config, object_type=object_type, endpoint=endpoint) class AvailabilitiesApi(TalkApiBase): def __init__(self, config, endpoint, object_type): super(AvailabilitiesApi, self).__init__(config, object_type=object_type, endpoint=endpoint) class PhoneNumbersApi(TalkApiBase): def __init__(self, config, endpoint, object_type): super(PhoneNumbersApi, self).__init__(config, object_type=object_type, endpoint=endpoint) class TalkPEApi(Api): def __init__(self, config): super(TalkPEApi, self).__init__(config, endpoint=EndpointFactory('talk_pe'), object_type='talk_pe') def __call__(self, *args, **kwargs): raise ZenpyException("You cannot call this endpoint directly!") @extract_id(User) def display_user(self, agent, user): """ Show a user's profile page to a specified agent :param agent: An agent to whom the profile is shown :param ticket: A user to show his profile """ url = self._build_url(self.endpoint.display_user(agent, user)) return self._post(url, payload=''); @extract_id(User, Ticket) def display_ticket(self, agent, ticket): """ Show a ticket to a specified agent :param agent: An agent to whom the ticket is shown :param ticket: A ticket to show """ url = self._build_url(self.endpoint.display_ticket(agent, ticket)) return self._post(url, payload=''); @extract_id(User) def create_ticket(self, agent, ticket): """ Create a new voicemail tiсket and show it to a specified agent Note: the ticket must have a "via_id" parameter 
set. Details: https://developer.zendesk.com/api-reference/voice/talk-partner-edition-api/reference/#creating-tickets :param agent: An agent to whom the new ticket is shown :param ticket: The ticket to create and show """ url = self._build_url(self.endpoint.create_ticket()) payload = { "display_to_agent": agent if agent else "", "ticket": ticket } return self._post(url, payload=payload) class CustomAgentRolesApi(CRUDApi): pass class SearchApi(Api): def __init__(self, config): super(SearchApi, self).__init__(config, object_type='results', endpoint=EndpointFactory('search')) self._object_mapping = ZendeskObjectMapping(self) def __call__(self, *args, **kwargs): return self._query_zendesk(self.endpoint, self.object_type, *args, **kwargs) def count(self, *args, **kwargs): """ Returns results count only """ return self._query_zendesk(self.endpoint.count, 'search_count', *args, **kwargs) class SearchExportApi(Api): def __init__(self, config): super(SearchExportApi, self).__init__(config, object_type='results', endpoint=EndpointFactory('search_export')) self._object_mapping = ZendeskObjectMapping(self) def __call__(self, *args, **kwargs): return self._query_zendesk(self.endpoint, self.object_type, *args, **kwargs) class UserFieldsApi(CRUDApi): def __init__(self, config): super(UserFieldsApi, self).__init__(config, object_type='user_field') class ZISApi(Api): def __init__(self, config): super(ZISApi, self).__init__(config, endpoint=EndpointFactory('zis'), object_type='') self.registry = ZISRegistryApi(config, endpoint=self.endpoint.registry, object_type='integration') def __call__(self, *args, **kwargs): raise ZenpyException("You cannot call this endpoint directly!") class ZISRegistryApi(Api): def __init__(self, config, endpoint, object_type): super(ZISRegistryApi, self).__init__(config, endpoint=endpoint, object_type=object_type) self.api_prefix = "/api/services/zis/registry" def __call__(self, *args, **kwargs): raise ZenpyException("You cannot call this endpoint directly!") def create_integration(self, integration, description): """ Creates a new ZIS integration :param integration: A name for a new integration :param description: Description of the integration """ url = self._build_url(endpoint=self.endpoint.create_integration(integration)) return self._post(url, payload=dict(description=description)) def upload_bundle(self, integration, bundle): """ Uploads or updates a bundle :param integration: A name of an integration to store the bundle :param bundle: JSON string with the bundle """ url = self._build_url(endpoint=self.endpoint.upload_bundle(integration)) return self._post(url, payload=bundle) def install(self, integration, job_spec): """ Installs a JobSpec from an uploaded bundle to handle events :param integration: A name of an integration containing the JobSpec :param job_spec: A JobSpec name """ url = self._build_url(endpoint=self.endpoint.install(integration, job_spec)) return self._post(url, payload=None) def uninstall(self, integration, job_spec): """ Uninstalls a JobSpec :param integration: A name of an integration containing the JobSpec :param job_spec: A JobSpec name """ url = self._build_url(endpoint=self.endpoint.install(integration, job_spec)) return self._delete(url, payload=None) class WebhooksApi(CRUDApi): def __init__(self, config): super(WebhooksApi, self).__init__(config, object_type='webhook') def update(self, webhook_id, new_webhook): """ Update (PUT) a webhook.
A dedicated method is used because the full object must be serialized, not only the changed fields. :param webhook_id: A webhook id to update :param new_webhook: A new webhook object """ payload = dict( webhook=json.loads( json.dumps( new_webhook, default=json_encode_for_printing ) ) ) url = self._build_url(endpoint=self.endpoint(id=webhook_id)) return self._put(url, payload=payload) def patch(self, webhook): """ Partially Update (PATCH) a webhook. :param webhook: A webhook to patch """ payload = dict( webhook=json.loads( json.dumps( webhook, default=json_encode_for_zendesk ) ) ) url = self._build_url(endpoint=self.endpoint(id=webhook.id)) return self._patch(url, payload=payload) def list(self, **kwargs): """ List webhooks """ url = self._build_url(endpoint=self.endpoint(**kwargs)) return self._get(url) @extract_id(Webhook) def clone(self, webhook): """ Clone a webhook :param webhook: a webhook to clone """ url = self._build_url(endpoint=self.endpoint(clone_webhook_id=webhook)) return self._post(url, payload=None) @extract_id(Webhook) def invocations(self, webhook): """ Get a webhook's invocations :param webhook: a webhook to get invocations for """ url = self._build_url(endpoint=self.endpoint.invocations(id=webhook)) return self._get(url) def invocation_attempts(self, webhook, invocation): """ Get a webhook's invocation attempts :param webhook: a webhook to inspect :param invocation: an invocation to get attempts for """ url = self._build_url(endpoint=self.endpoint.invocation_attempts(webhook, invocation)) return self._get(url) @extract_id(Webhook) def test(self, webhook=None, request=None): """ Test an existing or a new webhook :param webhook: An optional existing webhook id :param request: Optional webhook request data """ params = dict(test_webhook_id=webhook) if webhook else {} payload = dict(request=request or {}) url = self._build_url(endpoint=self.endpoint.test(**params)) return self._post(url, payload=payload) @extract_id(Webhook) def show_secret(self, webhook): """ Shows a webhook secret :param webhook: A webhook to show the secret for """ url = self._build_url(endpoint=self.endpoint.secret(webhook)) return self._get(url) @extract_id(Webhook) def reset_secret(self, webhook): """ Resets a webhook secret :param webhook: A webhook to reset the secret for """ url = self._build_url(endpoint=self.endpoint.secret(webhook)) return self._post(url, payload=None) class LocalesApi(Api): def __init__(self, config): super(LocalesApi, self).__init__(config, object_type='locale') def agent(self): """ Lists the translation locales that have been localized for agents on a specific account. :return: list of Locale objects """ return self._query_zendesk(self.endpoint.agent, 'locale') def public(self): """ Lists the translation locales that are available to all accounts. :return: list of Locale objects """ return self._query_zendesk(self.endpoint.public, 'locale') def current(self): """ This works like Show Locale, but instead of taking a locale id as an argument, it renders the locale of the user performing the request. :return: Locale """ return self._query_zendesk(self.endpoint.current, 'locale')
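A hedged usage sketch (not part of the library): how WebhooksApi.update, which serializes the full webhook, differs from WebhooksApi.patch, which sends only attributes modified since the object was fetched. It assumes the public Zenpy client exposes this api as `webhooks`; the credentials and webhook id are placeholders.

if __name__ == "__main__":
    from zenpy import Zenpy

    client = Zenpy(subdomain="example", email="agent@example.com", token="...")

    webhook = client.webhooks(id=900000000001)  # hypothetical webhook id
    webhook.name = "Renamed webhook"

    # PATCH: only the dirty attribute(s) are serialized, here just `name`.
    client.webhooks.patch(webhook)

    # PUT: the resource is replaced, so the full object is serialized.
    client.webhooks.update(webhook.id, webhook)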
zenpycbp
/zenpycbp-2.0.27-py3-none-any.whl/zenpy/lib/api.py
api.py
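A hedged usage sketch for the ArticleAttachmentApi defined in api.py above: upload an attachment before its article exists, then associate it in bulk once the article is created (the bulk endpoint accepts at most 20 attachments per call). Credentials, ids, and the file path are placeholders.

if __name__ == "__main__":
    from zenpy import Zenpy
    from zenpy.lib.api_objects.help_centre_objects import Article

    client = Zenpy(subdomain="example", email="agent@example.com", token="...")

    # Upload first; the attachment is not yet tied to any article.
    attachment = client.help_center.attachments.create_unassociated(
        "/tmp/diagram.png", inline=False)

    article = client.help_center.articles.create(
        section=360000000001,  # hypothetical section id
        article=Article(title="New article", locale="en-us"))

    # Associate the uploaded attachment with the freshly created article.
    client.help_center.attachments.bulk_attachments(article, [attachment])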
import os from zenpy.lib.api_objects.chat_objects import Shortcut, Trigger from zenpy.lib.endpoint import EndpointFactory from zenpy.lib.exception import ZenpyException, TooManyValuesException from zenpy.lib.util import get_object_type, as_plural, is_iterable_but_not_string try: from collections.abc import Iterable except ImportError: from collections import Iterable class RequestHandler(object): """ Abstraction of a request to either the Zendesk API or the Chat API. Only POST, PUT and DELETE are handled. Subclasses implement the logic needed to correctly serialize the request to JSON and send off to the relevant API. """ def __init__(self, api): self.api = api def put(self, api_objects, *args, **kwargs): raise NotImplementedError("PUT is not implemented!") def post(self, api_objects, *args, **kwargs): raise NotImplementedError("POST is not implemented!") def delete(self, api_objects, *args, **kwargs): raise NotImplementedError("DELETE is not implemented!") class BaseZendeskRequest(RequestHandler): """ Base class for Zendesk requests. Provides a few handy methods. """ def build_payload(self, api_objects): if isinstance(api_objects, Iterable): payload_key = as_plural(self.api.object_type) else: payload_key = self.api.object_type return {payload_key: self.api._serialize(api_objects)} def check_type(self, zenpy_objects): """ Ensure the passed type matches this API's object_type. """ expected_type = self.api._object_mapping.class_for_type( self.api.object_type) if not isinstance(zenpy_objects, Iterable): zenpy_objects = [zenpy_objects] for zenpy_object in zenpy_objects: if not isinstance(zenpy_object, expected_type): raise ZenpyException( "Invalid type - expected {} found {}".format( expected_type, type(zenpy_object))) class CRUDRequest(BaseZendeskRequest): """ Generic CRUD request. Most CRUD operations are handled by this class. 
""" def post(self, api_objects, *args, **kwargs): self.check_type(api_objects) create_or_update = kwargs.pop('create_or_update', False) create = kwargs.pop('create', False) if isinstance(api_objects, Iterable) and create_or_update: kwargs['create_or_update_many'] = True endpoint = self.api.endpoint.create_or_update_many elif isinstance(api_objects, Iterable): kwargs['create_many'] = True endpoint = self.api.endpoint elif create_or_update: endpoint = self.api.endpoint.create_or_update elif create: endpoint = self.api.endpoint.create else: endpoint = self.api.endpoint payload = self.build_payload(api_objects) url = self.api._build_url(endpoint(*args, **kwargs)) return self.api._post(url, payload) def put(self, api_objects, update_many_external=False, *args, **kwargs): self.check_type(api_objects) if update_many_external: kwargs['update_many_external'] = [ o.external_id for o in api_objects ] elif isinstance(api_objects, Iterable): kwargs['update_many'] = True else: kwargs['id'] = api_objects.id payload = self.build_payload(api_objects) url = self.api._build_url(self.api.endpoint(*args, **kwargs)) return self.api._put(url, payload=payload) def delete(self, api_objects, destroy_many_external=False, *args, **kwargs): self.check_type(api_objects) if destroy_many_external: kwargs['destroy_many_external'] = [ o.external_id for o in api_objects ] elif isinstance(api_objects, Iterable): kwargs['destroy_ids'] = [i.id for i in api_objects] else: kwargs['id'] = api_objects.id payload = self.build_payload(api_objects) url = self.api._build_url(self.api.endpoint(*args, **kwargs)) response = self.api._delete(url, payload=payload) self.api.cache.delete(api_objects) return response class SuspendedTicketRequest(BaseZendeskRequest): """ Handle updating and deleting SuspendedTickets. """ def post(self, api_objects, *args, **kwargs): raise NotImplementedError( "POST is not implemented for suspended tickets!") def put(self, tickets, *args, **kwargs): self.check_type(tickets) endpoint_kwargs = dict() if isinstance(tickets, Iterable): endpoint_kwargs['recover_ids'] = [i.id for i in tickets] endpoint = self.api.endpoint else: endpoint_kwargs['id'] = tickets.id endpoint = self.api.endpoint.recover payload = self.build_payload(tickets) url = self.api._build_url(endpoint(**endpoint_kwargs)) return self.api._put(url, payload=payload) def delete(self, tickets, *args, **kwargs): self.check_type(tickets) endpoint_kwargs = dict() if isinstance(tickets, Iterable): endpoint_kwargs['destroy_ids'] = [i.id for i in tickets] else: endpoint_kwargs['id'] = tickets.id payload = self.build_payload(tickets) url = self.api._build_url(self.api.endpoint(**endpoint_kwargs)) response = self.api._delete(url, payload=payload) self.api.cache.delete(tickets) return response class TagRequest(RequestHandler): """ Handle tag operations. """ def post(self, tags, *args, **kwargs): return self.modify_tags(self.api._post, tags, *args) def put(self, tags, *args, **kwargs): return self.modify_tags(self.api._put, tags, *args) def delete(self, tags, *args, **kwargs): return self.modify_tags(self.api._delete, tags, *args) def modify_tags(self, http_method, tags, id): url = self.api._build_url(self.api.endpoint.tags(id=id)) payload = dict(tags=tags) return http_method(url, payload=payload) class RateRequest(RequestHandler): """ Handles submitting ratings. 
""" def post(self, rating, *args, **kwargs): url = self.api._build_url( self.api.endpoint.satisfaction_ratings(*args)) payload = {get_object_type(rating): self.api._serialize(rating)} return self.api._post(url, payload=payload) def put(self, api_objects, *args, **kwargs): raise NotImplementedError("PUT is not implemented for RateRequest!") def delete(self, api_objects, *args, **kwargs): raise NotImplementedError("DELETE is not implemented for RateRequest!") class UserIdentityRequest(BaseZendeskRequest): """ Handle CRUD operations on UserIdentities. """ def post(self, user_id, identity): payload = self.build_payload(identity) url = self.api._build_url(self.api.endpoint(id=user_id)) return self.api._post(url, payload=payload) def put(self, endpoint, user_id, identity_id, identity=None): payload = self.build_payload(identity) if identity else {} url = self.api._build_url(endpoint(user_id, identity_id)) return self.api._put(url, payload=payload) def delete(self, user, identity): payload = self.build_payload(identity) url = self.api._build_url(self.api.endpoint.delete(user, identity)) return self.api._delete(url, payload=payload) class UploadRequest(RequestHandler): """ Handles uploading files to Zendesk. """ def post(self, fp, token=None, target_name=None, content_type=None, api_object=None): if hasattr(fp, 'read'): # File-like objects such as: # PY3: io.StringIO, io.TextIOBase, io.BufferedIOBase # PY2: file, io.StringIO, StringIO.StringIO, cStringIO.StringIO if not hasattr(fp, 'name') and not target_name: raise ZenpyException("upload requires a target file name") else: target_name = target_name or fp.name elif hasattr(fp, 'name'): # Path objects (pathlib.Path) if fp.name == '': raise ZenpyException("upload requires a target file name") target_name = target_name or fp.name # PathLike objects only compatible with python3.6 and above, so # we convert to string at this point # https://stackoverflow.com/a/42694113/4664727 fp = open(str(fp), 'rb') elif isinstance(fp, str): if os.path.isfile(fp): fp = open(fp, 'rb') target_name = target_name or fp.name elif not target_name: # Valid string, which is not a path, and without a target name raise ZenpyException("upload requires a target file name") elif not target_name: # Other serializable types accepted by requests (like dict) raise ZenpyException("upload requires a target file name") url = self.api._build_url( self.api.endpoint.upload(filename=target_name, token=token)) response = self.api._post(url, data=fp, payload={}, content_type=content_type) if hasattr(fp, "close"): fp.close() return response def put(self, api_objects, *args, **kwargs): raise NotImplementedError("POST is not implemented fpr UploadRequest!") def delete(self, api_objects, *args, **kwargs): raise NotImplementedError( "DELETE is not implemented fpr UploadRequest!") class UserMergeRequest(BaseZendeskRequest): """ Handles merging two users. """ def put(self, source, destination): url = self.api._build_url(self.api.endpoint.merge(id=source)) payload = {self.api.object_type: dict(id=destination)} return self.api._put(url, payload=payload) def post(self, *args, **kwargs): raise NotImplementedError( "POST is not implemented for UserMergeRequest!") def delete(self, api_objects, *args, **kwargs): raise NotImplementedError( "DELETE is not implemented for UserMergeRequest!") class TicketMergeRequest(RequestHandler): """ Handles merging one or more tickets. 
""" def post(self, target, source, target_comment=None, source_comment=None): if not is_iterable_but_not_string(source): source = [source] payload = dict(ids=source, target_comment=target_comment, source_comment=source_comment) url = self.api._build_url(self.api.endpoint.merge(id=target)) return self.api._post(url, payload=payload) def put(self, api_objects, *args, **kwargs): raise NotImplementedError( "PUT is not implemented for TicketMergeRequest!") def delete(self, api_objects, *args, **kwargs): raise NotImplementedError( "DELETE is not implemented fpr TicketMergeRequest!") class SatisfactionRatingRequest(BaseZendeskRequest): """ Handle rating a ticket. """ def post(self, ticket_id, satisfaction_rating): payload = self.build_payload(satisfaction_rating) url = self.api._build_url( EndpointFactory('satisfaction_ratings').create(id=ticket_id)) return self.api._post(url, payload) def put(self, api_objects, *args, **kwargs): raise NotImplementedError( "PUT is not implemented for SatisfactionRequest!") def delete(self, api_objects, *args, **kwargs): raise NotImplementedError( "DELETE is not implemented fpr SatisfactionRequest!") class OrganizationFieldReorderRequest(RequestHandler): def put(self, api_objects, *args, **kwargs): payload = {'organization_field_ids': api_objects} url = self.api._build_url(self.api.endpoint.reorder()) return self.api._put(url, payload=payload) class TicketFieldOptionRequest(BaseZendeskRequest): def post(self, ticket_field, custom_field_option): cfo_id = getattr(custom_field_option, 'id', None) if cfo_id is not None: url = self.api._build_url( self.api.endpoint.update(id=ticket_field)) else: url = self.api._build_url(self.api.endpoint(id=ticket_field)) return self.api._post(url, payload=self.build_payload(custom_field_option)) def delete(self, ticket_field, custom_field_option): url = self.api._build_url( self.api.endpoint.delete(ticket_field, custom_field_option)) return self.api._delete(url) class VariantRequest(BaseZendeskRequest): def post(self, item, variant): if isinstance(variant, Iterable): endpoint = self.api.endpoint.create_many else: endpoint = self.api.endpoint url = self.api._build_url(endpoint(item)) payload = self.build_payload(variant) return self.api._post(url, payload=payload) def put(self, item, variant): if isinstance(variant, Iterable): url = self.api._build_url(self.api.endpoint.update_many(id=item)) else: url = self.api._build_url( self.api.endpoint.update(item, variant.id)) payload = self.build_payload(variant) return self.api._put(url, payload=payload) def delete(self, item, variant): url = self.api._build_url(self.api.endpoint.delete(item, variant)) deleted = self.api._delete(url) delete_from_cache(deleted) return deleted class ChatApiRequest(RequestHandler): """ Generic Chat API request. Most CRUD operations on Chat API endpoints are handled by this class. 
""" def put(self, chat_object): identifier = self.get_object_identifier(chat_object) value = getattr(chat_object, identifier) setattr(chat_object, identifier, None) # The API freaks out when a identifier is in the JSON payload = self.flatten_chat_object(self.api._serialize(chat_object)) url = self.api._build_url(self.api.endpoint(**{identifier: value})) return self.api._put(url, payload=payload) def post(self, chat_object): payload = self.api._serialize(chat_object) return self.api._post(self.api._build_url(self.api.endpoint()), payload=payload) def delete(self, chat_object, *args, **kwargs): identifier = self.get_object_identifier(chat_object) value = getattr(chat_object, identifier) url = self.api._build_url(self.api.endpoint(**{identifier: value})) return self.api._delete(url) def flatten_chat_object(self, chat_object, parent_key=''): items = [] for key, value in chat_object.items(): new_key = "{}.{}".format(parent_key, key) if parent_key else key if isinstance(value, dict): items.extend(self.flatten_chat_object(value, new_key).items()) else: items.append((new_key, value)) return dict(items) def get_object_identifier(self, chat_object): if type(chat_object) in (Shortcut, Trigger): return 'name' else: return 'id' class AccountRequest(RequestHandler): """ Handle creating and updating Accounts. """ def put(self, account): payload = self.build_payload(account) return self.api._put(self.api._build_url(self.api.endpoint()), payload=payload) def post(self, account): payload = self.build_payload(account) return self.api._post(self.api._build_url(self.api.endpoint()), payload=payload) def delete(self, chat_object, *args, **kwargs): raise NotImplementedError("Cannot delete accounts!") def build_payload(self, account): return {get_object_type(account): self.api._serialize(account)} class PersonRequest(RequestHandler): """ Handle CRUD operations on Chat API objects representing people. """ def put(self, chat_object): agent_id = chat_object.id chat_object.id = None # The API freaks out if id is included. 
payload = self.api._serialize(chat_object) url = self.api._build_url(self.api.endpoint(id=agent_id)) return self.api._put(url, payload=payload) def post(self, account): payload = self.api._serialize(account) return self.api._post(self.api._build_url(self.api.endpoint()), payload=payload) def delete(self, chat_object): url = self.api._build_url(self.api.endpoint(id=chat_object.id)) return self.api._delete(url) class AgentRequest(PersonRequest): pass class VisitorRequest(PersonRequest): pass class HelpdeskCommentRequest(BaseZendeskRequest): def put(self, endpoint, article, comment): url = self.api._build_url(endpoint(article, comment.id)) payload = self.build_payload(comment) return self.api._put(url, payload) def post(self, endpoint, article, comment): url = self.api._build_url(endpoint(id=article)) payload = self.build_payload(comment) return self.api._post(url, payload) def delete(self, endpoint, article, comment): url = self.api._build_url(endpoint(article, comment)) return self.api._delete(url) class HelpCentreRequest(BaseZendeskRequest): def put(self, endpoint, article, api_object): url = self.api._build_url(endpoint(article, api_object)) payload = self.build_payload(api_object) return self.api._put(url, payload) def post(self, endpoint, article, api_object): url = self.api._build_url(endpoint(id=article)) payload = self.build_payload(api_object) return self.api._post(url, payload) def delete(self, endpoint, article, api_object): url = self.api._build_url(endpoint(article, api_object)) return self.api._delete(url) class PostCommentRequest(HelpCentreRequest): def build_payload(self, translation): return {get_object_type(translation): self.api._serialize(translation)} def put(self, endpoint, post, comment): url = self.api._build_url(endpoint(post, comment.id)) payload = self.build_payload(comment) return self.api._put(url, payload) class SubscriptionRequest(HelpCentreRequest): def build_payload(self, translation): return {get_object_type(translation): self.api._serialize(translation)} class AccessPolicyRequest(BaseZendeskRequest): def put(self, endpoint, help_centre_object, access_policy): payload = self.build_payload(access_policy) url = self.api._build_url(endpoint(id=help_centre_object)) return self.api._put(url, payload=payload) def delete(self, api_objects, *args, **kwargs): raise NotImplementedError("Cannot delete access policies!") def post(self, api_objects, *args, **kwargs): raise NotImplementedError("POST not supported for access policies!") def build_payload(self, help_centre_object): return { get_object_type(help_centre_object): self.api._serialize(help_centre_object) } class TranslationRequest(HelpCentreRequest): def build_payload(self, translation): return {get_object_type(translation): self.api._serialize(translation)} def put(self, endpoint, help_centre_object_id, translation): if translation.locale is None: raise ZenpyException( "Locale can not be None when updating translation!") url = self.api._build_url( endpoint(help_centre_object_id, translation.locale)) payload = self.build_payload(translation) return self.api._put(url, payload=payload) def delete(self, endpoint, translation): url = self.api._build_url(endpoint(id=translation)) return self.api._delete(url) class HelpdeskAttachmentRequest(BaseZendeskRequest): def build_payload(self, ids): return {'attachment_ids': ids} def delete(self, endpoint, article_attachment): url = self.api._build_url(endpoint(id=article_attachment)) return self.api._delete(url) def put(self, api_objects, *args, **kwargs): raise 
NotImplementedError("You cannot update HelpCentre attachments!") def post(self, endpoint, attachments, article=None, inline=False, file_name=None, content_type=None): if article: url = self.api._build_url(endpoint(id=article)) else: url = self.api._build_url(endpoint()) if endpoint == self.api.endpoint.bulk_attachments: self.check_type(attachments) if isinstance(attachments, Iterable): if len(attachments) > 20: raise TooManyValuesException( 'Maximum of 20 attachment objects allowed') ids = [attachment.id for attachment in attachments] else: ids = attachments.id content_type = "application/json" return self.api._post(url, payload=self.build_payload(ids), content_type=content_type) else: if hasattr(attachments, 'read'): file = (file_name if file_name else attachments.name, attachments, content_type) return self.api._post( url, payload={}, files=dict(inline=(None, 'true' if inline else 'false'), file=file)) elif os.path.isfile(attachments): with open(attachments, 'rb') as fp: file = (file_name if file_name else fp.name, fp, content_type) return self.api._post( url, payload={}, files=dict(inline=(None, 'true' if inline else 'false'), file=file)) raise ValueError("Attachment is not a file-like object or valid path!")
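A minimal sketch (not part of the library) of the payload shapes produced by BaseZendeskRequest.build_payload above: a single object is wrapped under the singular object_type key, while an iterable is wrapped under the plural key. `demo_payload` is a hypothetical stand-in that skips serialization.

if __name__ == "__main__":
    from zenpy.lib.util import as_plural

    def demo_payload(object_type, api_objects):
        # Mirrors build_payload: plural key for many objects, singular for one.
        if isinstance(api_objects, (list, tuple)):
            return {as_plural(object_type): api_objects}
        return {object_type: api_objects}

    print(demo_payload("ticket", {"subject": "hi"}))    # {'ticket': {...}}
    print(demo_payload("ticket", [{"subject": "hi"}]))  # {'tickets': [...]}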
zenpycbp
/zenpycbp-2.0.27-py3-none-any.whl/zenpy/lib/request.py
request.py
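Before the autogenerated api_objects module below, a short sketch of its dirty-attribute tracking: only attributes written after construction are emitted by to_dict(serialize=True), which is what keeps PUT payloads small. Ticket is one of the generated classes; calling the private _clean_dirty() helper directly is purely for illustration.

if __name__ == "__main__":
    from zenpy.lib.api_objects import Ticket

    ticket = Ticket(id=1, subject="original", status="open")
    ticket._clean_dirty()     # discard the writes made by the constructor
    ticket.status = "solved"  # marks only `status` as dirty

    # ids are always sent; otherwise only dirty attributes survive:
    print(ticket.to_dict(serialize=True))  # {'id': 1, 'status': 'solved'}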
import json import dateutil.parser from zenpy.lib.util import json_encode_for_printing, json_encode_for_zendesk class BaseObject(object): """ Base for all Zenpy objects. Keeps track of which attributes have been modified. """ def __new__(cls, *args, **kwargs): instance = super(BaseObject, cls).__new__(cls) instance.__dict__['_dirty_attributes'] = set() instance.__dict__['_dirty_callback'] = None instance.__dict__['_always_dirty'] = set() return instance def __setattr__(self, key, value): if key not in ('_dirty', '_dirty_callback', '_always_dirty'): self.__dict__['_dirty_attributes'].add(key) if self._dirty_callback is not None: self._dirty_callback() object.__setattr__(self, key, value) def _clean_dirty(self, obj=None): """ Recursively clean self and all child objects. """ obj = obj or self obj.__dict__['_dirty_attributes'].clear() obj._dirty = False for key, val in vars(obj).items(): if isinstance(val, BaseObject): self._clean_dirty(val) else: func = getattr(val, '_clean_dirty', None) if callable(func): func() def _set_dirty(self, obj=None): """ Recursively set self and all child objects _dirty flag. """ obj = obj or self for key, value in vars(obj).items(): if key not in ('api', '_dirty_attributes', '_always_dirty', '_dirty_callback', '_dirty'): setattr(obj, key, value) if isinstance(value, BaseObject): self._set_dirty(value) def to_json(self, indent=2): """ Return self formatted as JSON. """ return json.dumps(self, default=json_encode_for_printing, indent=indent) def to_dict(self, serialize=False): """ This method returns the object as a Python dict. If serialize is passed, only those attributes that have been modified will be included in the result. """ if serialize: encode_method = json_encode_for_zendesk else: encode_method = json_encode_for_printing return json.loads(json.dumps(self._to_dict(serialize=serialize), default=encode_method)) def _to_dict(self, serialize=False): """ This method works by copying self.__dict__, and removing everything that should not be serialized. """ copy_dict = self.__dict__.copy() for key, value in vars(self).items(): # We want to send all ids to Zendesk always if serialize and key == 'id': continue # If this is a Zenpy object, convert it to a dict. if not serialize and isinstance(value, BaseObject): copy_dict[key] = copy_dict.pop(key).to_dict() # This object has a flag indicating it has been dirtied, so we want to send it off. elif serialize and getattr(value, '_dirty', False): continue # Here we have an attribute that should always be sent to Zendesk. elif serialize and key in self._always_dirty: continue # These are for internal tracking, so just delete. elif key in ('api', '_dirty_attributes', '_always_dirty', '_dirty_callback', '_dirty'): del copy_dict[key] # If the attribute has not been modified, do not send it. elif serialize and key not in self._dirty_attributes: del copy_dict[key] # Some reserved words are prefixed with an underscore, remove it here. 
elif key.startswith('_'): copy_dict[key[1:]] = copy_dict[key] del copy_dict[key] return copy_dict def __repr__(self): class_name = type(self).__name__ if class_name in ('UserField',): return "{}()".format(class_name) def formatted(item): return item if (isinstance(item, int) or item is None) else "'{}'".format(item) for identifier in ('id', 'token', 'key', 'name', 'account_key'): if hasattr(self, identifier): return "{}({}={})".format(class_name, identifier, formatted(getattr(self, identifier))) return "{}()".format(class_name) class Activity(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, actor=None, created_at=None, id=None, title=None, updated_at=None, url=None, user=None, verb=None, **kwargs): self.api = api self.actor = actor self.created_at = created_at self.id = id self.title = title self.updated_at = updated_at self.url = url self.user = user self.verb = verb for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue @property def created(self): if self.created_at: return dateutil.parser.parse(self.created_at) @created.setter def created(self, created): if created: self.created_at = created @property def updated(self): if self.updated_at: return dateutil.parser.parse(self.updated_at) @updated.setter def updated(self, updated): if updated: self.updated_at = updated class AgentMacroReference(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, id=None, macro_id=None, macro_title=None, type=None, via=None, **kwargs): self.api = api self.id = id self.macro_id = macro_id self.macro_title = macro_title self.type = type self.via = via for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue @property def macro(self): if self.api and self.macro_id: return self.api._get_macro(self.macro_id) @macro.setter def macro(self, macro): if macro: self.macro_id = macro.id self._macro = macro class Attachment(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, content_type=None, content_url=None, file_name=None, id=None, size=None, thumbnails=None, **kwargs): self.api = api # Comment: The content type of the image. Example value: image/png # Read-only: yes # Type: string self.content_type = content_type # Comment: A full URL where the attachment image file can be downloaded # Read-only: yes # Type: string self.content_url = content_url # Comment: The name of the image file # Read-only: yes # Type: string self.file_name = file_name # Comment: Automatically assigned when created # Read-only: yes # Type: integer self.id = id # Comment: The size of the image file in bytes # Read-only: yes # Type: integer self.size = size # Comment: An array of Photo objects. Note that thumbnails do not have thumbnails. 
# Read-only: yes # Type: array self.thumbnails = thumbnails for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue class Audit(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, author_id=None, created_at=None, events=None, id=None, metadata=None, ticket_id=None, via=None, **kwargs): self.api = api self.author_id = author_id self.created_at = created_at self.events = events self.id = id self.metadata = metadata self.ticket_id = ticket_id self.via = via for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue @property def author(self): if self.api and self.author_id: return self.api._get_user(self.author_id) @author.setter def author(self, author): if author: self.author_id = author.id self._author = author @property def created(self): if self.created_at: return dateutil.parser.parse(self.created_at) @created.setter def created(self, created): if created: self.created_at = created @property def ticket(self): if self.api and self.ticket_id: return self.api._get_ticket(self.ticket_id) @ticket.setter def ticket(self, ticket): if ticket: self.ticket_id = ticket.id self._ticket = ticket class Automation(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, actions=None, active=None, conditions=None, created_at=None, id=None, position=None, raw_title=None, title=None, updated_at=None, url=None, **kwargs): self.api = api # Comment: An object describing what the automation will do # Type: :class:`Actions` self.actions = actions # Comment: Whether the automation is active # Type: boolean self.active = active # Comment: An object that describes the conditions under which the automation will execute # Type: :class:`Conditions` self.conditions = conditions # Comment: The time the automation was created # Type: date self.created_at = created_at # Comment: Automatically assigned when created # Type: integer self.id = id # Comment: Position of the automation, determines the order they will execute in # Type: integer self.position = position self.raw_title = raw_title # Comment: The title of the automation # Type: string self.title = title # Comment: The time of the last update of the automation # Type: date self.updated_at = updated_at self.url = url for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue @property def created(self): """ | Comment: The time the automation was created """ if self.created_at: return dateutil.parser.parse(self.created_at) @created.setter def created(self, created): if created: self.created_at = created @property def updated(self): """ | Comment: The time of the last update of the automation """ if self.updated_at: return dateutil.parser.parse(self.updated_at) @updated.setter def updated(self, updated): if updated: self.updated_at = updated class Brand(BaseObject): """ 
###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, active=None, brand_url=None, created_at=None, default=None, has_help_center=None, help_center_state=None, host_mapping=None, id=None, logo=None, name=None, subdomain=None, updated_at=None, url=None, **kwargs): self.api = api # Comment: If the brand is set as active # Mandatory: no # Read-only: no # Type: boolean self.active = active # Comment: The url of the brand # Mandatory: no # Read-only: no # Type: string self.brand_url = brand_url # Comment: The time the brand was created # Mandatory: no # Read-only: yes # Type: date self.created_at = created_at # Comment: Is the brand the default brand for this account # Mandatory: no # Read-only: no # Type: boolean self.default = default # Comment: If the brand has a Help Center # Mandatory: no # Read-only: no # Type: boolean self.has_help_center = has_help_center # Comment: The state of the Help Center: enabled, disabled, or restricted # Mandatory: no # Read-only: yes # Type: string self.help_center_state = help_center_state # Comment: The hostmapping to this brand, if any (only admins view this key) # Mandatory: no # Read-only: no # Type: string self.host_mapping = host_mapping # Comment: Automatically assigned when the brand is created # Mandatory: no # Read-only: yes # Type: integer self.id = id # Comment: Logo image for this brand # Mandatory: no # Read-only: no # Type: :class:`Attachment` self.logo = logo # Comment: The name of the brand # Mandatory: yes # Read-only: no # Type: string self.name = name # Comment: The subdomain of the brand # Mandatory: yes # Read-only: no # Type: string self.subdomain = subdomain # Comment: The time of the last update of the brand # Mandatory: no # Read-only: yes # Type: date self.updated_at = updated_at # Comment: The API url of this brand # Mandatory: no # Read-only: yes # Type: string self.url = url for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue @property def created(self): """ | Comment: The time the brand was created """ if self.created_at: return dateutil.parser.parse(self.created_at) @created.setter def created(self, created): if created: self.created_at = created @property def updated(self): """ | Comment: The time of the last update of the brand """ if self.updated_at: return dateutil.parser.parse(self.updated_at) @updated.setter def updated(self, updated): if updated: self.updated_at = updated class CcEvent(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, id=None, recipients=None, type=None, via=None, **kwargs): self.api = api self.id = id self.recipients = recipients self.type = type self.via = via for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue class ChangeEvent(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def 
__init__(self, api=None, field_name=None, id=None, previous_value=None, type=None, value=None, **kwargs): self.api = api self.field_name = field_name self.id = id self.previous_value = previous_value self.type = type self.value = value for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue class Comment(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, attachments=None, author_id=None, body=None, created_at=None, id=None, metadata=None, public=None, type=None, via=None, **kwargs): self.api = api self.attachments = attachments self.author_id = author_id self.body = body self.created_at = created_at self.id = id self.metadata = metadata self.public = public self.type = type self.via = via for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue @property def author(self): if self.api and self.author_id: return self.api._get_user(self.author_id) @author.setter def author(self, author): if author: self.author_id = author.id self._author = author @property def created(self): if self.created_at: return dateutil.parser.parse(self.created_at) @created.setter def created(self, created): if created: self.created_at = created class CommentPrivacyChangeEvent(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, comment_id=None, id=None, public=None, type=None, **kwargs): self.api = api self.comment_id = comment_id self.id = id self.public = public self.type = type for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue @property def comment(self): if self.api and self.comment_id: return self.api._get_comment(self.comment_id) @comment.setter def comment(self, comment): if comment: self.comment_id = comment.id self._comment = comment class Conditions(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, all=None, any=None, **kwargs): self.api = api self.all = all self.any = any for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue class CreateEvent(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, field_name=None, id=None, type=None, value=None, **kwargs): self.api = api self.field_name = field_name self.id = id self.type = type self.value = value for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: 
class CreateEvent(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, field_name=None, id=None, type=None, value=None, **kwargs):
        self.api = api
        self.field_name = field_name
        self.id = id
        self.type = type
        self.value = value

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue


class CustomAgentRole(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, configuration=None, created_at=None, description=None, id=None, name=None,
                 role_type=None, updated_at=None, **kwargs):
        self.api = api
        self.configuration = configuration
        self.created_at = created_at
        self.description = description
        self.id = id
        self.name = name
        self.role_type = role_type
        self.updated_at = updated_at

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue

    @property
    def created(self):
        if self.created_at:
            return dateutil.parser.parse(self.created_at)

    @created.setter
    def created(self, created):
        if created:
            self.created_at = created

    @property
    def updated(self):
        if self.updated_at:
            return dateutil.parser.parse(self.updated_at)

    @updated.setter
    def updated(self, updated):
        if updated:
            self.updated_at = updated


class CustomField(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, id=None, value=None, **kwargs):
        self.api = api
        self.id = id
        self.value = value

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue


class CustomFieldOption(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, id=None, name=None, position=None, raw_name=None, url=None, value=None, **kwargs):
        self.api = api
        self.id = id
        self.name = name
        self.position = position
        self.raw_name = raw_name
        self.url = url
        self.value = value

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue


class Definitions(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, all=None, any=None, **kwargs):
        self.api = api
        self.all = all
        self.any = any

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue


class ErrorEvent(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, id=None, message=None, type=None, **kwargs):
        self.api = api
        self.id = id
        self.message = message
        self.type = type

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue
class Export(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, status=None, view_id=None, **kwargs):
        self.api = api
        self.status = status
        self.view_id = view_id

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue

    @property
    def view(self):
        if self.api and self.view_id:
            return self.api._get_view(self.view_id)

    @view.setter
    def view(self, view):
        if view:
            self.view_id = view.id
            self._view = view


class ExternalEvent(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, body=None, id=None, resource=None, type=None, **kwargs):
        self.api = api
        self.body = body
        self.id = id
        self.resource = resource
        self.type = type

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue


class FacebookCommentEvent(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, attachments=None, author_id=None, body=None, data=None, graph_object_id=None,
                 html_body=None, id=None, public=None, trusted=None, type=None, **kwargs):
        self.api = api
        self.attachments = attachments
        self.author_id = author_id
        self.body = body
        self.data = data
        self.graph_object_id = graph_object_id
        self.html_body = html_body
        self.id = id
        self.public = public
        self.trusted = trusted
        self.type = type

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue

    @property
    def author(self):
        if self.api and self.author_id:
            return self.api._get_user(self.author_id)

    @author.setter
    def author(self, author):
        if author:
            self.author_id = author.id
            self._author = author


class FacebookEvent(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, body=None, communication=None, id=None, page=None, ticket_via=None, type=None,
                 **kwargs):
        self.api = api
        self.body = body
        self.communication = communication
        self.id = id
        self.page = page
        self.ticket_via = ticket_via
        self.type = type

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue
class Group(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, created_at=None, deleted=None, id=None, name=None, updated_at=None, url=None,
                 **kwargs):
        self.api = api

        # Comment: The time the group was created | Mandatory: no | Read-only: yes | Type: date
        self.created_at = created_at

        # Comment: Deleted groups get marked as such | Mandatory: no | Read-only: yes | Type: boolean
        self.deleted = deleted

        # Comment: Automatically assigned when creating groups | Mandatory: no | Read-only: yes | Type: integer
        self.id = id

        # Comment: The name of the group | Mandatory: yes | Read-only: no | Type: string
        self.name = name

        # Comment: The time of the last update of the group | Mandatory: no | Read-only: yes | Type: date
        self.updated_at = updated_at

        # Comment: The API url of this group | Mandatory: no | Read-only: yes | Type: string
        self.url = url

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue

    @property
    def created(self):
        """| Comment: The time the group was created"""
        if self.created_at:
            return dateutil.parser.parse(self.created_at)

    @created.setter
    def created(self, created):
        if created:
            self.created_at = created

    @property
    def updated(self):
        """| Comment: The time of the last update of the group"""
        if self.updated_at:
            return dateutil.parser.parse(self.updated_at)

    @updated.setter
    def updated(self, updated):
        if updated:
            self.updated_at = updated
class GroupMembership(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, created_at=None, default=None, group_id=None, id=None, updated_at=None, url=None,
                 user_id=None, **kwargs):
        self.api = api

        # Comment: The time the membership was created | Mandatory: no | Read-only: yes | Type: date
        self.created_at = created_at

        # Comment: If true, tickets assigned directly to the agent will assume this membership's group. | Mandatory: no | Read-only: no | Type: boolean
        self.default = default

        # Comment: The id of a group | Mandatory: yes | Read-only: no | Type: integer
        self.group_id = group_id

        # Comment: Automatically assigned upon creation | Mandatory: no | Read-only: yes | Type: integer
        self.id = id

        # Comment: The time of the last update of the membership | Mandatory: no | Read-only: yes | Type: date
        self.updated_at = updated_at

        # Comment: The API url of this record | Mandatory: no | Read-only: yes | Type: string
        self.url = url

        # Comment: The id of an agent | Mandatory: yes | Read-only: no | Type: integer
        self.user_id = user_id

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue

    @property
    def created(self):
        """| Comment: The time the membership was created"""
        if self.created_at:
            return dateutil.parser.parse(self.created_at)

    @created.setter
    def created(self, created):
        if created:
            self.created_at = created

    @property
    def group(self):
        """| Comment: The id of a group"""
        if self.api and self.group_id:
            return self.api._get_group(self.group_id)

    @group.setter
    def group(self, group):
        if group:
            self.group_id = group.id
            self._group = group

    @property
    def updated(self):
        """| Comment: The time of the last update of the membership"""
        if self.updated_at:
            return dateutil.parser.parse(self.updated_at)

    @updated.setter
    def updated(self, updated):
        if updated:
            self.updated_at = updated

    @property
    def user(self):
        """| Comment: The id of an agent"""
        if self.api and self.user_id:
            return self.api._get_user(self.user_id)

    @user.setter
    def user(self, user):
        if user:
            self.user_id = user.id
            self._user = user


class Identity(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, created_at=None, deliverable_state=None, id=None, primary=None, type=None,
                 undeliverable_count=None, updated_at=None, url=None, user_id=None, value=None, verified=None,
                 **kwargs):
        self.api = api
        self.created_at = created_at
        self.deliverable_state = deliverable_state
        self.id = id
        self.primary = primary
        self.type = type
        self.undeliverable_count = undeliverable_count
        self.updated_at = updated_at
        self.url = url
        self.user_id = user_id
        self.value = value
        self.verified = verified

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue

    @property
    def created(self):
        if self.created_at:
            return dateutil.parser.parse(self.created_at)

    @created.setter
    def created(self, created):
        if created:
            self.created_at = created

    @property
    def updated(self):
        if self.updated_at:
            return dateutil.parser.parse(self.updated_at)

    @updated.setter
    def updated(self, updated):
        if updated:
            self.updated_at = updated

    @property
    def user(self):
        if self.api and self.user_id:
            return self.api._get_user(self.user_id)

    @user.setter
    def user(self, user):
        if user:
            self.user_id = user.id
            self._user = user
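# ---------------------------------------------------------------------------
# Note on the reference properties above (e.g. ``GroupMembership.group`` and
# ``.user``): the getter lazily resolves the stored ``*_id`` through the api
# object, and the setter keeps the id in sync when a full object is assigned.
# A sketch, assuming ``membership`` came from a configured client and
# ``some_group`` is a Group instance (both hypothetical here):
#
#     membership.group                # calls self.api._get_group(self.group_id)
#     membership.group = some_group   # stores some_group.id in self.group_id
# ---------------------------------------------------------------------------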
class Item(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, created_at=None, default_locale_id=None, id=None, name=None, outdated=None,
                 placeholder=None, updated_at=None, url=None, variants=None, **kwargs):
        self.api = api
        self.created_at = created_at
        self.default_locale_id = default_locale_id
        self.id = id
        self.name = name
        self.outdated = outdated
        self.placeholder = placeholder
        self.updated_at = updated_at
        self.url = url
        self.variants = variants

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue

    @property
    def created(self):
        if self.created_at:
            return dateutil.parser.parse(self.created_at)

    @created.setter
    def created(self, created):
        if created:
            self.created_at = created

    @property
    def default_locale(self):
        if self.api and self.default_locale_id:
            return self.api._get_default_locale(self.default_locale_id)

    @default_locale.setter
    def default_locale(self, default_locale):
        if default_locale:
            self.default_locale_id = default_locale.id
            self._default_locale = default_locale

    @property
    def updated(self):
        if self.updated_at:
            return dateutil.parser.parse(self.updated_at)

    @updated.setter
    def updated(self, updated):
        if updated:
            self.updated_at = updated


class JobStatus(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, id=None, message=None, progress=None, results=None, status=None, total=None,
                 url=None, **kwargs):
        self.api = api
        self.id = id
        self.message = message
        self.progress = progress
        self.results = results
        self.status = status
        self.total = total
        self.url = url

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue


class JobStatusResult(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, action=None, errors=None, id=None, status=None, success=None, title=None, **kwargs):
        self.api = api
        self.action = action
        self.errors = errors
        self.id = id
        self.status = status
        self.success = success
        self.title = title

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue
class Link(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, created_at=None, id=None, issue_id=None, issue_key=None, ticket_id=None,
                 updated_at=None, url=None, **kwargs):
        self.api = api
        self.created_at = created_at
        self.id = id
        self.issue_id = issue_id
        self.issue_key = issue_key
        self.ticket_id = ticket_id
        self.updated_at = updated_at
        self.url = url

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue

    @property
    def created(self):
        if self.created_at:
            return dateutil.parser.parse(self.created_at)

    @created.setter
    def created(self, created):
        if created:
            self.created_at = created

    @property
    def ticket(self):
        if self.api and self.ticket_id:
            return self.api._get_ticket(self.ticket_id)

    @ticket.setter
    def ticket(self, ticket):
        if ticket:
            self.ticket_id = ticket.id
            self._ticket = ticket

    @property
    def updated(self):
        if self.updated_at:
            return dateutil.parser.parse(self.updated_at)

    @updated.setter
    def updated(self, updated):
        if updated:
            self.updated_at = updated


class Locale(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, created_at=None, default=None, id=None, locale=None, name=None, native_name=None,
                 presentation_name=None, rtl=None, updated_at=None, url=None, **kwargs):
        self.api = api
        self.created_at = created_at
        self.default = default

        # Description: Either the ID or the bcp-47 code of the locale (es-419, en-us, pt-br) | Type: string
        self.id = id

        self.locale = locale
        self.name = name
        self.native_name = native_name
        self.presentation_name = presentation_name
        self.rtl = rtl
        self.updated_at = updated_at
        self.url = url

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue

    @property
    def created(self):
        if self.created_at:
            return dateutil.parser.parse(self.created_at)

    @created.setter
    def created(self, created):
        if created:
            self.created_at = created

    @property
    def updated(self):
        if self.updated_at:
            return dateutil.parser.parse(self.updated_at)

    @updated.setter
    def updated(self, updated):
        if updated:
            self.updated_at = updated


class LogmeinTranscriptEvent(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, body=None, id=None, type=None, **kwargs):
        self.api = api
        self.body = body
        self.id = id
        self.type = type

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue
class Macro(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, actions=None, active=None, created_at=None, description=None, id=None,
                 position=None, restriction=None, title=None, updated_at=None, url=None, **kwargs):
        self.api = api

        # Comment: An object describing what the macro will do | Type: :class:`Actions`
        self.actions = actions

        # Comment: Useful for determining if the macro should be displayed | Type: boolean
        self.active = active

        # Comment: The time the macro was created | Type: date
        self.created_at = created_at

        # Comment: The description of the macro | Type: string
        self.description = description

        # Comment: Automatically assigned when created | Type: integer
        self.id = id

        # Comment: The position of the macro | Type: integer
        self.position = position

        # Comment: Who may access this macro. Will be null when everyone in the account can access it | Type: object
        self.restriction = restriction

        # Comment: The title of the macro | Type: string
        self.title = title

        # Comment: The time of the last update of the macro | Type: date
        self.updated_at = updated_at

        self.url = url

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue

    @property
    def created(self):
        """| Comment: The time the macro was created"""
        if self.created_at:
            return dateutil.parser.parse(self.created_at)

    @created.setter
    def created(self, created):
        if created:
            self.created_at = created

    @property
    def updated(self):
        """| Comment: The time of the last update of the macro"""
        if self.updated_at:
            return dateutil.parser.parse(self.updated_at)

    @updated.setter
    def updated(self, updated):
        if updated:
            self.updated_at = updated


class MacroResult(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, ticket=None, **kwargs):
        self.api = api
        self.ticket = ticket

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue


class Metadata(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, custom=None, system=None, **kwargs):
        self.api = api
        self.custom = custom
        self.system = system

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue


class NotificationEvent(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, body=None, id=None, recipients=None, subject=None, type=None, via=None, **kwargs):
        self.api = api
        self.body = body
        self.id = id
        self.recipients = recipients
        self.subject = subject
        self.type = type
        self.via = via

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue
class Organization(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, created_at=None, details=None, domain_names=None, external_id=None, group_id=None,
                 id=None, name=None, notes=None, organization_fields=None, shared_comments=None,
                 shared_tickets=None, tags=None, updated_at=None, url=None, **kwargs):
        self.api = api

        # Comment: The time the organization was created | Mandatory: no | Read-only: yes | Type: date
        self.created_at = created_at

        # Comment: Any details about the organization, such as the address | Mandatory: no | Read-only: no | Type: string
        self.details = details

        # Comment: An array of domain names associated with this organization | Mandatory: no | Read-only: no | Type: array
        self.domain_names = domain_names

        # Comment: A unique external id to associate organizations to an external record | Mandatory: no | Read-only: no | Type: string
        self.external_id = external_id

        # Comment: New tickets from users in this organization are automatically put in this group | Mandatory: no | Read-only: no | Type: integer
        self.group_id = group_id

        # Comment: Automatically assigned when the organization is created | Mandatory: no | Read-only: yes | Type: integer
        self.id = id

        # Comment: A unique name for the organization | Mandatory: yes | Read-only: no | Type: string
        self.name = name

        # Comment: Any notes you have about the organization | Mandatory: no | Read-only: no | Type: string
        self.notes = notes

        # Comment: Custom fields for this organization | Mandatory: no | Read-only: no | Type: :class:`hash`
        self.organization_fields = organization_fields

        # Comment: End users in this organization are able to see each other's comments on tickets | Mandatory: no | Read-only: no | Type: boolean
        self.shared_comments = shared_comments

        # Comment: End users in this organization are able to see each other's tickets | Mandatory: no | Read-only: no | Type: boolean
        self.shared_tickets = shared_tickets

        # Comment: The tags of the organization | Mandatory: no | Read-only: no | Type: array
        self.tags = tags

        # Comment: The time of the last update of the organization | Mandatory: no | Read-only: yes | Type: date
        self.updated_at = updated_at

        # Comment: The API url of this organization | Mandatory: no | Read-only: yes | Type: string
        self.url = url

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue

    @property
    def created(self):
        """| Comment: The time the organization was created"""
        if self.created_at:
            return dateutil.parser.parse(self.created_at)

    @created.setter
    def created(self, created):
        if created:
            self.created_at = created

    @property
    def group(self):
        """| Comment: New tickets from users in this organization are automatically put in this group"""
        if self.api and self.group_id:
            return self.api._get_group(self.group_id)

    @group.setter
    def group(self, group):
        if group:
            self.group_id = group.id
            self._group = group

    @property
    def updated(self):
        """| Comment: The time of the last update of the organization"""
        if self.updated_at:
            return dateutil.parser.parse(self.updated_at)

    @updated.setter
    def updated(self, updated):
        if updated:
            self.updated_at = updated


class OrganizationActivityEvent(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, body=None, id=None, recipients=None, subject=None, type=None, via=None, **kwargs):
        self.api = api
        self.body = body
        self.id = id
        self.recipients = recipients
        self.subject = subject
        self.type = type
        self.via = via

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue
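# ---------------------------------------------------------------------------
# Note on the constructor boilerplate shared by every class in this module:
# after applying ``kwargs``, each ``__init__`` walks ``self.to_dict()`` and
# discards still-``None`` attributes from ``_dirty_attributes`` (both provided
# by ``BaseObject``), so only fields the caller explicitly set are treated as
# modified and serialized back to the API.  Sketch, with hypothetical values:
#
#     org = Organization(name='ACME')   # only 'name' stays dirty; the
#                                       # None-valued fields are pruned
# ---------------------------------------------------------------------------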
class OrganizationField(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, active=None, created_at=None, description=None, id=None, key=None, position=None,
                 raw_description=None, raw_title=None, regexp_for_validation=None, title=None, type=None,
                 updated_at=None, url=None, **kwargs):
        self.api = api

        # Comment: If true, this field is available for use | Mandatory: no | Read-only: no | Type: boolean
        self.active = active

        # Comment: The time the ticket field was created | Mandatory: no | Read-only: yes | Type: date
        self.created_at = created_at

        # Comment: User-defined description of this field's purpose | Mandatory: no | Read-only: no | Type: string
        self.description = description

        # Comment: Automatically assigned upon creation | Mandatory: no | Read-only: yes | Type: integer
        self.id = id

        # Comment: A unique key that identifies this custom field. This is used for updating the field and referencing in placeholders. | Mandatory: on create | Read-only: no | Type: string
        self.key = key

        # Comment: Ordering of the field relative to other fields | Mandatory: no | Read-only: no | Type: integer
        self.position = position

        # Comment: The dynamic content placeholder, if present, or the "description" value, if not. See Dynamic Content | Mandatory: no | Read-only: no | Type: string
        self.raw_description = raw_description

        # Comment: The dynamic content placeholder, if present, or the "title" value, if not. See Dynamic Content | Mandatory: no | Read-only: no | Type: string
        self.raw_title = raw_title

        # Comment: Regular expression field only. The validation pattern for a field value to be deemed valid. | Mandatory: no | Read-only: no | Type: string
        self.regexp_for_validation = regexp_for_validation

        # Comment: The title of the custom field | Mandatory: yes | Read-only: no | Type: string
        self.title = title

        # Comment: Type of the custom field: "checkbox", "date", "decimal", "dropdown", "integer", "regexp", "text", or "textarea" | Mandatory: yes | Read-only: no | Type: string
        self.type = type

        # Comment: The time of the last update of the ticket field | Mandatory: no | Read-only: yes | Type: date
        self.updated_at = updated_at

        # Comment: The URL for this resource | Mandatory: no | Read-only: yes | Type: string
        self.url = url

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue

    @property
    def created(self):
        """| Comment: The time the ticket field was created"""
        if self.created_at:
            return dateutil.parser.parse(self.created_at)

    @created.setter
    def created(self, created):
        if created:
            self.created_at = created

    @property
    def updated(self):
        """| Comment: The time of the last update of the ticket field"""
        if self.updated_at:
            return dateutil.parser.parse(self.updated_at)

    @updated.setter
    def updated(self, updated):
        if updated:
            self.updated_at = updated
class OrganizationMembership(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, created_at=None, default=None, id=None, organization_id=None, updated_at=None,
                 url=None, user_id=None, **kwargs):
        self.api = api

        # Comment: When this record was created | Mandatory: no | Read-only: yes | Type: date
        self.created_at = created_at

        # Comment: Denotes whether this is the default organization membership for the user. If false, returns null | Mandatory: yes | Read-only: no | Type: boolean
        self.default = default

        # Comment: Automatically assigned when the membership is created | Mandatory: no | Read-only: yes | Type: integer
        self.id = id

        # Comment: The ID of the organization associated with this user, in this membership | Mandatory: yes | Read-only: yes | Type: integer
        self.organization_id = organization_id

        # Comment: When this record last got updated | Mandatory: no | Read-only: yes | Type: date
        self.updated_at = updated_at

        # Comment: The API url of this membership | Mandatory: no | Read-only: yes | Type: string
        self.url = url

        # Comment: The ID of the user to whom this membership belongs | Mandatory: yes | Read-only: yes | Type: integer
        self.user_id = user_id

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue

    @property
    def created(self):
        """| Comment: When this record was created"""
        if self.created_at:
            return dateutil.parser.parse(self.created_at)

    @created.setter
    def created(self, created):
        if created:
            self.created_at = created

    @property
    def organization(self):
        """| Comment: The ID of the organization associated with this user, in this membership"""
        if self.api and self.organization_id:
            return self.api._get_organization(self.organization_id)

    @organization.setter
    def organization(self, organization):
        if organization:
            self.organization_id = organization.id
            self._organization = organization

    @property
    def updated(self):
        """| Comment: When this record last got updated"""
        if self.updated_at:
            return dateutil.parser.parse(self.updated_at)

    @updated.setter
    def updated(self, updated):
        if updated:
            self.updated_at = updated

    @property
    def user(self):
        """| Comment: The ID of the user to whom this membership belongs"""
        if self.api and self.user_id:
            return self.api._get_user(self.user_id)

    @user.setter
    def user(self, user):
        if user:
            self.user_id = user.id
            self._user = user


class PolicyMetric(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, business_hours=None, metric=None, priority=None, target=None, **kwargs):
        self.api = api
        self.business_hours = business_hours
        self.metric = metric
        self.priority = priority
        self.target = target

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue


class PushEvent(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, id=None, type=None, value=None, value_reference=None, **kwargs):
        self.api = api
        self.id = id
        self.type = type
        self.value = value
        self.value_reference = value_reference

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue
class Recipient(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, created_at=None, delivered_at=None, delivery_id=None, id=None, survey_id=None,
                 survey_name=None, updated_at=None, user_email=None, user_id=None, user_name=None, **kwargs):
        self.api = api
        self.created_at = created_at
        self.delivered_at = delivered_at
        self.delivery_id = delivery_id
        self.id = id
        self.survey_id = survey_id
        self.survey_name = survey_name
        self.updated_at = updated_at
        self.user_email = user_email
        self.user_id = user_id
        self.user_name = user_name

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue

    @property
    def created(self):
        if self.created_at:
            return dateutil.parser.parse(self.created_at)

    @created.setter
    def created(self, created):
        if created:
            self.created_at = created

    @property
    def delivered(self):
        if self.delivered_at:
            return dateutil.parser.parse(self.delivered_at)

    @delivered.setter
    def delivered(self, delivered):
        if delivered:
            self.delivered_at = delivered

    @property
    def delivery(self):
        if self.api and self.delivery_id:
            return self.api._get_delivery(self.delivery_id)

    @delivery.setter
    def delivery(self, delivery):
        if delivery:
            self.delivery_id = delivery.id
            self._delivery = delivery

    @property
    def survey(self):
        if self.api and self.survey_id:
            return self.api._get_survey(self.survey_id)

    @survey.setter
    def survey(self, survey):
        if survey:
            self.survey_id = survey.id
            self._survey = survey

    @property
    def updated(self):
        if self.updated_at:
            return dateutil.parser.parse(self.updated_at)

    @updated.setter
    def updated(self, updated):
        if updated:
            self.updated_at = updated

    @property
    def user(self):
        if self.api and self.user_id:
            return self.api._get_user(self.user_id)

    @user.setter
    def user(self, user):
        if user:
            self.user_id = user.id
            self._user = user


class RecipientAddress(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, brand_id=None, created_at=None, default=None, email=None, forwarding_status=None,
                 id=None, name=None, spf_status=None, updated_at=None, **kwargs):
        self.api = api
        self.brand_id = brand_id
        self.created_at = created_at
        self.default = default
        self.email = email
        self.forwarding_status = forwarding_status
        self.id = id
        self.name = name
        self.spf_status = spf_status
        self.updated_at = updated_at

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue

    @property
    def brand(self):
        if self.api and self.brand_id:
            return self.api._get_brand(self.brand_id)

    @brand.setter
    def brand(self, brand):
        if brand:
            self.brand_id = brand.id
            self._brand = brand

    @property
    def created(self):
        if self.created_at:
            return dateutil.parser.parse(self.created_at)

    @created.setter
    def created(self, created):
        if created:
            self.created_at = created

    @property
    def updated(self):
        if self.updated_at:
            return dateutil.parser.parse(self.updated_at)

    @updated.setter
    def updated(self, updated):
        if updated:
            self.updated_at = updated
class Request(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, assignee_id=None, can_be_solved_by_me=None, collaborator_ids=None,
                 created_at=None, custom_fields=None, description=None, due_at=None, fields=None, id=None,
                 organization_id=None, priority=None, requester_id=None, status=None, subject=None, type=None,
                 updated_at=None, url=None, via=None, **kwargs):
        self.api = api

        # Comment: The id of the assignee if the field is visible to end users | Mandatory: no | Read-only: yes | Type: integer
        self.assignee_id = assignee_id

        # Comment: If true, end user can mark request as solved. | Mandatory: no | Read-only: yes | Type: boolean
        self.can_be_solved_by_me = can_be_solved_by_me

        # Comment: Who are currently CC'ed on the ticket | Mandatory: no | Read-only: yes | Type: array
        self.collaborator_ids = collaborator_ids

        # Comment: When this record was created | Mandatory: no | Read-only: yes | Type: date
        self.created_at = created_at

        # Comment: The fields and entries for this request | Mandatory: no | Read-only: no | Type: :class:`Array`
        self.custom_fields = custom_fields

        # Comment: The first comment on the request | Mandatory: yes | Read-only: yes | Type: string
        self.description = description

        # Comment: When the task is due (only applies if the request is of type "task") | Mandatory: no | Read-only: no | Type: date
        self.due_at = due_at

        self.fields = fields

        # Comment: Automatically assigned when creating requests | Mandatory: no | Read-only: yes | Type: integer
        self.id = id

        # Comment: The organization of the requester | Mandatory: no | Read-only: yes | Type: integer
        self.organization_id = organization_id

        # Comment: The priority of the request, "low", "normal", "high", "urgent" | Mandatory: no | Read-only: no | Type: string
        self.priority = priority

        # Comment: The id of the requester | Mandatory: no | Read-only: yes | Type: integer
        self.requester_id = requester_id

        # Comment: The state of the request, "new", "open", "pending", "hold", "solved", "closed" | Mandatory: no | Read-only: no | Type: string
        self.status = status

        # Comment: The value of the subject field for this request if the subject field is visible to end users; a truncated version of the description otherwise | Mandatory: yes | Read-only: no | Type: string
        self.subject = subject

        # Comment: The type of the request, "question", "incident", "problem", "task" | Mandatory: no | Read-only: no | Type: string
        self.type = type

        # Comment: When this record last got updated | Mandatory: no | Read-only: yes | Type: date
        self.updated_at = updated_at

        # Comment: The API url of this request | Mandatory: no | Read-only: yes | Type: string
        self.url = url

        # Comment: This object explains how the request was created | Mandatory: no | Read-only: yes | Type: :class:`Via`
        self.via = via

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue

    @property
    def assignee(self):
        """| Comment: The id of the assignee if the field is visible to end users"""
        if self.api and self.assignee_id:
            return self.api._get_user(self.assignee_id)

    @assignee.setter
    def assignee(self, assignee):
        if assignee:
            self.assignee_id = assignee.id
            self._assignee = assignee

    @property
    def collaborators(self):
        """| Comment: Who are currently CC'ed on the ticket"""
        if self.api and self.collaborator_ids:
            return self.api._get_users(self.collaborator_ids)

    @collaborators.setter
    def collaborators(self, collaborators):
        if collaborators:
            self.collaborator_ids = [o.id for o in collaborators]
            self._collaborators = collaborators

    @property
    def created(self):
        """| Comment: When this record was created"""
        if self.created_at:
            return dateutil.parser.parse(self.created_at)

    @created.setter
    def created(self, created):
        if created:
            self.created_at = created

    @property
    def due(self):
        """| Comment: When the task is due (only applies if the request is of type "task")"""
        if self.due_at:
            return dateutil.parser.parse(self.due_at)

    @due.setter
    def due(self, due):
        if due:
            self.due_at = due

    @property
    def organization(self):
        """| Comment: The organization of the requester"""
        if self.api and self.organization_id:
            return self.api._get_organization(self.organization_id)

    @organization.setter
    def organization(self, organization):
        if organization:
            self.organization_id = organization.id
            self._organization = organization

    @property
    def requester(self):
        """| Comment: The id of the requester"""
        if self.api and self.requester_id:
            return self.api._get_user(self.requester_id)

    @requester.setter
    def requester(self, requester):
        if requester:
            self.requester_id = requester.id
            self._requester = requester

    @property
    def updated(self):
        """| Comment: When this record last got updated"""
        if self.updated_at:
            return dateutil.parser.parse(self.updated_at)

    @updated.setter
    def updated(self, updated):
        if updated:
            self.updated_at = updated
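# ---------------------------------------------------------------------------
# Note on mandatory fields: the generated comments above mark ``subject`` and
# ``description`` as mandatory for a Request, but the constructor itself does
# not enforce this; validation happens server-side.  A hypothetical creation
# sketch (the ``zenpy_client.requests.create`` call is illustrative only, not
# defined in this module):
#
#     req = Request(subject='Printer on fire', description='It is very bad.')
#     zenpy_client.requests.create(req)   # hypothetical API call
# ---------------------------------------------------------------------------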
created """ if self.created_at: return dateutil.parser.parse(self.created_at) @created.setter def created(self, created): if created: self.created_at = created @property def due(self): """ | Comment: When the task is due (only applies if the request is of type "task") """ if self.due_at: return dateutil.parser.parse(self.due_at) @due.setter def due(self, due): if due: self.due_at = due @property def organization(self): """ | Comment: The organization of the requester """ if self.api and self.organization_id: return self.api._get_organization(self.organization_id) @organization.setter def organization(self, organization): if organization: self.organization_id = organization.id self._organization = organization @property def requester(self): """ | Comment: The id of the requester """ if self.api and self.requester_id: return self.api._get_user(self.requester_id) @requester.setter def requester(self, requester): if requester: self.requester_id = requester.id self._requester = requester @property def updated(self): """ | Comment: When this record last got updated """ if self.updated_at: return dateutil.parser.parse(self.updated_at) @updated.setter def updated(self, updated): if updated: self.updated_at = updated class Response(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, comment=None, delivered_at=None, delivery_id=None, id=None, rated_at=None, rating=None, recipient_id=None, survey_id=None, survey_name=None, user_email=None, user_id=None, user_name=None, **kwargs): self.api = api self.comment = comment self.delivered_at = delivered_at self.delivery_id = delivery_id self.id = id self.rated_at = rated_at self.rating = rating self.recipient_id = recipient_id self.survey_id = survey_id self.survey_name = survey_name self.user_email = user_email self.user_id = user_id self.user_name = user_name for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue @property def delivered(self): if self.delivered_at: return dateutil.parser.parse(self.delivered_at) @delivered.setter def delivered(self, delivered): if delivered: self.delivered_at = delivered @property def delivery(self): if self.api and self.delivery_id: return self.api._get_delivery(self.delivery_id) @delivery.setter def delivery(self, delivery): if delivery: self.delivery_id = delivery.id self._delivery = delivery @property def rated(self): if self.rated_at: return dateutil.parser.parse(self.rated_at) @rated.setter def rated(self, rated): if rated: self.rated_at = rated @property def recipient(self): if self.api and self.recipient_id: return self.api._get_user(self.recipient_id) @recipient.setter def recipient(self, recipient): if recipient: self.recipient_id = recipient.id self._recipient = recipient @property def survey(self): if self.api and self.survey_id: return self.api._get_survey(self.survey_id) @survey.setter def survey(self, survey): if survey: self.survey_id = survey.id self._survey = survey @property def user(self): if self.api and self.user_id: return self.api._get_user(self.user_id) @user.setter def user(self, user): if user: self.user_id = user.id self._user = user class SatisfactionRating(BaseObject): """ ###################################################################### # Do not modify, this 
class SatisfactionRating(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, assignee_id=None, created_at=None, group_id=None, id=None, requester_id=None,
                 score=None, ticket_id=None, updated_at=None, url=None, **kwargs):
        self.api = api

        # Comment: The id of agent assigned to at the time of rating | Mandatory: yes | Read-only: yes | Type: integer
        self.assignee_id = assignee_id

        # Comment: The time the satisfaction rating got created | Mandatory: no | Read-only: yes | Type: date
        self.created_at = created_at

        # Comment: The id of group assigned to at the time of rating | Mandatory: yes | Read-only: yes | Type: integer
        self.group_id = group_id

        # Comment: Automatically assigned upon creation | Mandatory: no | Read-only: yes | Type: integer
        self.id = id

        # Comment: The id of ticket requester submitting the rating | Mandatory: yes | Read-only: yes | Type: integer
        self.requester_id = requester_id

        # Comment: The rating: "offered", "unoffered", "good" or "bad" | Mandatory: yes | Read-only: no | Type: string
        self.score = score

        # Comment: The id of ticket being rated | Mandatory: yes | Read-only: yes | Type: integer
        self.ticket_id = ticket_id

        # Comment: The time the satisfaction rating got updated | Mandatory: no | Read-only: yes | Type: date
        self.updated_at = updated_at

        # Comment: The API url of this rating | Mandatory: no | Read-only: yes | Type: string
        self.url = url

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue

    @property
    def assignee(self):
        """| Comment: The id of agent assigned to at the time of rating"""
        if self.api and self.assignee_id:
            return self.api._get_user(self.assignee_id)

    @assignee.setter
    def assignee(self, assignee):
        if assignee:
            self.assignee_id = assignee.id
            self._assignee = assignee

    @property
    def created(self):
        """| Comment: The time the satisfaction rating got created"""
        if self.created_at:
            return dateutil.parser.parse(self.created_at)

    @created.setter
    def created(self, created):
        if created:
            self.created_at = created

    @property
    def group(self):
        """| Comment: The id of group assigned to at the time of rating"""
        if self.api and self.group_id:
            return self.api._get_group(self.group_id)

    @group.setter
    def group(self, group):
        if group:
            self.group_id = group.id
            self._group = group

    @property
    def requester(self):
        """| Comment: The id of ticket requester submitting the rating"""
        if self.api and self.requester_id:
            return self.api._get_user(self.requester_id)

    @requester.setter
    def requester(self, requester):
        if requester:
            self.requester_id = requester.id
            self._requester = requester

    @property
    def ticket(self):
        """| Comment: The id of ticket being rated"""
        if self.api and self.ticket_id:
            return self.api._get_ticket(self.ticket_id)

    @ticket.setter
    def ticket(self, ticket):
        if ticket:
            self.ticket_id = ticket.id
            self._ticket = ticket

    @property
    def updated(self):
        """| Comment: The time the satisfaction rating got updated"""
        if self.updated_at:
            return dateutil.parser.parse(self.updated_at)

    @updated.setter
    def updated(self, updated):
        if updated:
            self.updated_at = updated
class SatisfactionRatingEvent(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, assignee_id=None, body=None, id=None, score=None, type=None, **kwargs):
        self.api = api
        self.assignee_id = assignee_id
        self.body = body
        self.id = id
        self.score = score
        self.type = type

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue

    @property
    def assignee(self):
        if self.api and self.assignee_id:
            return self.api._get_user(self.assignee_id)

    @assignee.setter
    def assignee(self, assignee):
        if assignee:
            self.assignee_id = assignee.id
            self._assignee = assignee


class Schedule(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, created_at=None, id=None, intervals=None, name=None, time_zone=None,
                 updated_at=None, url=None, **kwargs):
        self.api = api

        # Comment: Time the schedule was created | Type: date
        self.created_at = created_at

        # Comment: Automatically assigned upon creation | Type: integer
        self.id = id

        # Comment: Array of intervals for the schedule | Type: array
        self.intervals = intervals

        # Comment: Name of the schedule | Type: string
        self.name = name

        # Comment: Time zone of the schedule | Type: string
        self.time_zone = time_zone

        # Comment: Time the schedule was last updated | Type: date
        self.updated_at = updated_at

        self.url = url

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue

    @property
    def created(self):
        """| Comment: Time the schedule was created"""
        if self.created_at:
            return dateutil.parser.parse(self.created_at)

    @created.setter
    def created(self, created):
        if created:
            self.created_at = created

    @property
    def updated(self):
        """| Comment: Time the schedule was last updated"""
        if self.updated_at:
            return dateutil.parser.parse(self.updated_at)

    @updated.setter
    def updated(self, updated):
        if updated:
            self.updated_at = updated


class SharingAgreement(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, created_at=None, id=None, name=None, partner_name=None, remote_subdomain=None,
                 status=None, type=None, **kwargs):
        self.api = api

        # Comment: The time the record was created | Type: date
        self.created_at = created_at

        # Comment: Automatically assigned upon creation | Type: integer
        self.id = id

        # Comment: Name of this sharing agreement | Type: string
        self.name = name

        # Comment: Can be one of the following: 'jira', null | Type: string
        self.partner_name = partner_name

        # Comment: Subdomain of the remote account or null if not associated with an account | Type: string
        self.remote_subdomain = remote_subdomain

        # Comment: Can be one of the following: 'accepted', 'declined', 'pending', 'inactive' | Type: string
        self.status = status

        # Comment: Can be one of the following: 'inbound', 'outbound' | Type: string
        self.type = type

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue

    @property
    def created(self):
        """| Comment: The time the record was created"""
        if self.created_at:
            return dateutil.parser.parse(self.created_at)

    @created.setter
    def created(self, created):
        if created:
            self.created_at = created
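# ---------------------------------------------------------------------------
# Note on the getter/setter asymmetry above: ``created`` parses the stored
# string on read, but its setter assigns the given value to ``created_at``
# unchanged.  Passing a ``datetime`` therefore stores a ``datetime`` rather
# than an ISO 8601 string, so callers who set these properties should pass
# the same string form the API returns.  Sketch with a hypothetical instance:
#
#     agreement.created = '2020-01-01T00:00:00Z'   # stored verbatim
#     agreement.created                            # parsed to datetime on read
# ---------------------------------------------------------------------------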
class Skip(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, created_at=None, id=None, reason=None, ticket=None, ticket_id=None,
                 updated_at=None, user_id=None, **kwargs):
        self.api = api
        self.created_at = created_at
        self.id = id
        self.reason = reason
        self.ticket = ticket
        self.ticket_id = ticket_id
        self.updated_at = updated_at
        self.user_id = user_id

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue

    @property
    def created(self):
        if self.created_at:
            return dateutil.parser.parse(self.created_at)

    @created.setter
    def created(self, created):
        if created:
            self.created_at = created

    @property
    def updated(self):
        if self.updated_at:
            return dateutil.parser.parse(self.updated_at)

    @updated.setter
    def updated(self, updated):
        if updated:
            self.updated_at = updated

    @property
    def user(self):
        if self.api and self.user_id:
            return self.api._get_user(self.user_id)

    @user.setter
    def user(self, user):
        if user:
            self.user_id = user.id
            self._user = user


class SlaPolicy(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, created_at=None, description=None, filter=None, id=None, policy_metrics=None,
                 position=None, title=None, updated_at=None, url=None, **kwargs):
        self.api = api
        self.created_at = created_at
        self.description = description
        self.filter = filter
        self.id = id
        self.policy_metrics = policy_metrics
        self.position = position
        self.title = title
        self.updated_at = updated_at
        self.url = url

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue

    @property
    def created(self):
        if self.created_at:
            return dateutil.parser.parse(self.created_at)

    @created.setter
    def created(self, created):
        if created:
            self.created_at = created

    @property
    def updated(self):
        if self.updated_at:
            return dateutil.parser.parse(self.updated_at)

    @updated.setter
    def updated(self, updated):
        if updated:
            self.updated_at = updated


class Source(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, from_=None, rel=None, to=None, **kwargs):
        self.api = api
        self.from_ = from_
        self.rel = rel
        self.to = to

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue
class Status(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, action=None, errors=None, id=None, status=None, success=None, title=None, **kwargs):
        self.api = api
        self.action = action
        self.errors = errors
        self.id = id
        self.status = status
        self.success = success
        self.title = title

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue


class SuspendedTicket(BaseObject):
    """
    ######################################################################
    #   Do not modify, this class is autogenerated by gen_classes.py    #
    ######################################################################
    """

    def __init__(self, api=None, author=None, brand_id=None, cause=None, content=None, created_at=None, id=None,
                 recipient=None, subject=None, ticket_id=None, updated_at=None, url=None, via=None, **kwargs):
        self.api = api

        # Comment: The author id (if available), name and email | Mandatory: no | Read-only: yes | Type: object
        self.author = author

        # Comment: The id of the brand this ticket is associated with - only applicable for enterprise accounts | Mandatory: no | Read-only: yes | Type: integer
        self.brand_id = brand_id

        # Comment: Why the ticket was suspended | Mandatory: no | Read-only: yes | Type: string
        self.cause = cause

        # Comment: The content that was flagged | Mandatory: no | Read-only: yes | Type: string
        self.content = content

        # Comment: When this record was created | Mandatory: no | Read-only: yes | Type: date
        self.created_at = created_at

        # Comment: Automatically assigned | Mandatory: no | Read-only: yes | Type: integer
        self.id = id

        # Comment: The original recipient e-mail address of the ticket | Mandatory: no | Read-only: yes | Type: string
        self.recipient = recipient

        # Comment: The value of the subject field for this ticket | Mandatory: no | Read-only: yes | Type: string
        self.subject = subject

        # Comment: The ticket ID this suspended email is associated with, if available | Mandatory: no | Read-only: yes | Type: integer
        self.ticket_id = ticket_id

        # Comment: When this record last got updated | Mandatory: no | Read-only: yes | Type: date
        self.updated_at = updated_at

        # Comment: The API url of this ticket | Mandatory: no | Read-only: yes | Type: string
        self.url = url

        # Comment: This object explains how the ticket was created | Mandatory: no | Read-only: yes | Type: :class:`Via`
        self.via = via

        for key, value in kwargs.items():
            setattr(self, key, value)

        for key in self.to_dict():
            if getattr(self, key) is None:
                try:
                    self._dirty_attributes.remove(key)
                except KeyError:
                    continue

    @property
    def brand(self):
        """| Comment: The id of the brand this ticket is associated with - only applicable for enterprise accounts"""
        if self.api and self.brand_id:
            return self.api._get_brand(self.brand_id)

    @brand.setter
    def brand(self, brand):
        if brand:
            self.brand_id = brand.id
            self._brand = brand

    @property
    def created(self):
        """| Comment: When this record was created"""
        if self.created_at:
            return dateutil.parser.parse(self.created_at)

    @created.setter
    def created(self, created):
        if created:
            self.created_at = created

    @property
    def ticket(self):
        """| Comment: The ticket ID this suspended email is associated with, if available"""
        if self.api and self.ticket_id:
            return self.api._get_ticket(self.ticket_id)

    @ticket.setter
    def ticket(self, ticket):
        if ticket:
            self.ticket_id = ticket.id
            self._ticket = ticket

    @property
    def updated(self):
        """| Comment: When this record last got updated"""
        if self.updated_at:
            return dateutil.parser.parse(self.updated_at)

    @updated.setter
    def updated(self, updated):
        if updated:
            self.updated_at = updated
gen_classes.py # ###################################################################### """ def __init__(self, api=None, client=None, ip_address=None, latitude=None, location=None, longitude=None, **kwargs): self.api = api self.client = client self.ip_address = ip_address self.latitude = latitude self.location = location self.longitude = longitude for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue class Tag(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, count=None, name=None, **kwargs): self.api = api self.count = count self.name = name for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue class Target(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, active=None, content_type=None, created_at=None, id=None, method=None, password=None, target_url=None, title=None, type=None, url=None, username=None, **kwargs): self.api = api # Comment: Whether or not the target is activated # Mandatory: # Type: boolean self.active = active self.content_type = content_type # Comment: The time the target was created # Mandatory: # Type: date self.created_at = created_at # Comment: Automatically assigned when created # Mandatory: # Type: integer self.id = id self.method = method self.password = password self.target_url = target_url # Comment: A name for the target # Mandatory: yes # Type: string self.title = title # Comment: A pre-defined target, such as "basecamp_target". 
See the additional attributes for the type that follow # Mandatory: # Type: string self.type = type self.url = url self.username = username for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue @property def created(self): """ | Comment: The time the target was created """ if self.created_at: return dateutil.parser.parse(self.created_at) @created.setter def created(self, created): if created: self.created_at = created class Thumbnail(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, content_type=None, content_url=None, file_name=None, id=None, size=None, **kwargs): self.api = api self.content_type = content_type self.content_url = content_url self.file_name = file_name self.id = id self.size = size for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue class Ticket(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, assignee_id=None, brand_id=None, collaborator_ids=None, created_at=None, custom_fields=None, description=None, due_at=None, external_id=None, fields=None, forum_topic_id=None, group_id=None, has_incidents=None, id=None, organization_id=None, priority=None, problem_id=None, raw_subject=None, recipient=None, requester_id=None, satisfaction_rating=None, sharing_agreement_ids=None, status=None, subject=None, submitter_id=None, tags=None, type=None, updated_at=None, url=None, via=None, **kwargs): self.api = api # Comment: The agent currently assigned to the ticket # Mandatory: no # Read-only: no # Type: integer self.assignee_id = assignee_id # Comment: Enterprise only. The id of the brand this ticket is associated with # Mandatory: no # Read-only: no # Type: integer self.brand_id = brand_id # Comment: The ids of users currently cc'ed on the ticket # Mandatory: no # Read-only: no # Type: array self.collaborator_ids = collaborator_ids # Comment: When this record was created # Mandatory: no # Read-only: yes # Type: date self.created_at = created_at # Comment: Custom fields for the ticket. See Setting custom field values # Mandatory: no # Read-only: no # Type: array self.custom_fields = custom_fields # Comment: The first comment on the ticket # Mandatory: no # Read-only: yes # Type: string self.description = description # Comment: If this is a ticket of type "task" it has a due date. Due date format uses ISO 8601 format. 
# Mandatory: no # Read-only: no # Type: date self.due_at = due_at # Comment: An id you can use to link Zendesk Support tickets to local records # Mandatory: no # Read-only: no # Type: string self.external_id = external_id self.fields = fields # Comment: The topic this ticket originated from, if any # Mandatory: no # Read-only: no # Type: integer self.forum_topic_id = forum_topic_id # Comment: The group this ticket is assigned to # Mandatory: no # Read-only: no # Type: integer self.group_id = group_id # Comment: Is true of this ticket has been marked as a problem, false otherwise # Mandatory: no # Read-only: yes # Type: boolean self.has_incidents = has_incidents # Comment: Automatically assigned when the ticket is created # Mandatory: no # Read-only: yes # Type: integer self.id = id # Comment: The organization of the requester. You can only specify the ID of an organization associated with the requester. See Organization Memberships # Mandatory: no # Read-only: no # Type: integer self.organization_id = organization_id # Comment: The urgency with which the ticket should be addressed. Possible values: "urgent", "high", "normal", "low" # Mandatory: no # Read-only: no # Type: string self.priority = priority # Comment: For tickets of type "incident", the ID of the problem the incident is linked to # Mandatory: no # Read-only: no # Type: integer self.problem_id = problem_id # Comment: The dynamic content placeholder, if present, or the "subject" value, if not. See Dynamic Content # Mandatory: no # Read-only: no # Type: string self.raw_subject = raw_subject # Comment: The original recipient e-mail address of the ticket # Mandatory: no # Read-only: no # Type: string self.recipient = recipient # Comment: The user who requested this ticket # Mandatory: yes # Read-only: no # Type: integer self.requester_id = requester_id # Comment: The satisfaction rating of the ticket, if it exists, or the state of satisfaction, 'offered' or 'unoffered' # Mandatory: no # Read-only: yes # Type: object self.satisfaction_rating = satisfaction_rating # Comment: The ids of the sharing agreements used for this ticket # Mandatory: no # Read-only: yes # Type: array self.sharing_agreement_ids = sharing_agreement_ids # Comment: The state of the ticket. Possible values: "new", "open", "pending", "hold", "solved", "closed" # Mandatory: no # Read-only: no # Type: string self.status = status # Comment: The value of the subject field for this ticket # Mandatory: no # Read-only: no # Type: string self.subject = subject # Comment: The user who submitted the ticket. The submitter always becomes the author of the first comment on the ticket # Mandatory: no # Read-only: no # Type: integer self.submitter_id = submitter_id # Comment: The array of tags applied to this ticket # Mandatory: no # Read-only: no # Type: array self.tags = tags # Comment: The type of this ticket. 
Possible values: "problem", "incident", "question" or "task" # Mandatory: no # Read-only: no # Type: string self.type = type # Comment: When this record last got updated # Mandatory: no # Read-only: yes # Type: date self.updated_at = updated_at # Comment: The API url of this ticket # Mandatory: no # Read-only: yes # Type: string self.url = url # Comment: This object explains how the ticket was created # Mandatory: no # Read-only: yes # Type: :class:`Via` self.via = via for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue @property def assignee(self): """ | Comment: The agent currently assigned to the ticket """ if self.api and self.assignee_id: return self.api._get_user(self.assignee_id) @assignee.setter def assignee(self, assignee): if assignee: self.assignee_id = assignee.id self._assignee = assignee @property def brand(self): """ | Comment: Enterprise only. The id of the brand this ticket is associated with """ if self.api and self.brand_id: return self.api._get_brand(self.brand_id) @brand.setter def brand(self, brand): if brand: self.brand_id = brand.id self._brand = brand @property def collaborators(self): """ | Comment: The ids of users currently cc'ed on the ticket """ if self.api and self.collaborator_ids: return self.api._get_users(self.collaborator_ids) @collaborators.setter def collaborators(self, collaborators): if collaborators: self.collaborator_ids = [o.id for o in collaborators] self._collaborators = collaborators @property def created(self): """ | Comment: When this record was created """ if self.created_at: return dateutil.parser.parse(self.created_at) @created.setter def created(self, created): if created: self.created_at = created @property def due(self): """ | Comment: If this is a ticket of type "task" it has a due date. Due date format uses ISO 8601 format. """ if self.due_at: return dateutil.parser.parse(self.due_at) @due.setter def due(self, due): if due: self.due_at = due @property def forum_topic(self): """ | Comment: The topic this ticket originated from, if any """ if self.api and self.forum_topic_id: return self.api._get_topic(self.forum_topic_id) @forum_topic.setter def forum_topic(self, forum_topic): if forum_topic: self.forum_topic_id = forum_topic.id self._forum_topic = forum_topic @property def group(self): """ | Comment: The group this ticket is assigned to """ if self.api and self.group_id: return self.api._get_group(self.group_id) @group.setter def group(self, group): if group: self.group_id = group.id self._group = group @property def organization(self): """ | Comment: The organization of the requester. You can only specify the ID of an organization associated with the requester. 
See Organization Memberships """ if self.api and self.organization_id: return self.api._get_organization(self.organization_id) @organization.setter def organization(self, organization): if organization: self.organization_id = organization.id self._organization = organization @property def problem(self): """ | Comment: For tickets of type "incident", the ID of the problem the incident is linked to """ if self.api and self.problem_id: return self.api._get_problem(self.problem_id) @problem.setter def problem(self, problem): if problem: self.problem_id = problem.id self._problem = problem @property def requester(self): """ | Comment: The user who requested this ticket """ if self.api and self.requester_id: return self.api._get_user(self.requester_id) @requester.setter def requester(self, requester): if requester: self.requester_id = requester.id self._requester = requester @property def sharing_agreements(self): """ | Comment: The ids of the sharing agreements used for this ticket """ if self.api and self.sharing_agreement_ids: return self.api._get_sharing_agreements(self.sharing_agreement_ids) @sharing_agreements.setter def sharing_agreements(self, sharing_agreements): if sharing_agreements: self.sharing_agreement_ids = [o.id for o in sharing_agreements] self._sharing_agreements = sharing_agreements @property def submitter(self): """ | Comment: The user who submitted the ticket. The submitter always becomes the author of the first comment on the ticket """ if self.api and self.submitter_id: return self.api._get_user(self.submitter_id) @submitter.setter def submitter(self, submitter): if submitter: self.submitter_id = submitter.id self._submitter = submitter @property def updated(self): """ | Comment: When this record last got updated """ if self.updated_at: return dateutil.parser.parse(self.updated_at) @updated.setter def updated(self, updated): if updated: self.updated_at = updated class TicketAudit(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, audit=None, ticket=None, **kwargs): self.api = api self.audit = audit self.ticket = ticket for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue class TicketEvent(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, child_events=None, id=None, ticket_id=None, timestamp=None, updater_id=None, via=None, **kwargs): self.api = api self.child_events = child_events self.id = id self.ticket_id = ticket_id self.timestamp = timestamp self.updater_id = updater_id self.via = via for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue @property def ticket(self): if self.api and self.ticket_id: return self.api._get_ticket(self.ticket_id) @ticket.setter def ticket(self, ticket): if ticket: self.ticket_id = ticket.id self._ticket = ticket @property def updater(self): if self.api and self.updater_id: return self.api._get_user(self.updater_id) @updater.setter def updater(self, updater): if updater: 
self.updater_id = updater.id self._updater = updater class TicketField(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, active=None, collapsed_for_agents=None, created_at=None, description=None, editable_in_portal=None, id=None, position=None, raw_description=None, raw_title=None, raw_title_in_portal=None, regexp_for_validation=None, required=None, required_in_portal=None, tag=None, title=None, title_in_portal=None, type=None, updated_at=None, url=None, visible_in_portal=None, **kwargs): self.api = api # Comment: Whether this field is available # Mandatory: no # Read-only: no # Type: boolean self.active = active # Comment: If this field should be shown to agents by default or be hidden alongside infrequently used fields. Classic interface only # Mandatory: no # Read-only: no # Type: boolean self.collapsed_for_agents = collapsed_for_agents # Comment: The time the ticket field was created # Mandatory: no # Read-only: yes # Type: date self.created_at = created_at # Comment: The description of the purpose of this ticket field, shown to users # Mandatory: no # Read-only: no # Type: string self.description = description # Comment: Whether this field is editable by end users # Mandatory: no # Read-only: no # Type: boolean self.editable_in_portal = editable_in_portal # Comment: Automatically assigned upon creation # Mandatory: no # Read-only: yes # Type: integer self.id = id # Comment: A relative position for the ticket fields that determines the order of ticket fields on a ticket. Note that positions 0 to 7 are reserved for system fields # Mandatory: no # Read-only: no # Type: integer self.position = position # Comment: The dynamic content placeholder, if present, or the "description" value, if not. See Dynamic Content # Mandatory: no # Read-only: no # Type: string self.raw_description = raw_description # Comment: The dynamic content placeholder, if present, or the "title" value, if not. See Dynamic Content # Mandatory: no # Read-only: no # Type: string self.raw_title = raw_title # Comment: The dynamic content placeholder, if present, or the "title_in_portal" value, if not. See Dynamic Content # Mandatory: no # Read-only: no # Type: string self.raw_title_in_portal = raw_title_in_portal # Comment: Regular expression field only. The validation pattern for a field value to be deemed valid. # Mandatory: no # Read-only: no # Type: string self.regexp_for_validation = regexp_for_validation # Comment: If it's required for this field to have a value when updated by agents # Mandatory: no # Read-only: no # Type: boolean self.required = required # Comment: If it's required for this field to have a value when updated by end users # Mandatory: no # Read-only: no # Type: boolean self.required_in_portal = required_in_portal # Comment: A tag value to set for checkbox fields when checked # Mandatory: no # Read-only: no # Type: string self.tag = tag # Comment: The title of the ticket field # Mandatory: yes # Read-only: no # Type: string self.title = title # Comment: The title of the ticket field when shown to end users # Mandatory: no # Read-only: no # Type: string self.title_in_portal = title_in_portal # Comment: The type of the ticket field: "checkbox", "date", "decimal", "integer", "regexp", "tagger", "text", or "textarea". Type is not editable once created. 
# Mandatory: yes # Read-only: no # Type: string self.type = type # Comment: The time of the last update of the ticket field # Mandatory: no # Read-only: yes # Type: date self.updated_at = updated_at # Comment: The URL for this resource # Mandatory: no # Read-only: yes # Type: string self.url = url # Comment: Whether this field is available to end users # Mandatory: no # Read-only: no # Type: boolean self.visible_in_portal = visible_in_portal for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue @property def created(self): """ | Comment: The time the ticket field was created """ if self.created_at: return dateutil.parser.parse(self.created_at) @created.setter def created(self, created): if created: self.created_at = created @property def updated(self): """ | Comment: The time of the last update of the ticket field """ if self.updated_at: return dateutil.parser.parse(self.updated_at) @updated.setter def updated(self, updated): if updated: self.updated_at = updated class TicketForm(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, active=None, created_at=None, default=None, display_name=None, end_user_visible=None, id=None, in_all_brands=None, in_all_organizations=None, name=None, position=None, raw_display_name=None, raw_name=None, restricted_brand_ids=None, restricted_organization_ids=None, ticket_field_ids=None, updated_at=None, url=None, **kwargs): self.api = api # Comment: If the form is set as active # Mandatory: no # Read-only: no # Type: boolean self.active = active self.created_at = created_at # Comment: Is the form the default form for this account # Mandatory: no # Read-only: no # Type: boolean self.default = default # Comment: The name of the form that is displayed to an end user # Mandatory: no # Read-only: no # Type: string self.display_name = display_name # Comment: Is the form visible to the end user # Mandatory: no # Read-only: no # Type: boolean self.end_user_visible = end_user_visible self.id = id # Comment: Is the form available for use in all brands on this account # Mandatory: no # Read-only: no # Type: boolean self.in_all_brands = in_all_brands self.in_all_organizations = in_all_organizations # Comment: The name of the form # Mandatory: yes # Read-only: no # Type: string self.name = name # Comment: The position of this form among other forms in the account, i.e. dropdown # Mandatory: no # Read-only: no # Type: integer self.position = position # Comment: The dynamic content placeholder, if present, or the "display_name" value, if not. See Dynamic Content # Mandatory: no # Read-only: no # Type: string self.raw_display_name = raw_display_name # Comment: The dynamic content placeholder, if present, or the "name" value, if not. 
See Dynamic Content # Mandatory: no # Read-only: no # Type: string self.raw_name = raw_name # Comment: ids of all brands that this ticket form is restricted to # Mandatory: no # Read-only: yes # Type: array self.restricted_brand_ids = restricted_brand_ids self.restricted_organization_ids = restricted_organization_ids # Comment: ids of all ticket fields which are in this ticket form # Mandatory: no # Read-only: no # Type: array self.ticket_field_ids = ticket_field_ids self.updated_at = updated_at self.url = url for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue @property def created(self): if self.created_at: return dateutil.parser.parse(self.created_at) @created.setter def created(self, created): if created: self.created_at = created @property def restricted_brands(self): """ | Comment: ids of all brands that this ticket form is restricted to """ if self.api and self.restricted_brand_ids: return self.api._get_restricted_brands(self.restricted_brand_ids) @restricted_brands.setter def restricted_brands(self, restricted_brands): if restricted_brands: self.restricted_brand_ids = [o.id for o in restricted_brands] self._restricted_brands = restricted_brands @property def restricted_organizations(self): if self.api and self.restricted_organization_ids: return self.api._get_restricted_organizations( self.restricted_organization_ids) @restricted_organizations.setter def restricted_organizations(self, restricted_organizations): if restricted_organizations: self.restricted_organization_ids = [ o.id for o in restricted_organizations ] self._restricted_organizations = restricted_organizations @property def ticket_fields(self): """ | Comment: ids of all ticket fields which are in this ticket form """ if self.api and self.ticket_field_ids: return self.api._get_ticket_fields(self.ticket_field_ids) @ticket_fields.setter def ticket_fields(self, ticket_fields): if ticket_fields: self.ticket_field_ids = [o.id for o in ticket_fields] self._ticket_fields = ticket_fields @property def updated(self): if self.updated_at: return dateutil.parser.parse(self.updated_at) @updated.setter def updated(self, updated): if updated: self.updated_at = updated class TicketMetric(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, agent_wait_time_in_minutes=None, assigned_at=None, assignee_stations=None, assignee_updated_at=None, created_at=None, first_resolution_time_in_minutes=None, full_resolution_time_in_minutes=None, group_stations=None, id=None, initially_assigned_at=None, latest_comment_added_at=None, on_hold_time_in_minutes=None, reopens=None, replies=None, reply_time_in_minutes=None, requester_updated_at=None, requester_wait_time_in_minutes=None, solved_at=None, status_updated_at=None, ticket_id=None, updated_at=None, **kwargs): self.api = api # Comment: Number of minutes the agent spent waiting inside and out of business hours # Mandatory: no # Read-only: yes # Type: object self.agent_wait_time_in_minutes = agent_wait_time_in_minutes # Comment: When the ticket was last assigned # Mandatory: no # Read-only: yes # Type: date self.assigned_at = assigned_at # Comment: Number of assignees this ticket had # Mandatory: no # Read-only: yes # Type: integer self.assignee_stations = 
assignee_stations # Comment: When the assignee last updated the ticket # Mandatory: no # Read-only: yes # Type: date self.assignee_updated_at = assignee_updated_at # Comment: When this record was created # Mandatory: no # Read-only: yes # Type: date self.created_at = created_at # Comment: Number of minutes to the first resolution time inside and out of business hours # Mandatory: no # Read-only: yes # Type: object self.first_resolution_time_in_minutes = first_resolution_time_in_minutes # Comment: Number of minutes to the full resolution inside and out of business hours # Mandatory: no # Read-only: yes # Type: object self.full_resolution_time_in_minutes = full_resolution_time_in_minutes # Comment: Number of groups this ticket passed through # Mandatory: no # Read-only: yes # Type: integer self.group_stations = group_stations # Comment: Automatically assigned # Mandatory: no # Read-only: yes # Type: integer self.id = id # Comment: When the ticket was initially assigned # Mandatory: no # Read-only: yes # Type: date self.initially_assigned_at = initially_assigned_at # Comment: When the latest comment was added # Mandatory: no # Read-only: yes # Type: date self.latest_comment_added_at = latest_comment_added_at self.on_hold_time_in_minutes = on_hold_time_in_minutes # Comment: Total number of times the ticket was reopened # Mandatory: no # Read-only: yes # Type: integer self.reopens = reopens # Comment: Total number of times ticket was replied to # Mandatory: no # Read-only: yes # Type: integer self.replies = replies # Comment: Number of minutes to the first reply inside and out of business hours # Mandatory: no # Read-only: yes # Type: object self.reply_time_in_minutes = reply_time_in_minutes # Comment: When the requester last updated the ticket # Mandatory: no # Read-only: yes # Type: date self.requester_updated_at = requester_updated_at # Comment: Number of minutes the requester spent waiting inside and out of business hours # Mandatory: no # Read-only: yes # Type: object self.requester_wait_time_in_minutes = requester_wait_time_in_minutes # Comment: When the ticket was solved # Mandatory: no # Read-only: yes # Type: date self.solved_at = solved_at # Comment: When the status was last updated # Mandatory: no # Read-only: yes # Type: date self.status_updated_at = status_updated_at # Comment: Id of the associated ticket # Mandatory: no # Read-only: yes # Type: integer self.ticket_id = ticket_id # Comment: When this record last got updated # Mandatory: no # Read-only: yes # Type: date self.updated_at = updated_at for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue @property def assigned(self): """ | Comment: When the ticket was last assigned """ if self.assigned_at: return dateutil.parser.parse(self.assigned_at) @assigned.setter def assigned(self, assigned): if assigned: self.assigned_at = assigned @property def assignee_updated(self): """ | Comment: When the assignee last updated the ticket """ if self.assignee_updated_at: return dateutil.parser.parse(self.assignee_updated_at) @assignee_updated.setter def assignee_updated(self, assignee_updated): if assignee_updated: self.assignee_updated_at = assignee_updated @property def created(self): """ | Comment: When this record was created """ if self.created_at: return dateutil.parser.parse(self.created_at) @created.setter def created(self, created): if created: self.created_at = created @property def 
initially_assigned(self): """ | Comment: When the ticket was initially assigned """ if self.initially_assigned_at: return dateutil.parser.parse(self.initially_assigned_at) @initially_assigned.setter def initially_assigned(self, initially_assigned): if initially_assigned: self.initially_assigned_at = initially_assigned @property def latest_comment_added(self): """ | Comment: When the latest comment was added """ if self.latest_comment_added_at: return dateutil.parser.parse(self.latest_comment_added_at) @latest_comment_added.setter def latest_comment_added(self, latest_comment_added): if latest_comment_added: self.latest_comment_added_at = latest_comment_added @property def requester_updated(self): """ | Comment: When the requester last updated the ticket """ if self.requester_updated_at: return dateutil.parser.parse(self.requester_updated_at) @requester_updated.setter def requester_updated(self, requester_updated): if requester_updated: self.requester_updated_at = requester_updated @property def solved(self): """ | Comment: When the ticket was solved """ if self.solved_at: return dateutil.parser.parse(self.solved_at) @solved.setter def solved(self, solved): if solved: self.solved_at = solved @property def status_updated(self): """ | Comment: When the status was last updated """ if self.status_updated_at: return dateutil.parser.parse(self.status_updated_at) @status_updated.setter def status_updated(self, status_updated): if status_updated: self.status_updated_at = status_updated @property def ticket(self): """ | Comment: Id of the associated ticket """ if self.api and self.ticket_id: return self.api._get_ticket(self.ticket_id) @ticket.setter def ticket(self, ticket): if ticket: self.ticket_id = ticket.id self._ticket = ticket @property def updated(self): """ | Comment: When this record last got updated """ if self.updated_at: return dateutil.parser.parse(self.updated_at) @updated.setter def updated(self, updated): if updated: self.updated_at = updated class TicketMetricEvent(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, deleted=None, id=None, instance_id=None, metric=None, sla=None, status=None, ticket_id=None, time=None, type=None, **kwargs): self.api = api self.deleted = deleted # Comment: Automatically assigned when the record is created # Mandatory: no # Read-only: yes # Type: integer self.id = id # Comment: The instance of the metric associated with the event. See instance_id # Mandatory: no # Read-only: yes # Type: integer self.instance_id = instance_id # Comment: One of the following: agent_work_time, pausable_update_time, periodic_update_time, reply_time, requester_wait_time, or resolution_time # Mandatory: no # Read-only: yes # Type: string self.metric = metric self.sla = sla self.status = status # Comment: Id of the associated ticket # Mandatory: no # Read-only: yes # Type: integer self.ticket_id = ticket_id # Comment: The time the event occurred # Mandatory: no # Read-only: yes # Type: date self.time = time # Comment: One of the following: activate, pause, fulfill, apply_sla, breach, or update_status. 
See Metric event types # Mandatory: no # Read-only: yes # Type: string self.type = type for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue @property def ticket(self): """ | Comment: Id of the associated ticket """ if self.api and self.ticket_id: return self.api._get_ticket(self.ticket_id) @ticket.setter def ticket(self, ticket): if ticket: self.ticket_id = ticket.id self._ticket = ticket class TicketMetricItem(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, business=None, calendar=None, **kwargs): self.api = api self.business = business self.calendar = calendar for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue class TicketSharingEvent(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, action=None, agreement_id=None, id=None, type=None, **kwargs): self.api = api self.action = action self.agreement_id = agreement_id self.id = id self.type = type for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue class Topic(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, body=None, created_at=None, forum_id=None, id=None, locked=None, pinned=None, position=None, search_phrases=None, submitter_id=None, tags=None, title=None, topic_type=None, updated_at=None, updater_id=None, url=None, **kwargs): self.api = api self.body = body self.created_at = created_at self.forum_id = forum_id self.id = id self.locked = locked self.pinned = pinned self.position = position self.search_phrases = search_phrases self.submitter_id = submitter_id self.tags = tags self.title = title self.topic_type = topic_type self.updated_at = updated_at self.updater_id = updater_id self.url = url for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue @property def created(self): if self.created_at: return dateutil.parser.parse(self.created_at) @created.setter def created(self, created): if created: self.created_at = created @property def forum(self): if self.api and self.forum_id: return self.api._get_forum(self.forum_id) @forum.setter def forum(self, forum): if forum: self.forum_id = forum.id self._forum = forum @property def submitter(self): if self.api and self.submitter_id: return self.api._get_user(self.submitter_id) @submitter.setter def submitter(self, submitter): if submitter: self.submitter_id = submitter.id self._submitter = submitter @property def updated(self): if self.updated_at: return dateutil.parser.parse(self.updated_at) @updated.setter def updated(self, updated): if updated: self.updated_at = 
updated @property def updater(self): if self.api and self.updater_id: return self.api._get_user(self.updater_id) @updater.setter def updater(self, updater): if updater: self.updater_id = updater.id self._updater = updater class Trigger(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, actions=None, active=None, conditions=None, description=None, id=None, position=None, title=None, **kwargs): self.api = api # Comment: An array of Actions describing what the trigger will do # Type: array self.actions = actions # Comment: Whether the trigger is active # Type: boolean self.active = active # Comment: An object that describes the conditions under which the trigger will execute # Type: :class:`Conditions` self.conditions = conditions # Comment: The description of the trigger # Type: string self.description = description # Comment: Automatically assigned when created # Type: integer self.id = id # Comment: Position of the trigger, determines the order they will execute in # Type: integer self.position = position # Comment: The title of the trigger # Type: string self.title = title for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue class TweetEvent(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, body=None, direct_message=None, id=None, recipients=None, type=None, **kwargs): self.api = api self.body = body self.direct_message = direct_message self.id = id self.recipients = recipients self.type = type for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue class Upload(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, attachment=None, attachments=None, expires_at=None, token=None, **kwargs): self.api = api self.attachment = attachment self.attachments = attachments self.expires_at = expires_at self.token = token for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue @property def expires(self): if self.expires_at: return dateutil.parser.parse(self.expires_at) @expires.setter def expires(self, expires): if expires: self.expires_at = expires class User(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, active=None, alias=None, chat_only=None, created_at=None, custom_role_id=None, details=None, email=None, external_id=None, id=None, last_login_at=None, locale=None, locale_id=None, moderator=None, name=None, notes=None, only_private_comments=None, organization_id=None, phone=None, photo=None, 
restricted_agent=None, role=None, shared=None, shared_agent=None, signature=None, suspended=None, tags=None, ticket_restriction=None, time_zone=None, two_factor_auth_enabled=None, updated_at=None, url=None, user_fields=None, verified=None, **kwargs): self.api = api # Comment: false if the user has been deleted # Mandatory: no # Read-only: yes # Type: boolean self.active = active # Comment: An alias displayed to end users # Mandatory: no # Read-only: no # Type: string self.alias = alias # Comment: Whether or not the user is a chat-only agent # Mandatory: no # Read-only: yes # Type: boolean self.chat_only = chat_only # Comment: The time the user was created # Mandatory: no # Read-only: yes # Type: date self.created_at = created_at # Comment: A custom role if the user is an agent on the Enterprise plan # Mandatory: no # Read-only: no # Type: integer self.custom_role_id = custom_role_id # Comment: Any details you want to store about the user, such as an address # Mandatory: no # Read-only: no # Type: string self.details = details # Comment: The user's primary email address. Writeable on create only. On update, a secondary email is added. See Email Address # Mandatory: no # Read-only: no # Type: string self.email = email # Comment: A unique identifier from another system. The API treats the id as case insensitive. Example: ian1 and Ian1 are the same user # Mandatory: no # Read-only: no # Type: string self.external_id = external_id # Comment: Automatically assigned when the user is created # Mandatory: no # Read-only: yes # Type: integer self.id = id # Comment: The last time the user signed in to Zendesk Support # Mandatory: no # Read-only: yes # Type: date self.last_login_at = last_login_at # Comment: The user's locale # Mandatory: no # Read-only: yes # Type: string self.locale = locale # Comment: The user's language identifier # Mandatory: no # Read-only: no # Type: integer self.locale_id = locale_id # Comment: Designates whether the user has forum moderation capabilities # Mandatory: no # Read-only: no # Type: boolean self.moderator = moderator # Comment: The user's name # Mandatory: yes # Read-only: no # Type: string self.name = name # Comment: Any notes you want to store about the user # Mandatory: no # Read-only: no # Type: string self.notes = notes # Comment: true if the user can only create private comments # Mandatory: no # Read-only: no # Type: boolean self.only_private_comments = only_private_comments # Comment: The id of the organization the user is associated with # Mandatory: no # Read-only: no # Type: integer self.organization_id = organization_id # Comment: The user's primary phone number. See Phone Number below # Mandatory: no # Read-only: no # Type: string self.phone = phone # Comment: The user's profile picture represented as an Attachment object # Mandatory: no # Read-only: no # Type: :class:`Attachment` self.photo = photo # Comment: If the agent has any restrictions; false for admins and unrestricted agents, true for other agents # Mandatory: no # Read-only: no # Type: boolean self.restricted_agent = restricted_agent # Comment: The user's role. Possible values are "end-user", "agent", or "admin" # Mandatory: no # Read-only: no # Type: string self.role = role # Comment: If the user is shared from a different Zendesk Support instance. Ticket sharing accounts only # Mandatory: no # Read-only: yes # Type: boolean self.shared = shared # Comment: If the user is a shared agent from a different Zendesk Support instance. 
Ticket sharing accounts only # Mandatory: no # Read-only: yes # Type: boolean self.shared_agent = shared_agent # Comment: The user's signature. Only agents and admins can have signatures # Mandatory: no # Read-only: no # Type: string self.signature = signature # Comment: If the agent is suspended. Tickets from suspended users are also suspended, and these users cannot sign in to the end user portal # Mandatory: no # Read-only: no # Type: boolean self.suspended = suspended # Comment: The user's tags. Only present if your account has user tagging enabled # Mandatory: no # Read-only: no # Type: array self.tags = tags # Comment: Specifies which tickets the user has access to. Possible values are: "organization", "groups", "assigned", "requested", null # Mandatory: no # Read-only: no # Type: string self.ticket_restriction = ticket_restriction # Comment: The user's time zone. See Time Zone # Mandatory: no # Read-only: no # Type: string self.time_zone = time_zone # Comment: If two factor authentication is enabled. # Mandatory: no # Read-only: yes # Type: boolean self.two_factor_auth_enabled = two_factor_auth_enabled # Comment: The time the user was last updated # Mandatory: no # Read-only: yes # Type: date self.updated_at = updated_at # Comment: The user's API url # Mandatory: no # Read-only: yes # Type: string self.url = url # Comment: Values of custom fields in the user's profile. See User Fields # Mandatory: no # Read-only: no # Type: object self.user_fields = user_fields # Comment: The user's primary identity is verified or not. For secondary identities, see User Identities # Mandatory: no # Read-only: no # Type: boolean self.verified = verified for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue @property def created(self): """ | Comment: The time the user was created """ if self.created_at: return dateutil.parser.parse(self.created_at) @created.setter def created(self, created): if created: self.created_at = created @property def custom_role(self): """ | Comment: A custom role if the user is an agent on the Enterprise plan """ if self.api and self.custom_role_id: return self.api._get_custom_role(self.custom_role_id) @custom_role.setter def custom_role(self, custom_role): if custom_role: self.custom_role_id = custom_role.id self._custom_role = custom_role @property def last_login(self): """ | Comment: The last time the user signed in to Zendesk Support """ if self.last_login_at: return dateutil.parser.parse(self.last_login_at) @last_login.setter def last_login(self, last_login): if last_login: self.last_login_at = last_login @property def organization(self): """ | Comment: The id of the organization the user is associated with """ if self.api and self.organization_id: return self.api._get_organization(self.organization_id) @organization.setter def organization(self, organization): if organization: self.organization_id = organization.id self._organization = organization @property def updated(self): """ | Comment: The time the user was last updated """ if self.updated_at: return dateutil.parser.parse(self.updated_at) @updated.setter def updated(self, updated): if updated: self.updated_at = updated class UserField(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, 
api=None, active=None, created_at=None, description=None, id=None, key=None, position=None, raw_description=None, raw_title=None, regexp_for_validation=None, title=None, type=None, updated_at=None, url=None, **kwargs): self.api = api # Comment: If true, this field is available for use # Mandatory: no # Read-only: no # Type: boolean self.active = active # Comment: The time the ticket field was created # Mandatory: no # Read-only: yes # Type: date self.created_at = created_at # Comment: User-defined description of this field's purpose # Mandatory: no # Read-only: no # Type: string self.description = description # Comment: Automatically assigned upon creation # Mandatory: no # Read-only: yes # Type: integer self.id = id # Comment: A unique key that identifies this custom field. This is used for updating the field and referencing in placeholders. # Mandatory: on create # Read-only: no # Type: string self.key = key # Comment: Ordering of the field relative to other fields # Mandatory: no # Read-only: no # Type: integer self.position = position # Comment: The dynamic content placeholder, if present, or the "description" value, if not. See Dynamic Content # Mandatory: no # Read-only: no # Type: string self.raw_description = raw_description # Comment: The dynamic content placeholder, if present, or the "title" value, if not. See Dynamic Content # Mandatory: no # Read-only: no # Type: string self.raw_title = raw_title # Comment: Regular expression field only. The validation pattern for a field value to be deemed valid. # Mandatory: no # Read-only: no # Type: string self.regexp_for_validation = regexp_for_validation # Comment: The title of the custom field # Mandatory: yes # Read-only: no # Type: string self.title = title # Comment: Type of the custom field: "checkbox", "date", "decimal", "dropdown", "integer", "regexp", "text", or "textarea" # Mandatory: yes # Read-only: no # Type: string self.type = type # Comment: The time of the last update of the ticket field # Mandatory: no # Read-only: yes # Type: date self.updated_at = updated_at # Comment: The URL for this resource # Mandatory: no # Read-only: yes # Type: string self.url = url for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue @property def created(self): """ | Comment: The time the ticket field was created """ if self.created_at: return dateutil.parser.parse(self.created_at) @created.setter def created(self, created): if created: self.created_at = created @property def updated(self): """ | Comment: The time of the last update of the ticket field """ if self.updated_at: return dateutil.parser.parse(self.updated_at) @updated.setter def updated(self, updated): if updated: self.updated_at = updated class UserRelated(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, assigned_tickets=None, ccd_tickets=None, entry_subscriptions=None, forum_subscriptions=None, organization_subscriptions=None, requested_tickets=None, subscriptions=None, topic_comments=None, topics=None, votes=None, **kwargs): self.api = api self.assigned_tickets = assigned_tickets self.ccd_tickets = ccd_tickets self.entry_subscriptions = entry_subscriptions self.forum_subscriptions = forum_subscriptions self.organization_subscriptions = 
organization_subscriptions self.requested_tickets = requested_tickets self.subscriptions = subscriptions self.topic_comments = topic_comments self.topics = topics self.votes = votes for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue class Variant(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, active=None, content=None, created_at=None, default=None, id=None, locale_id=None, outdated=None, updated_at=None, url=None, **kwargs): self.api = api self.active = active self.content = content self.created_at = created_at self.default = default self.id = id self.locale_id = locale_id self.outdated = outdated self.updated_at = updated_at self.url = url for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue @property def created(self): if self.created_at: return dateutil.parser.parse(self.created_at) @created.setter def created(self, created): if created: self.created_at = created @property def updated(self): if self.updated_at: return dateutil.parser.parse(self.updated_at) @updated.setter def updated(self, updated): if updated: self.updated_at = updated class Via(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, source=None, **kwargs): self.api = api self.source = source for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue class View(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, active=None, conditions=None, created_at=None, execution=None, id=None, position=None, raw_title=None, restriction=None, sla_id=None, title=None, updated_at=None, url=None, **kwargs): self.api = api # Comment: Whether the view is active # Read-only: no # Type: boolean self.active = active # Comment: An object describing how the view is constructed # Read-only: no # Type: :class:`Conditions` self.conditions = conditions # Comment: The time the view was created # Read-only: yes # Type: date self.created_at = created_at # Comment: An object describing how the view should be executed # Read-only: no # Type: :class:`Execute` self.execution = execution # Comment: Automatically assigned when created # Read-only: yes # Type: integer self.id = id # Comment: The position of the view # Read-only: no # Type: integer self.position = position self.raw_title = raw_title # Comment: Who may access this account. Will be null when everyone in the account can access it. 
# Read-only: no # Type: object self.restriction = restriction self.sla_id = sla_id # Comment: The title of the view # Read-only: no # Type: string self.title = title # Comment: The time of the last update of the view # Read-only: yes # Type: date self.updated_at = updated_at self.url = url for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue @property def created(self): """ | Comment: The time the view was created """ if self.created_at: return dateutil.parser.parse(self.created_at) @created.setter def created(self, created): if created: self.created_at = created @property def sla(self): if self.api and self.sla_id: return self.api._get_sla(self.sla_id) @sla.setter def sla(self, sla): if sla: self.sla_id = sla.id self._sla = sla @property def updated(self): """ | Comment: The time of the last update of the view """ if self.updated_at: return dateutil.parser.parse(self.updated_at) @updated.setter def updated(self, updated): if updated: self.updated_at = updated class ViewCount(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, channel=None, fresh=None, poll_wait=None, pretty=None, refresh=None, url=None, value=None, view_id=None, **kwargs): self.api = api self.channel = channel self.fresh = fresh self.poll_wait = poll_wait self.pretty = pretty self.refresh = refresh self.url = url self.value = value self.view_id = view_id for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue @property def view(self): if self.api and self.view_id: return self.api._get_view(self.view_id) @view.setter def view(self, view): if view: self.view_id = view.id self._view = view class ViewRow(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, created=None, custom_fields=None, fields=None, group_id=None, priority=None, requester_id=None, score=None, subject=None, ticket=None, **kwargs): self.api = api self.created = created self.custom_fields = custom_fields self.fields = fields self.group_id = group_id self.priority = priority self.requester_id = requester_id self.score = score self.subject = subject self.ticket = ticket for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue @property def group(self): if self.api and self.group_id: return self.api._get_group(self.group_id) @group.setter def group(self, group): if group: self.group_id = group.id self._group = group @property def requester(self): if self.api and self.requester_id: return self.api._get_user(self.requester_id) @requester.setter def requester(self, requester): if requester: self.requester_id = requester.id self._requester = requester class VoiceCommentEvent(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # 
###################################################################### """ def __init__(self, api=None, attachments=None, author_id=None, body=None, data=None, formatted_from=None, formatted_to=None, html_body=None, id=None, public=None, transcription_visible=None, trusted=None, type=None, **kwargs): self.api = api self.attachments = attachments self.author_id = author_id self.body = body self.data = data self.formatted_from = formatted_from self.formatted_to = formatted_to self.html_body = html_body self.id = id self.public = public self.transcription_visible = transcription_visible self.trusted = trusted self.type = type for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue @property def author(self): if self.api and self.author_id: return self.api._get_user(self.author_id) @author.setter def author(self, author): if author: self.author_id = author.id self._author = author class Webhook(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, authentication=None, created_at=None, created_by=None, description=None, endpoint=None, external_source=None, http_method=None, id=None, name=None, request_format=None, signing_secret=None, status=None, subscriptions=None, updated_at=None, updated_by=None, **kwargs): self.api = api self.authentication = authentication self.created_at = created_at self.created_by = created_by self.description = description self.endpoint = endpoint self.external_source = external_source self.http_method = http_method self.id = id self.name = name self.request_format = request_format self.signing_secret = signing_secret self.status = status self.subscriptions = subscriptions self.updated_at = updated_at self.updated_by = updated_by for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue @property def created(self): if self.created_at: return dateutil.parser.parse(self.created_at) @created.setter def created(self, created): if created: self.created_at = created @property def updated(self): if self.updated_at: return dateutil.parser.parse(self.updated_at) @updated.setter def updated(self, updated): if updated: self.updated_at = updated class Invocation(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, id=None, latest_completed_at=None, status=None, status_code=None, **kwargs): self.api = api self.id = id self.latest_completed_at = latest_completed_at self.status = status self.status_code = status_code for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue @property def latest_completed(self): if self.latest_completed_at: return dateutil.parser.parse(self.latest_completed_at) @latest_completed.setter def latest_completed(self, latest_completed): if latest_completed: self.latest_completed_at = latest_completed class InvocationAttempt(BaseObject): """ 
###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, completed_at=None, id=None, invocation_id=None, request=None, response=None, status=None, status_code=None, **kwargs): self.api = api self.completed_at = completed_at self.id = id self.invocation_id = invocation_id self.request = request self.response = response self.status = status self.status_code = status_code for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue @property def completed(self): if self.completed_at: return dateutil.parser.parse(self.completed_at) @completed.setter def completed(self, completed): if completed: self.completed_at = completed @property def invocation(self): if self.api and self.invocation_id: return self.api._get_invocation(self.invocation_id) @invocation.setter def invocation(self, invocation): if invocation: self.invocation_id = invocation.id self._invocation = invocation class WebhookSecret(BaseObject): """ ###################################################################### # Do not modify, this class is autogenerated by gen_classes.py # ###################################################################### """ def __init__(self, api=None, algorithm=None, secret=None, **kwargs): self.api = api self.algorithm = algorithm self.secret = secret for key, value in kwargs.items(): setattr(self, key, value) for key in self.to_dict(): if getattr(self, key) is None: try: self._dirty_attributes.remove(key) except KeyError: continue
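

# Illustrative note (not part of the autogenerated module): the classes above
# all follow the same pattern -- ``*_id`` attributes pair with a property that
# lazily fetches the related object through the API client, and ``*_at``
# attributes hold date strings that the matching property parses with
# ``dateutil``.  A hypothetical sketch:
#
#     secret = WebhookSecret(algorithm='SHA256', secret='...')
#     attempt = InvocationAttempt(completed_at='2023-01-05T10:00:00Z')
#     attempt.completed   # -> datetime.datetime(2023, 1, 5, 10, 0, ...)
#     attempt.invocation  # -> fetched via api._get_invocation(...) when both
#                         #    ``api`` and ``invocation_id`` are set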
zenpycbp
/zenpycbp-2.0.27-py3-none-any.whl/zenpy/lib/api_objects/__init__.py
__init__.py
==== zenq ====

.. image:: https://img.shields.io/pypi/v/zenq.svg
        :target: https://pypi.python.org/pypi/zenq

.. image:: https://img.shields.io/travis/nareabg/zenq.svg
        :target: https://travis-ci.com/nareabg/zenq

.. image:: https://readthedocs.org/projects/zenq/badge/?version=latest
        :target: https://zenq.readthedocs.io/en/latest/?version=latest
        :alt: Documentation Status

CLV package

* Free software: MIT license

Installation
============

To install the Zenq CLV Models Library, run the following command:

.. code-block:: bash

    pip install zenq-clv-models

The Story
=========

The ZENQ package was developed to give marketing analysts and data scientists a practical tool. Because it is connected to a database, it can be used by a wide variety of customers, including those with limited coding experience. Users insert customer data into the database and run scripts built on the ZENQ package, which lets them study customer behavior based on how customers engage with the company. The package's primary purpose is the computation of CLV and RFM, along with forecasts. It includes a machine learning component that estimates whether a client has "died" or is still alive after a given amount of time has passed. To model customer loyalty, ZENQ relies on the Pareto/NBD model. The package also offers a number of visualizations, which simplify interpreting the statistics and basing business decisions on the findings.

Usage - Simple Example
======================

Once installed, you can use the library in your Python scripts as follows:

.. code-block:: bash

    # run in a terminal to create the Postgres instance the package connects to
    docker run --name my-postgres-db -e POSTGRES_USER=master -e POSTGRES_PASSWORD=pass -e POSTGRES_DB=GLOBBING -p 5432:5432 -d postgres

.. code-block:: python

    # Initialize the database with tables
    from zenq.api.prepare_db import db
    m = db()
    m.main()

.. code-block:: python

    # Insert data into the database
    from zenq.api.endpoints import insert_facts
    insert_facts('globbing.csv', 'Customer', 'Gender', 'InvoiceId', 'Date', 'Product_weight', 'Product_price')

.. code-block:: python

    # Insert logging data into the LOGS table
    from zenq.api.endpoints import update_log
    update_log()

.. code-block:: python

    # Define the model
    from zenq.clvmodels.pareto import Model
    model = Model()
    cltv = model.cltv_df()
    rfm = model.rfm_score()
    parameters = model.model_params()
    alive = model.customer_is_alive()

.. code-block:: python

    # Define visualizations
    from zenq.visualizations.plot import Visuals
    visuals = Visuals()
    gender_price = visuals.gender_price()

Credits
=========

This package was created with Cookiecutter_ and the `audreyr/cookiecutter-pypackage`_ project template.

.. _Cookiecutter: https://github.com/audreyr/cookiecutter
.. _`audreyr/cookiecutter-pypackage`: https://github.com/audreyr/cookiecutter-pypackage
zenq
/zenq-0.1.0.tar.gz/zenq-0.1.0/README.rst
README.rst
.. highlight:: shell ============ Contributing ============ Contributions are welcome, and they are greatly appreciated! Every little bit helps, and credit will always be given. You can contribute in many ways: Types of Contributions ---------------------- Report Bugs ~~~~~~~~~~~ Report bugs at https://github.com/nareabg/zenq/issues. If you are reporting a bug, please include: * Your operating system name and version. * Any details about your local setup that might be helpful in troubleshooting. * Detailed steps to reproduce the bug. Fix Bugs ~~~~~~~~ Look through the GitHub issues for bugs. Anything tagged with "bug" and "help wanted" is open to whoever wants to implement it. Implement Features ~~~~~~~~~~~~~~~~~~ Look through the GitHub issues for features. Anything tagged with "enhancement" and "help wanted" is open to whoever wants to implement it. Write Documentation ~~~~~~~~~~~~~~~~~~~ zenq could always use more documentation, whether as part of the official zenq docs, in docstrings, or even on the web in blog posts, articles, and such. Submit Feedback ~~~~~~~~~~~~~~~ The best way to send feedback is to file an issue at https://github.com/nareabg/zenq/issues. If you are proposing a feature: * Explain in detail how it would work. * Keep the scope as narrow as possible, to make it easier to implement. * Remember that this is a volunteer-driven project, and that contributions are welcome :) Get Started! ------------ Ready to contribute? Here's how to set up `zenq` for local development. 1. Fork the `zenq` repo on GitHub. 2. Clone your fork locally:: $ git clone [email protected]:your_name_here/zenq.git 3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development:: $ mkvirtualenv zenq $ cd zenq/ $ python setup.py develop 4. Create a branch for local development:: $ git checkout -b name-of-your-bugfix-or-feature Now you can make your changes locally. 5. When you're done making changes, check that your changes pass flake8 and the tests, including testing other Python versions with tox:: $ flake8 zenq tests $ python setup.py test or pytest $ tox To get flake8 and tox, just pip install them into your virtualenv. 6. Commit your changes and push your branch to GitHub:: $ git add . $ git commit -m "Your detailed description of your changes." $ git push origin name-of-your-bugfix-or-feature 7. Submit a pull request through the GitHub website. Pull Request Guidelines ----------------------- Before you submit a pull request, check that it meets these guidelines: 1. The pull request should include tests. 2. If the pull request adds functionality, the docs should be updated. Put your new functionality into a function with a docstring, and add the feature to the list in README.rst. 3. The pull request should work for Python 3.5, 3.6, 3.7 and 3.8, and for PyPy. Check https://travis-ci.com/nareabg/zenq/pull_requests and make sure that the tests pass for all supported Python versions. Tips ---- To run a subset of tests:: $ pytest tests.test_zenq Deploying --------- A reminder for the maintainers on how to deploy. Make sure all your changes are committed (including an entry in HISTORY.rst). Then run:: $ bump2version patch # possible: major / minor / patch $ git push $ git push --tags Travis will then deploy to PyPI if tests pass.
zenq
/zenq-0.1.0.tar.gz/zenq-0.1.0/CONTRIBUTING.rst
CONTRIBUTING.rst
.. highlight:: shell ============ Installation ============ Stable release -------------- To install zenq, run this command in your terminal: .. code-block:: console $ pip install zenq This is the preferred method to install zenq, as it will always install the most recent stable release. If you don't have `pip`_ installed, this `Python installation guide`_ can guide you through the process. .. _pip: https://pip.pypa.io .. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/ From sources ------------ The sources for zenq can be downloaded from the `Github repo`_. You can either clone the public repository: .. code-block:: console $ git clone git://github.com/nareabg/zenq Or download the `tarball`_: .. code-block:: console $ curl -OJL https://github.com/nareabg/zenq/tarball/master Once you have a copy of the source, you can install it with: .. code-block:: console $ python setup.py install .. _Github repo: https://github.com/nareabg/zenq .. _tarball: https://github.com/nareabg/zenq/tarball/master
zenq
/zenq-0.1.0.tar.gz/zenq-0.1.0/docs/installation.rst
installation.rst
zenq.api package ================ Submodules ---------- zenq.api.config module ---------------------- .. automodule:: zenq.api.config :members: :undoc-members: :show-inheritance: zenq.api.endpoints module ------------------------- .. automodule:: zenq.api.endpoints :members: :undoc-members: :show-inheritance: zenq.api.prepare\_db module --------------------------- .. automodule:: zenq.api.prepare_db :members: :undoc-members: :show-inheritance: zenq.api.tables module ---------------------- .. automodule:: zenq.api.tables :members: :undoc-members: :show-inheritance: Module contents --------------- .. automodule:: zenq.api :members: :undoc-members: :show-inheritance:
zenq
/zenq-0.1.0.tar.gz/zenq-0.1.0/docs/zenq.api.rst
zenq.api.rst
# zenroom_minimal_py

A small Python wrapper around the ZenroomRuntime from [zenroom_minimal](https://github.com/RiddleAndCode/zenroom_minimal).

## Building and Installing

`zenroom_minimal` uses [maturin](https://github.com/PyO3/maturin) to build the python package.

* `maturin publish` builds the crate into python packages and publishes them to pypi.
* `maturin build` builds the wheels and stores them in a folder (`target/wheels` by default), but doesn't upload them.
* `maturin develop` builds the crate and installs it as a python module directly in the current virtualenv.

## Example

```python
from zenroom_minimal import Zenroom

f = lambda x: """
Given("that my name is ''", function(name) ACK.name = name end)
Then("say hello", function() OUT = "Hello, " .. ACK.name .. "!" end)
Then("print all data", function() print(OUT) end)
"""

zenroom = Zenroom(f)
zenroom.load("""
Scenario 'hello'
Given that my name is 'Julian'
Then say hello
And print all data
""")
zenroom.eval()
```

## API

### `Zenroom((scenario: string) -> string)`

Create a new Zenroom runtime using the provided transformer, which takes a scenario name and returns the appropriate Lua source for that scenario.

### `zenroom.load(source: string)`

Load the given Zencode as a runnable source for the runtime environment.

### `zenroom.load_data(data: string)`

Load the given string as input data for the Zencode state machine.

### `zenroom.load_keys(keys: string)`

Load the given string as keys data for the Zencode state machine.

### `zenroom.eval() -> string`

Execute the loaded Zencode, data and keys in the given runtime environment and return the result as a string, or throw an error.
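To make the transformer idea concrete, here is a minimal sketch that dispatches on the scenario name instead of using one catch-all function. The `scenarios` registry is hypothetical and assumes the runtime passes the bare scenario name (here `hello`) to the transformer; the Lua source is the same as in the example above:

```python
from zenroom_minimal import Zenroom

# Hypothetical registry mapping scenario names to their Lua sources; since the
# transformer receives the scenario name, a plain dict lookup works.
scenarios = {
    "hello": """
Given("that my name is ''", function(name) ACK.name = name end)
Then("say hello", function() OUT = "Hello, " .. ACK.name .. "!" end)
Then("print all data", function() print(OUT) end)
""",
}

zenroom = Zenroom(lambda name: scenarios[name])
```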
zenroom-minimal
/zenroom_minimal-0.1.0.tar.gz/README.md
README.md
# Use Zenroom in Python3

<p align="center">
  <br/>
  <a href="https://dev.zenroom.org/">
    <img src="https://dev.zenroom.org/_media/images/zenroom_logo.png" height="140" alt="Zenroom">
  </a>
  <h2 align="center">
    zenroom.py 🐍
    <br>
    <sub>A Python3 wrapper of <a href="https://zenroom.org">Zenroom</a>, a secure and small virtual machine for crypto language processing</sub>
  </h2>
  <br><br>
  <a href="https://travis-ci.com/dyne/zenroom-py">
    <img src="https://travis-ci.com/dyne/zenroom-py.svg?branch=master" alt="Build status"/>
  </a>
  <a href="https://codecov.io/gh/dyne/zenroom-py">
    <img src="https://codecov.io/gh/dyne/zenroom-py/branch/master/graph/badge.svg" alt="Code coverage"/>
  </a>
  <a href="https://pypi.org/project/zenroom/">
    <img src="https://img.shields.io/pypi/v/zenroom.svg" alt="PyPI - latest release">
  </a>
</p>

<hr/>

This library provides a very simple wrapper around the Zenroom (https://zenroom.dyne.org/) crypto virtual machine, developed as part of the DECODE project (https://decodeproject.eu/), that aims to make the Zenroom virtual machine easier to call from normal Python code.

Zenroom itself has good cross-platform functionality, so if you are interested in finding out more about what Zenroom offers, please visit the website linked above.

***

## 💾 Installation

```bash
pip install zenroom
```

**NOTE** - the above command attempts to install the zenroom package, pulling in the Zenroom VM as a precompiled binary, so it will only work on Linux and macOS machines.

For the edge version (synced to the latest commit on master), run:

```bash
pip install zenroom --pre
```

The `zenroom` package is just a wrapper around the `zencode-exec` utility, so you also need to install `zencode-exec`. You can download it from the official [releases on github](https://github.com/dyne/Zenroom/releases/).

After downloading it, move it somewhere in your PATH:

```
sudo cp zencode-exec /usr/local/bin/
```

Warning: on Mac OS, the executable is `zencode-exec.command` and you have to symlink it to `zencode-exec`:

```
sudo cp zencode-exec.command /usr/local/bin/
cd /usr/local/bin
sudo ln -s zencode-exec.command zencode-exec
```

***

## 🎮 Usage

Two main calls are exposed: one to run `zencode` and one for `zenroom scripts`.

If you don't know what `zencode` is, you can start with this blogpost: https://decodeproject.eu/blog/smart-contracts-english-speaker

The official documentation is available on [https://dev.zenroom.org/zencode/](https://dev.zenroom.org/zencode/)

A good set of examples of `zencode` contracts can be found at:

* [zencode simple tests](https://github.com/dyne/Zenroom/tree/master/test/zencode_simple)
* [zencode coconut tests](https://github.com/dyne/Zenroom/tree/master/test/zencode_coconut)

### 🐍 Python wrapper

The wrapper exposes two simple calls:

* `zenroom_exec`
* `zencode_exec`

As the names suggest, these are the two methods to execute zenroom scripts (Lua) and zencode.
#### args

Both functions accept the same arguments:

- `script` **[string](https://docs.python.org/3/library/stdtypes.html#text-sequence-type-str)** the lua script or the zencode script to be executed
- `keys` **[string](https://docs.python.org/3/library/stdtypes.html#text-sequence-type-str)** the optional keys string to pass in execution as documented in zenroom docs [here](https://dev.zenroom.org/wiki/how-to-exec/#keys-string)
- `data` **[string](https://docs.python.org/3/library/stdtypes.html#text-sequence-type-str)** the optional data string to pass in execution as documented in zenroom docs [here](https://dev.zenroom.org/wiki/how-to-exec/#data-string)
- `conf` **[string](https://docs.python.org/3/library/stdtypes.html#text-sequence-type-str)** the optional conf string to pass according to zen_config [here](https://github.com/dyne/Zenroom/blob/master/src/zen_config.c#L99-L104)

#### return

Both functions return the same result object, a `ZenResult`, which has two attributes:

- `output` **[string](https://docs.python.org/3/library/stdtypes.html#text-sequence-type-str)** holds the stdout of the script execution
- `logs` **[string](https://docs.python.org/3/library/stdtypes.html#text-sequence-type-str)** holds the stderr of the script execution

##### Examples

Example usage of `zencode_exec(script, keys=None, data=None, conf=None)`:

```python
from zenroom import zenroom

contract = """Scenario ecdh: Create a keypair"
Given that I am known as 'identifier'
When I create the keypair
Then print my data
"""

result = zenroom.zencode_exec(contract)
print(result.output)
```

Example usage of `zenroom_exec(script, keys=None, data=None, conf=None)`:

```python
from zenroom import zenroom

script = "print('Hello world')"
result = zenroom.zenroom_exec(script)
print(result.output)
```

It takes the same arguments and returns the same kind of result as the `zencode_exec` call.

***

## 📋 Testing

Tests are made with pytest; just run `python setup.py test`.

In the [`zenroom_test.py`](https://github.com/dyne/Zenroom/blob/master/bindings/python3/tests/test_all.py) file you'll find more usage examples of the wrapper.

***

## 🌐 Links

https://decodeproject.eu/

https://zenroom.org/

https://dev.zenroom.org/

## 😍 Acknowledgements

Copyright (C) 2018-2022 by [Dyne.org](https://www.dyne.org) foundation, Amsterdam

Originally designed and written by Sam Mulube.

Designed, written and maintained by Puria Nafisi Azizi.

Rewritten by Danilo Spinella and David Dashyan.

<img src="https://ec.europa.eu/cefdigital/wiki/download/attachments/289112547/logo-cef-digital-2021.png" width="310" alt="Project funded by the European Commission">

This project is receiving funding from the European Union’s Horizon 2020 research and innovation programme under grant agreement nr. 732546 (DECODE).

***

## 👥 Contributing

Please first take a look at the [Dyne.org - Contributor License Agreement](CONTRIBUTING.md) then

1. 🔀 [FORK IT](https://github.com/dyne/Zenroom//fork)
2. Create your feature branch `git checkout -b feature/branch`
3. Commit your changes `git commit -am 'Add some fooBar'`
4. Push to the branch `git push origin feature/branch`
5. Create a new Pull Request `gh pr create -f`
6. 🙏 Thank you

***

## 💼 License

Zenroom.py - a python wrapper of zenroom

Copyright (c) 2018-2022 Dyne.org foundation, Amsterdam

This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see <http://www.gnu.org/licenses/>.
zenroom
/zenroom-3.18.0.tar.gz/zenroom-3.18.0/src/docs/pages/python.md
python.md
# ZenRows Python SDK

An SDK to access the [ZenRows](https://www.zenrows.com/) API directly from Python. ZenRows handles proxy rotation, headless browsers, and CAPTCHAs for you.

## Installation

Install the SDK with pip.

```bash
pip install zenrows
```

## Usage

Start using the API by [creating your API Key](https://www.zenrows.com/register?p=free).

The SDK uses [requests](https://docs.python-requests.org/) for HTTP requests. The client's response will be a requests `Response`.

It also uses [Retry](https://urllib3.readthedocs.io/en/latest/reference/urllib3.util.html) to automatically retry failed requests (status codes 429, 500, 502, 503, and 504). Retries are not active by default; you need to specify the number of retries, as shown below. It already includes an exponential back-off retry delay between failed requests.

```python
from zenrows import ZenRowsClient

client = ZenRowsClient("YOUR-API-KEY", retries=1)
url = "https://www.zenrows.com/"

response = client.get(url, params={
    # Our algorithm allows to automatically extract content from any website
    "autoparse": False,

    # CSS Selectors for data extraction (i.e. {"links":"a @href"} to get href attributes from links)
    "css_extractor": "",

    # Enable Javascript with a headless browser (5 credits)
    "js_render": False,

    # Use residential proxies (10 credits)
    "premium_proxy": False,

    # Make your request from a given country. Requires premium_proxy
    "proxy_country": "us",

    # Wait for a given CSS Selector to load in the DOM. Requires js_render
    "wait_for": ".content",

    # Wait a fixed amount of time in milliseconds. Requires js_render
    "wait": 2500,

    # Block specific resources from loading, check docs for the full list. Requires js_render
    "block_resources": "image,media,font",

    # Change the browser's window width and height. Requires js_render
    "window_width": 1920,
    "window_height": 1080,

    # Will automatically use either desktop or mobile user agents in the headers
    "device": "desktop",

    # Will return the status code returned by the website
    "original_status": False,
}, headers={
    "Referrer": "https://www.google.com",
    "User-Agent": "MyCustomUserAgent",
})

print(response.text)
```

You can also pass optional `params` and `headers`; the list above is a reference. For more info, check out [the documentation page](https://www.zenrows.com/documentation).

Sending headers to the target URL will overwrite our defaults. Be careful when doing it and contact us if there is any problem.

### POST Requests

The SDK also offers POST requests by calling the `client.post` method. It can receive a new parameter `data` that represents the data sent in, for example, a form.

```python
from zenrows import ZenRowsClient

client = ZenRowsClient("YOUR-API-KEY", retries=1)
url = "https://httpbin.org/anything"

response = client.post(url, data={
    "key1": "value1",
    "key2": "value2",
})

print(response.text)
```

### Concurrency

To limit the concurrency, the SDK uses [asyncio](https://docs.python.org/3/library/asyncio.html), which caps the number of requests sent simultaneously. The concurrency limit is determined by the plan you are on, so take a look at the [pricing](https://www.zenrows.com/pricing) and set it accordingly. Take into account that each client instance has its own limit, meaning that two different scripts will not share it, and 429 (Too Many Requests) errors might arise.

The main difference from the sequential snippet above is `client.get_async` instead of `client.get`. The rest works exactly the same, and the plain `get` function remains supported.
The async variant, however, is necessary to parallelize calls and allow async/await syntax. Remember to run the scripts with `asyncio.run` or they will fail with a `coroutine 'main' was never awaited` error.

We use `asyncio.gather` in the example below. It will wait for all the calls to finish, and the results are stored in a `responses` array. The whole list of URLs will run, even if some fail. Each response will then have the status, request, response content, and other values as usual.

```python
from zenrows import ZenRowsClient
import asyncio

client = ZenRowsClient("YOUR-API-KEY", concurrency=5, retries=1)

async def main():
    urls = [
        "https://www.zenrows.com/",
        # ...
    ]

    responses = await asyncio.gather(*[client.get_async(url) for url in urls])

    for response in responses:
        print(response.text)

asyncio.run(main())
```

## Contributing

Pull requests are welcome. For significant changes, please open an issue first to discuss what you would like to change.

## License

[MIT](./LICENSE)
zenrows
/zenrows-1.3.0.tar.gz/zenrows-1.3.0/README.md
README.md
# simple-vuln-py-lib Python library that contains a snarky storage secret. ## Installation `pip install --upgrade zensectfpy` ## Public-facing functionality ```py from zensectfpy.rickroll import print_rickroll # Importing the `print_rickroll` function num_iterations:int = 10 # Number of times the Rickroll lyrics are to be printed print_rickroll(num_iterations) # Calling the function ``` ## Zense CTF Challenge There is some private functionality hidden in this repo. Access it, and you shall find the flag!
zensectfpy
/zensectfpy-0.0.2.tar.gz/zensectfpy-0.0.2/README.md
README.md
# Zenserp Python Client # Zenserp Python Client is the official Python Wrapper around the zenserp [API](https://zenserp.docs.apiary.io/). ## Installation Install from pip: ````sh pip install zenserp ```` Install from code: ````sh pip install git+https://github.com/zenserp/zenserp-python.git ```` ## Usage All zenserp API requests are made using the `Client` class. This class must be initialized with your API access key string. [Where is my API access key?](https://app.zenserp.com/dashboard) In your Python application, import `zenserp` and pass authentication information to initialize it: ````python import zenserp client = zenserp.Client('API_KEY') ```` ### Retrieve Status ```python status = client.status() print(status['remaining_requests']) ``` ### Retrieve SERPs ```python params = ( ('q', 'Pied Piper'), ('location', 'United States'), ('search_engine', 'google.com'), ) result = client.search(params) print(result) ``` ### Contact us Any feedback? Please feel free to [contact our team](mailto:[email protected]).
zenserp
/zenserp-0.2.tar.gz/zenserp-0.2/README.md
README.md
import logging import sys import os from optparse import OptionParser from pkg_resources import get_distribution, DistributionNotFound logger = logging.getLogger(__name__) class ActionCliError(Exception): def __init__(self, *args, **kwargs): Exception.__init__(self, *args, **kwargs) class SimpleActionCli(object): """A simple action based command line interface. """ def __init__(self, executors, invokes, config=None, version='none', pkg_dist=None, opts=None, manditory_opts=None, environ_opts=None, default_action=None): """Construct. :param dict executors: keys are executor names and values are function that create the executor handler instance :param dict invokes: keys are names of in executors and values are arrays with the form: [<option name>, <method name>, <usage doc>] :param config: an instance of `zensols.config.Config` :param str version: the default version of this command line module, which is overrided by the package's version if it exists :param pkg_dist: the name of the module (i.e. zensols.actioncli) :param set opts: options to be parsed :param set manditory_opts: options that must be supplied in the command :param set environ_opts: options to add from environment variables; each are upcased to be match and retrieved from the environment but are lowercased in the results param set :param str default_action: the action to use if non is specified (if any) """ opts = opts if opts else set([]) manditory_opts = manditory_opts if manditory_opts else set([]) environ_opts = environ_opts if environ_opts else set([]) self.executors = executors self.invokes = invokes self.opts = opts self.manditory_opts = manditory_opts self.environ_opts = environ_opts self.version = version self.add_logging = False self.config = config self.default_action = default_action self.pkg = None if pkg_dist is not None: try: self.pkg = get_distribution(pkg_dist) self.version = self.pkg.version except DistributionNotFound: pass if config is not None: config.pkg = self.pkg def _config_logging(self, level): if level == 0: levelno = logging.WARNING elif level == 1: levelno = logging.INFO elif level == 2: levelno = logging.DEBUG if level <= 1: fmt = '%(message)s' else: fmt = '%(levelname)s:%(asctime)-15s %(name)s: %(message)s' self._config_log_level(fmt, levelno) def _config_log_level(self, fmt, levelno): if self.pkg is not None: logging.basicConfig(format=fmt, level=logging.WARNING) logging.getLogger(self.pkg.project_name).setLevel(level=levelno) else: root = logging.getLogger() map(root.removeHandler, root.handlers[:]) logging.basicConfig(format=fmt, level=levelno) root.setLevel(levelno) def print_actions(self, short): if short: for (name, action) in self.invokes.items(): print(name) else: pad = max(map(lambda x: len(x), self.invokes.keys())) + 2 fmt = '%%-%ds %%s' % pad for (name, action) in self.invokes.items(): print(fmt % (name, action[2])) def _add_whine_option(self, parser, default=0): parser.add_option('-w', '--whine', dest='whine', metavar='NUMBER', type='int', default=default, help='add verbosity to logging') self.add_logging = True def _add_short_option(self, parser): parser.add_option('-s', '--short', dest='short', help='short output for list', action='store_true') def _parser_error(self, msg): self.parser.error(msg) def _default_environ_opts(self): opts = {} for opt in self.environ_opts: opt_env = opt.upper() if opt_env in os.environ: opts[opt] = os.environ[opt_env] logger.debug('default environment options: %s' % opts) return opts def _init_executor(self, executor, config, args): pass def 
get_config(self, params): return self.config def _config_parser_for_action(self, args, parser): pass def config_parser(self): pass def _init_config(self, config): if config is not None and \ self.pkg is not None \ and hasattr(self, 'pkg'): config.pkg = self.pkg def _create_parser(self, usage): return OptionParser(usage=usage, version='%prog ' + str(self.version)) def create_executor(self, args=sys.argv[1:]): usage = '%prog <list|...> [options]' parser = self._create_parser(usage) self.parser = parser self.config_parser() logger.debug(f'configured parser: {parser}') if len(args) > 0 and args[0] in self.invokes: logger.info('configuring parser on action: %s' % args[0]) self._config_parser_for_action(args, parser) logger.debug(f'parsing arguments: {args}') (options, args) = parser.parse_args(args) logger.debug('options: <%s>, args: <%s>' % (options, args)) self.parsed_options = options self.parsed_args = args if len(args) > 0: action = args[0] else: if self.default_action is None: self._parser_error('missing action mnemonic') else: logger.debug('using default action: %s' % self.default_action) action = self.default_action logger.debug('adding logging') if self.add_logging: self._config_logging(options.whine) if action == 'list': short = hasattr(options, 'short') and options.short self.print_actions(short) return None, None else: if action not in self.invokes: self._parser_error("no such action: '%s'" % action) (exec_name, meth, _) = self.invokes[action] logging.debug('exec_name: %s, meth: %s' % (exec_name, meth)) params = vars(options) config = self.get_config(params) self._init_config(config) def_params = config.options if config else {} def_params.update(self._default_environ_opts()) for k, v in params.items(): if v is None and k in def_params: params[k] = def_params[k] logger.debug('before filter: %s' % params) params = {k: params[k] for k in params.keys() & self.opts} for opt in self.manditory_opts: if opt not in params or params[opt] is None: self._parser_error('missing option: %s' % opt) if config: params['config'] = config try: exec_obj = self.executors[exec_name](params) self._init_executor(exec_obj, config, args[1:]) return meth, exec_obj except ActionCliError as err: self._parser_error(format(err)) def invoke(self, args=sys.argv[1:]): meth, exec_obj = self.create_executor(args) if exec_obj is not None: try: logging.debug('invoking: %s.%s' % (exec_obj, meth)) getattr(exec_obj, meth)() except ActionCliError as err: self._parser_error(format(err))
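

# Illustrative usage sketch (not part of the library): wiring the
# ``executors`` and ``invokes`` dictionaries documented in the class
# docstring above.  The ``Greeter`` executor and ``hello`` action are
# hypothetical names.
class Greeter(object):
    """A hypothetical executor, built from the parsed option parameters."""
    def __init__(self, params):
        self.params = params

    def hello(self):
        print('hello world')


if __name__ == '__main__':
    cli = SimpleActionCli(
        # executor name -> factory taking the parsed option parameters
        executors={'greeter': lambda params: Greeter(params)},
        # action name -> [executor name, method name, usage documentation]
        invokes={'hello': ['greeter', 'hello', 'say hello']})
    # parses the action from the given args, builds the executor with the
    # filtered options, then calls the configured method on it
    cli.invoke(['hello'])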
zensols.actioncli
/zensols.actioncli-1.1.5-py3-none-any.whl/zensols/actioncli/simple.py
simple.py
__author__ = 'Paul Landes' import logging import inspect import time as tm from functools import wraps import errno import os import signal time_logger = logging.getLogger(__name__) class time(object): """Used in a ``with`` scope that executes the body and logs the elapsed time. Format f-strings are supported as the locals are taken from the calling frame on exit. This means you can do things like: with time('processed {cnt} items'): cnt = 5 tm.sleep(1) which produeces: ``processed 5 items``. See the initializer documentation about special treatment for global loggers. """ def __init__(self, msg, level=logging.INFO, logger=None): """Create the time object. If a logger is not given, it is taken from the calling frame's global variable named ``logger``. If this global doesn't exit it logs to standard out. You can force standard out instead of a logger by using :param msg: the message log when exiting the closure :param logger: the logger to use for logging or the string ``stdout`` for printing to standard :param level: the level at which the message is logged """ self.msg = msg self.logger = logger self.level = level frame = inspect.currentframe() try: globs = frame.f_back.f_globals if 'logger' in globs: self.logger = globs['logger'] except Exception as e: time_logger.error(e) def __enter__(self): self.t0 = tm.time() def __exit__(self, type, value, traceback): elapse = tm.time() - self.t0 msg = self.msg frame = inspect.currentframe() try: locals = frame.f_back.f_locals msg = msg.format(**locals) except Exception as e: time_logger.error(e) msgstr = f'{msg} in {elapse:.1f}s' if self.logger is not None: self.logger.log(self.level, msgstr) else: print(msgstr) class TimeoutError(Exception): pass TIMEOUT_DEFAULT = 10 def timeout(seconds=TIMEOUT_DEFAULT, error_message=os.strerror(errno.ETIME)): """This creates a decorator called @timeout that can be applied to any long running functions. So, in your application code, you can use the decorator like so: from timeout import timeout # Timeout a long running function with the default expiry of # TIMEOUT_DEFAULT seconds. @timeout def long_running_function1(): :see https://stackoverflow.com/questions/2281850/timeout-function-if-it-takes-too-long-to-finish: :author David Narayan """ def decorator(func): def _handle_timeout(signum, frame): raise TimeoutError(error_message) def wrapper(*args, **kwargs): signal.signal(signal.SIGALRM, _handle_timeout) signal.alarm(seconds) try: result = func(*args, **kwargs) finally: signal.alarm(0) return result return wraps(func)(wrapper) return decorator class timeprotect(object): """Invokes a block and bails if not completed in a specified number of seconds. 
:param seconds: the number of seconds to wait :param timeout_handler: function that takes a single argument, which is this ``timeprotect`` object instance; if ``None``, then nothing is done if the block times out :param context: an object accessible from the ``timeout_hander`` via ``self``, which defaults to ``None`` :see timeout: """ def __init__(self, seconds=TIMEOUT_DEFAULT, timeout_handler=None, context=None, error_message=os.strerror(errno.ETIME)): self.seconds = seconds self.timeout_handler = timeout_handler self.context = context self.error_message = error_message self.timeout_handler_exception = None def __enter__(self): def _handle_timeout(signum, frame): signal.alarm(0) if self.timeout_handler is not None: try: self.timeout_handler(self) except Exception as e: time_logger.exception( f'could not recover from timeout handler: {e}') self.timeout_handler_exception = e raise TimeoutError(self.error_message) signal.signal(signal.SIGALRM, _handle_timeout) signal.alarm(self.seconds) def __exit__(self, cls, value, traceback): signal.alarm(0) return True
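

# Illustrative usage sketch (not part of the library): the ``time`` context
# manager prints (or logs) elapsed wall time with f-string style
# interpolation of the caller's locals, and ``timeout`` bails out of a long
# running function (SIGALRM based, so Unix only).  The ``hangs`` function is
# a hypothetical example.
if __name__ == '__main__':
    with time('slept {secs}s'):
        secs = 0.25
        tm.sleep(secs)
    # prints something like: slept 0.25s in 0.3s

    @timeout(seconds=1)
    def hangs():
        tm.sleep(5)

    try:
        hangs()
    except TimeoutError:
        # the module's TimeoutError, raised by the alarm handler
        print('timed out after 1 second')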
zensols.actioncli
/zensols.actioncli-1.1.5-py3-none-any.whl/zensols/actioncli/time.py
time.py
import logging import yaml import pprint import copy from zensols.actioncli import Configurable logger = logging.getLogger(__name__) class YamlConfig(Configurable): """Just like zensols.actioncli.Config but parse configuration from YAML files. Variable substitution works just like ini files, but you can set what delimiter to use and keys are the paths of the data in the hierarchy separated by dots. See the test cases for example. """ CLASS_VER = 0 def __init__(self, config_file=None, default_vars=None, delimiter='$', default_expect=False): super(YamlConfig, self).__init__(config_file, default_expect) self.default_vars = default_vars if default_vars else {} self.delimiter = delimiter @classmethod def _is_primitive(cls, obj): return isinstance(obj, str) or \ isinstance(obj, list) or \ isinstance(obj, bool) def _parse(self): with open(self.config_file) as f: content = f.read() struct = yaml.load(content, yaml.FullLoader) context = {} context.update(self.default_vars) def flatten(path, n): logger.debug('path: {}, n: <{}>'.format(path, n)) logger.debug('context: <{}>'.format(context)) if self._is_primitive(n): context[path] = n else: if isinstance(n, dict): for k, v in n.items(): k = path + '.' + k if len(path) else k flatten(k, v) else: raise ValueError('unknown yaml type {}: {}'. format(type(n), n)) flatten('', struct) self._all_keys = copy.copy(list(context.keys())) return content, struct, context def _make_class(self): class_name = 'YamlTemplate{}'.format(self.CLASS_VER) self.CLASS_VER += 1 # why couldn't they have made idpattern and delimiter instance members? # note we have to give the option of different delimiters since the # default '$$' (use case=OS env vars) is always resolved to '$' given # the iterative variable substitution method code = """\ from string import Template class """ + class_name + """(Template): idpattern = r'[a-z][_a-z0-9.]*' delimiter = '""" + self.delimiter + '\'' exec(code) cls = eval(class_name) return cls def _compile(self): content, struct, context = self._parse() prev = None cls = self._make_class() while prev != content: prev = content # TODO: raise here for missing keys embedded in the file rather # than KeyError content = cls(content).substitute(context) return yaml.load(content, yaml.FullLoader) @property def config(self): if not hasattr(self, '_config'): self._config = self._compile() return self._config def pprint(self): pprint.PrettyPrinter().pprint(self.config) def _option(self, name): def find(n, path, name): logger.debug( 'search: n={}, path={}, name={}'.format(n, path, name)) if path == name: logger.debug('found: <{}>'.format(n)) return n elif isinstance(n, dict): for k, v in n.items(): k = path + '.' 
+ k if len(path) else k v = find(v, k, name) if v is not None: logger.debug('found {} -> {}'.format(name, v)) return v logger.debug('not found: {}'.format(name)) return find(self.config, '', name) def _get_option(self, name, expect=None): node = self._option(name) if self._is_primitive(node): return node elif self.default_vars is not None and name in self.default_vars: return self.default_vars[name] elif self._narrow_expect(expect): raise ValueError('no such option: {}'.format(name)) @property def options(self): if not hasattr(self, '_options'): self.config self._options = {} for k in self._all_keys: self._options[k] = self._get_option(k, expect=True) return self._options def get_option(self, name, expect=None): if self.default_vars and name in self.default_vars: return self.default_vars[name] else: ops = self.options if name in ops: return ops[name] elif self._narrow_expect(expect): raise ValueError('no such option: {}'.format(name)) def get_options(self, name, expect=None): if self.default_vars and name in self.default_vars: return self.default_vars[name] else: node = self._option(name) if not isinstance(node, str) or isinstance(node, list): return node elif name in self.default_vars: return self.default_vars[name] elif self._narrow_expect(expect): raise ValueError('no such option: {}'.format(name))
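

# Illustrative usage sketch (not part of the library): a hypothetical YAML
# file exercising the dotted-path option keys and the ``$`` variable
# substitution described in the class docstring.
if __name__ == '__main__':
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.yml', delete=False) as f:
        f.write('project:\n'
                '  name: zenq\n'
                '  data_dir: /tmp/${project.name}\n')
    conf = YamlConfig(f.name)
    print(conf.get_option('project.name'))      # -> zenq
    print(conf.get_option('project.data_dir'))  # -> /tmp/zenq (substituted)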
zensols.actioncli
/zensols.actioncli-1.1.5-py3-none-any.whl/zensols/actioncli/yaml_config.py
yaml_config.py
import os import sys import logging from abc import ABCMeta, abstractmethod from copy import deepcopy import re import configparser from pathlib import Path import inspect import pkg_resources logger = logging.getLogger(__name__) class Settings(object): def __str__(self): return str(self.__dict__) def __repr__(self): return self.__str__() def pprint(self): from pprint import pprint pprint(self.__dict__) class Configurable(object): FLOAT_REGEXP = re.compile(r'^[-+]?\d*\.\d+$') INT_REGEXP = re.compile(r'^[-+]?[0-9]+$') BOOL_REGEXP = re.compile(r'^True|False') EVAL_REGEXP = re.compile(r'^eval:\s*(.+)$') def __init__(self, config_file, default_expect): self.config_file = config_file self.default_expect = default_expect def _narrow_expect(self, expect): if expect is None: expect = self.default_expect return expect def get_option(self, name, expect=None): raise ValueError('get_option is not implemented') def get_options(self, name, expect=None): raise ValueError('get_options is not implemented') @property def options(self): raise ValueError('get_option is not implemented') def populate(self, obj=None, section=None, parse_types=True): """Set attributes in ``obj`` with ``setattr`` from the all values in ``section``. """ section = self.default_section if section is None else section obj = Settings() if obj is None else obj is_dict = isinstance(obj, dict) for k, v in self.get_options(section).items(): if parse_types: if v == 'None': v = None elif self.FLOAT_REGEXP.match(v): v = float(v) elif self.INT_REGEXP.match(v): v = int(v) elif self.BOOL_REGEXP.match(v): v = v == 'True' else: m = self.EVAL_REGEXP.match(v) if m: evalstr = m.group(1) v = eval(evalstr) logger.debug('setting {} => {} on {}'.format(k, v, obj)) if is_dict: obj[k] = v else: setattr(obj, k, v) return obj def _get_calling_module(self): """Get the last module in the call stack that is not this module or ``None`` if the call originated from this module. """ for frame in inspect.stack(): mod = inspect.getmodule(frame[0]) logger.debug(f'calling module: {mod}') if mod is not None: mod_name = mod.__name__ if mod_name != __name__: return mod def resource_filename(self, resource_name, module_name=None): """Return a resource based on a file name. This uses the ``pkg_resources`` package first to find the resources. If it doesn't find it, it returns a path on the file system. :param: resource_name the file name of the resource to obtain (or name if obtained from an installed module) :param module_name: the name of the module to obtain the data, which defaults to ``__name__`` :return: a path on the file system or resource of the installed module """ if module_name is None: mod = self._get_calling_module() logger.debug(f'calling module: {mod}') if mod is not None: mod_name = mod.__name__ if module_name is None: module_name = __name__ if pkg_resources.resource_exists(mod_name, resource_name): res = pkg_resources.resource_filename(mod_name, resource_name) else: res = resource_name return Path(res) class Config(Configurable): """Application configuration utility. This reads from a configuration and returns sets or subsets of options. """ def __init__(self, config_file=None, default_section='default', robust=False, default_vars=None, default_expect=False, create_defaults=None): """Create with a configuration file path. 
Keyword arguments: :param str config_file: the configuration file path to read from :param str default_section: default section (defaults to `default`) :param bool robust: if `True`, then don't raise an error when the configuration file is missing :param default_expect: if ``True``, raise exceptions when keys and/or sections are not found in the configuration :param create_defaults: used to initialize the configuration parser, and useful for when substitution values are baked in to the configuration file """ super(Config, self).__init__(config_file, default_expect) self.default_section = default_section self.robust = robust self.default_vars = self._munge_default_vars(default_vars) self.create_defaults = self._munge_create_defaults(create_defaults) self.nascent = deepcopy(self.__dict__) def _munge_default_vars(self, vars): return vars def _munge_create_defaults(self, vars): return vars def _create_config_parser(self): "Factory method to create the ConfigParser." return configparser.ConfigParser(defaults=self.create_defaults) @property def content(self): "Return the contents of the configuration file." with open(os.path.expanduser(self.config_file)) as f: return f.read() @property def parser(self): "Load the configuration file." if not hasattr(self, '_conf'): cfile = self.config_file logger.debug('loading config %s' % cfile) if os.path.isfile(cfile): conf = self._create_config_parser() conf.read(os.path.expanduser(cfile)) else: if self.robust: logger.debug(f'no default config file {cfile}--skipping') else: raise IOError(f'no such file: {cfile}') conf = None self._conf = conf return self._conf @property def file_exists(self): return self.parser is not None def get_options(self, section='default', opt_keys=None, vars=None): """ Get all options for a section. If ``opt_keys`` is given return only options with those keys. """ vars = vars if vars else self.default_vars conf = self.parser opts = {} if opt_keys is None: if conf is None: opt_keys = {} else: if not self.robust or conf.has_section(section): opt_keys = conf.options(section) else: opt_keys = {} else: logger.debug('conf: %s' % conf) copts = conf.options(section) if conf else {} opt_keys = set(opt_keys).intersection(set(copts)) for option in opt_keys: logger.debug(f'option: {option}, vars: {vars}') opts[option] = conf.get(section, option, vars=vars) return opts def get_option(self, name, section=None, vars=None, expect=None): """Return an option from ``section`` with ``name``. :param section: section in the ini file to fetch the value; defaults to constructor's ``default_section`` """ vars = vars if vars else self.default_vars if section is None: section = self.default_section opts = self.get_options(section, opt_keys=[name], vars=vars) if opts: return opts[name] else: if self._narrow_expect(expect): raise ValueError('no option \'{}\' found in section {}'. format(name, section)) def get_option_list(self, name, section=None, vars=None, expect=None, separator=','): """Just like ``get_option`` but parse as a list using ``split``. """ val = self.get_option(name, section, vars, expect) return val.split(separator) if val else [] def get_option_boolean(self, name, section=None, vars=None, expect=None): """Just like ``get_option`` but parse as a boolean (any case `true`). 
""" val = self.get_option(name, section, vars, expect) val = val.lower() if val else 'false' return val == 'true' def get_option_int(self, name, section=None, vars=None, expect=None): """Just like ``get_option`` but parse as an integer.""" val = self.get_option(name, section, vars, expect) if val: return int(val) def get_option_float(self, name, section=None, vars=None, expect=None): """Just like ``get_option`` but parse as a float.""" val = self.get_option(name, section, vars, expect) if val: return float(val) def get_option_path(self, name, section=None, vars=None, expect=None, create=None): """Just like ``get_option`` but return a ``pathlib.Path`` object of the string. :param create: if ``parent`` then create the path and all parents not including the file; if ``dir``, then create all parents; otherwise do not create anything """ val = self.get_option(name, section, vars, expect) path = None if val is not None: path = Path(val) if create == 'dir': path.mkdir(parents=True, exist_ok=True) if create == 'file': path.parent.mkdir(parents=True, exist_ok=True) return path @property def options(self): "Return all options from the default section." return self.get_options() @property def sections(self): "Return all sections." secs = self.parser.sections() if secs: return set(secs) def set_option(self, name, value, section=None): logger.debug(f'setting option {name}: {value} in section {section}') if not self.parser.has_section(section): self.parser.add_section(section) self.parser.set(section, name, value) def copy_sections(self, to_populate: Configurable, sections: list): for sec in sections: for k, v in self.get_options(sec).items(): to_populate.set_option(k, v, sec) def derive_from_resource(self, path: str, copy_sections=()) -> Configurable: """Derive a new configuration from the resource file name ``path``. :param path: a resource file (i.e. ``resources/app.conf``) :pram copy_sections: a list of sections to copy from this to the derived configuration """ kwargs = deepcopy(self.nascent) kwargs['config_file'] = path conf = self.__class__(**kwargs) self.copy_sections(conf, copy_sections) return conf def pprint(self, writer=sys.stdout): """Print a human readable list of sections and options. """ for sec in self.sections: writer.write(f'{sec}:\n') for k, v in self.get_options(sec).items(): writer.write(f' {k}: {v}\n') def __str__(self): return str('file: {}, section: {}'. format(self.config_file, self.sections)) def __repr__(self): return self.__str__() class ExtendedInterpolationConfig(Config): """Configuration class extends using advanced interpolation with ``configparser.ExtendedInterpolation``. """ def _create_config_parser(self): inter = configparser.ExtendedInterpolation() return configparser.ConfigParser( defaults=self.create_defaults, interpolation=inter) class ExtendedInterpolationEnvConfig(ExtendedInterpolationConfig): """A ``Config`` implementation that creates a section called ``env`` with environment variables passed. 
""" def __init__(self, *args, remove_vars: bool = None, env: dict = None, env_sec: str = 'env', **kwargs): if 'default_expect' not in kwargs: kwargs['default_expect'] = True self.remove_vars = remove_vars if env is None: self.env = os.environ else: self.env = env self.env_sec = env_sec super(ExtendedInterpolationEnvConfig, self).__init__(*args, **kwargs) def _munge_default_vars(self, vars): if vars is not None and self.remove_vars is not None: for n in self.remove_vars: if n in vars: del vars[n] return vars def _create_config_parser(self): parser = super(ExtendedInterpolationEnvConfig, self)._create_config_parser() sec = self.env_sec parser.add_section(sec) for k, v in self.env.items(): logger.debug(f'adding env section {sec}: {k} -> {v}') parser.set(sec, k, v) # purify for pickle del self.env return parser class CommandLineConfig(Config, metaclass=ABCMeta): """A configuration object that allows creation by using command line arguments as defaults when the configuration file is missing. Sub classes must implement the ``set_defaults`` method. All defaults set in this method are then created in the default section of the configuration when created with the static method ``from_args``, which is called with the parsed command line arguments (usually from some instance or instance of subclass ``SimpleActionCli``. """ def __init__(self, *args, **kwargs): super(CommandLineConfig, self).__init__(*args, **kwargs) def set_default(self, name: str, value: str, clobber: bool = None): """Set a default value in the ``default`` section of the configuration. """ if clobber is not None: self.set_option(name, clobber, self.default_section) elif name not in self.options and value is not None: self.set_option(name, value, self.default_section) @abstractmethod def set_defaults(self, *args, **kwargs): pass @classmethod def from_args(cls, config=None, *args, **kwargs): if config is None: self = cls() self._conf = self._create_config_parser() self.parser.add_section(self.default_section) else: self = config self.set_defaults(*args, **kwargs) return self
zensols.actioncli
/zensols.actioncli-1.1.5-py3-none-any.whl/zensols/actioncli/config.py
config.py
import re import os import sys import logging import inspect from functools import reduce import optparse from optparse import OptionParser from zensols.actioncli import SimpleActionCli, Config logger = logging.getLogger(__name__) class PrintActionsOptionParser(OptionParser): """Implements a human readable implementation of print_help for action based command line handlers (i.e. OneConfPerActionOptionsCli). """ def __init__(self, *args, **kwargs): super(PrintActionsOptionParser, self).__init__(*args, **kwargs) @property def action_options(self): return self._action_options @property def action_names(self): return sorted(self.action_options.keys()) @action_options.setter def action_options(self, opts): self._action_options = opts self.usage = '%prog <list|{}> [options]'.\ format('|'.join(self.action_names)) def print_help(self, file=sys.stdout): logger.debug('print help: %s' % self.invokes) logger.debug('action options: %s' % self.action_options) OptionParser.print_help(self, file) action_name_len = reduce(lambda x, y: max(x, y), map(lambda x: len(x), self.action_names)) action_fmt_str = ' {:<' + str(action_name_len) + '} {}' action_help = [] opt_str_len = 0 def_str_len = 0 # format text for each action and respective options for action_name in self.action_names: if action_name in self.invokes: action_doc = self.invokes[action_name][2].capitalize() opts = map(lambda x: x['opt_obj'], self.action_options[action_name]) logger.debug('{} -> {}, {}'.format( action_name, action_doc, opts)) opt_strs = [] for opt in opts: short_opt, long_opt, sep, default = '', '', '', '' if opt._short_opts and len(opt._short_opts) > 0: short_opt = opt._short_opts[0] if opt._long_opts and len(opt._long_opts) > 0: long_opt = opt._long_opts[0] if opt.metavar is not None: otype = ' <{}>'.format(opt.metavar) elif opt.type is not None: otype = ' <{}>'.format(opt.type.upper()) else: otype = '' if len(short_opt) > 0 and len(long_opt) > 0: sep = ', ' opt_str = ' {}{}{}{}'.format( short_opt, sep, long_opt, otype) if opt.default and opt.default != ('NO', 'DEFAULT'): default = str(opt.default) opt_strs.append({'str': opt_str, 'default': default, 'help': opt.help}) opt_str_len = max(opt_str_len, len(opt_str)) def_str_len = max(def_str_len, len(default)) action_help.append( {'doc': action_fmt_str.format(action_name, action_doc), 'opts': opt_strs}) opt_str_fmt = '{:<' + str(opt_str_len) + '} {:<' +\ str(def_str_len) + '} {}\n' file.write('Actions:\n') for i, ah in enumerate(action_help): file.write(ah['doc'] + '\n') for op in ah['opts']: file.write(opt_str_fmt.format( op['str'], op['default'], op['help'])) if i < len(action_help) - 1: file.write('\n') class PerActionOptionsCli(SimpleActionCli): def __init__(self, *args, **kwargs): self.action_options = {} super(PerActionOptionsCli, self).__init__(*args, **kwargs) def _init_executor(self, executor, config, args): mems = inspect.getmembers(executor, predicate=inspect.ismethod) if 'set_args' in (set(map(lambda x: x[0], mems))): executor.set_args(args) def _log_config(self): logger.debug('executors: %s' % self.executors) logger.debug('invokes: %s' % self.invokes) logger.debug('action options: %s' % self.action_options) logger.debug('opts: %s' % self.opts) logger.debug('manditory opts: %s' % self.manditory_opts) def make_option(self, *args, **kwargs): return optparse.make_option(*args, **kwargs) def _create_parser(self, usage): return PrintActionsOptionParser( usage=usage, version='%prog ' + str(self.version)) def _config_parser_for_action(self, args, parser): logger.debug('config 
parser for action: %s' % args) action = args[0] if action in self.action_options: for opt_cfg in self.action_options[action]: opt_obj = opt_cfg['opt_obj'] parser.add_option(opt_obj) self.opts.add(opt_obj.dest) logger.debug('manditory: %s' % opt_cfg['manditory']) if opt_cfg['manditory']: self.manditory_opts.add(opt_obj.dest) self._log_config() class OneConfPerActionOptionsCli(PerActionOptionsCli): """Convenience action handler that allows a definition on a per action basis. See the test cases for examples of how to use this as the detail is all in the configuration pased to the init method. :param opt_config: the option configuration (see project documentation) :param config_type: the class used for the configuration and defaults to ``zensols.actioncli.Config`` """ def __init__(self, opt_config, config_type=Config, **kwargs): self.opt_config = opt_config self.config_type = config_type super(OneConfPerActionOptionsCli, self).__init__({}, {}, **kwargs) def _config_global(self, oc): parser = self.parser logger.debug('global opt config: %s' % oc) if 'whine' in oc and oc['whine']: logger.debug('configuring whine option') self._add_whine_option(parser, default=oc['whine']) if 'short' in oc and oc['short']: logger.debug('configuring short option') self._add_short_option(parser) if 'config_option' in oc: conf = oc['config_option'] self.config_opt_conf = conf opt = conf['opt'] logger.debug('config opt: %s', opt) opt_obj = self.make_option(opt[0], opt[1], **opt[3]) parser.add_option(opt_obj) if opt[2]: self.manditory_opts.add(opt_obj.dest) if 'global_options' in oc: for opt in oc['global_options']: logger.debug('global opt: %s', opt) opt_obj = self.make_option(opt[0], opt[1], **opt[3]) logger.debug('parser opt: %s', opt_obj) parser.add_option(opt_obj) self.opts.add(opt_obj.dest) if opt[2]: self.manditory_opts.add(opt_obj.dest) def _config_executor(self, oc): exec_name = oc['name'] gaopts = self.action_options logger.debug('config opt config: %s' % oc) for action in oc['actions']: action_name = action['name'] meth = action['meth'] if 'meth' in action else re.sub(r'[- ]', '_', action_name) doc = action['doc'] if 'doc' in action else re.sub(r'[-_]', ' ', meth) inv = [exec_name, meth, doc] logger.debug('inferred action: %s: %s' % (action, inv)) self.invokes[action_name] = inv if 'opts' not in action: action['opts'] = () aopts = gaopts[action_name] if action_name in gaopts else [] gaopts[action_name] = aopts for opt in action['opts']: logger.debug('action opt: %s' % opt) opt_obj = self.make_option(opt[0], opt[1], **opt[3]) logger.debug('action opt obj: %s' % opt_obj) aopts.append({'opt_obj': opt_obj, 'manditory': opt[2]}) self.executors[exec_name] = oc['executor'] def config_parser(self): super(OneConfPerActionOptionsCli, self).config_parser() parser = self.parser self._config_global(self.opt_config) for oc in self.opt_config['executors']: self._config_executor(oc) parser.action_options = self.action_options parser.invokes = self.invokes self._log_config() logger.debug('finished config parser') def _create_config(self, conf_file, default_vars): return self.config_type( config_file=conf_file, default_vars=default_vars) def _get_default_config(self, params): return super(OneConfPerActionOptionsCli, self).get_config(params) def _find_conf_file(self, conf, params): conf_name = conf['name'] conf_file = params[conf_name] logger.debug('config configuration: %s, name: %s, params: %s' % (conf, conf_name, params)) if conf_file is not None: if not os.path.isfile(conf_file) and \ ('expect' not in conf or 
conf['expect']): raise IOError('no such configuration file: %s' % conf_file) return conf_file def get_config(self, params): if not hasattr(self, 'config_opt_conf'): conf = self._get_default_config(params) else: conf_def = self.config_opt_conf conf_file = self._find_conf_file(conf_def, params) if conf_file is None: conf = None else: good_keys = filter(lambda x: params[x] is not None, params.keys()) defaults = {k: str(params[k]) for k in good_keys} logger.debug('defaults: %s' % defaults) conf = self._create_config(conf_file, defaults) logger.debug('created config: %s' % conf) if conf is None: conf = self._get_default_config(params) logger.debug('returning config: %s' % conf) return conf class OneConfPerActionOptionsCliEnv(OneConfPerActionOptionsCli): """A command line option parser that first parses an ini file and passes that configuration on to the rest of the CLI action processing in the super class. """ def __init__(self, opt_config, config_env_name=None, no_os_environ=False, *args, **kwargs): """Initialize. :param opt_config: the option configuration (see project documentation) :param config_env_name: the name of the environment variable that holds the resource like name (i.e. ~/.<program name>rc); this will be used as the configuration file if it is given and found; otherwise a ``ValueError`` is rasied if not found :param no_os_environ: if ``True`` do not add environment variables to the configuration environment """ super(OneConfPerActionOptionsCliEnv, self).__init__( opt_config, *args, **kwargs) if config_env_name is None: self.default_config_file = None else: conf_env_var = config_env_name.upper() if conf_env_var in os.environ: default_config_file = os.environ[conf_env_var] else: default_config_file = os.path.expanduser( '~/.{}'.format(config_env_name)) logger.debug('configured default config file: {}'.format( default_config_file)) self.default_config_file = default_config_file self.no_os_environ = no_os_environ def _create_config(self, conf_file, default_vars): defs = {} defs.update(default_vars) if not self.no_os_environ: logger.debug(f'adding environment to config: {os.environ}') defs.update(os.environ) logger.debug('creating with conf_file: {}'.format(conf_file)) return super(OneConfPerActionOptionsCliEnv, self)._create_config( conf_file, defs) def _find_conf_file(self, conf, params): logger.debug('finding config: {}'.format(self.default_config_file)) if self.default_config_file is None: conf_file = super(OneConfPerActionOptionsCliEnv, self).\ _find_conf_file(conf, params) else: conf_name = conf['name'] conf_file = params[conf_name] logger.debug('config: {}, name: {}, params: {}, default_config_file: {}' .format(conf, conf_name, params, self.default_config_file)) if conf_file is None: if os.path.isfile(self.default_config_file): conf_file = self.default_config_file elif 'expect' in conf and conf['expect']: if conf_file is None: raise IOError('no configuration file defined in: %s or %s' % (conf['name'], self.default_config_file)) raise IOError('no such configuration file: %s' % conf_file) return conf_file
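

# Illustrative usage sketch (not part of the library): the nested
# ``opt_config`` structure consumed by ``OneConfPerActionOptionsCli``.  The
# ``Exporter`` executor, ``export`` action and ``--out-file`` option are
# hypothetical names; each option is a tuple of (short option, long option,
# mandatory flag, add_option keyword arguments).
class Exporter(object):
    def __init__(self, params):
        self.out_file = params.get('out_file')

    def export(self):
        print('exporting to %s' % self.out_file)


if __name__ == '__main__':
    cli = OneConfPerActionOptionsCli(
        {'executors':
         [{'name': 'exporter',
           'executor': lambda params: Exporter(params),
           'actions':
           [{'name': 'export',
             'doc': 'export the data',
             'opts': [('-o', '--out-file', True,
                       {'dest': 'out_file', 'metavar': 'FILE',
                        'help': 'the file to write to'})]}]}]})
    cli.invoke(['export', '-o', 'out.csv'])  # -> exporting to out.csv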
zensols.actioncli
/zensols.actioncli-1.1.5-py3-none-any.whl/zensols/actioncli/peraction.py
peraction.py