__author__ = 'Zhang Fan'

from threading import Lock as base_lock

class BLock():
    '''Blocker: blocks a single thread until released'''

    def __init__(self):
        self.__is_lock = False
        self.__lock = base_lock()
        self.__lock.acquire()

    def lock(self, show_lock_err=False):
        '''
        Block the calling thread.
        :param show_lock_err: if one thread is already blocked and another thread
            requests the lock, an error is raised when show_lock_err is True;
            when it is False the second caller returns without blocking.
        '''
        if self.__is_lock:
            if not show_lock_err:
                return
            raise AssertionError('a thread is already blocked')
        self.__is_lock = True
        self.__lock.acquire()  # blocks here
        self.__is_lock = False

    def unlock(self):
        if self.__is_lock:
            self.__lock.release()

    @property
    def locked(self):
        return self.__is_lock

class BLock_more():
    '''Blocker for multiple threads'''

    def __init__(self):
        self.__lock = base_lock()
        self.__lock.acquire()
        self.__lock_count = 0
        self.__join_lock = base_lock()

    def lock(self):
        '''Block any thread that calls this method'''
        if self.__lock_count == 0:
            self.__join_lock.acquire()
        self.__lock_count += 1
        with self.__lock:  # blocks here
            self.__lock_count -= 1
        if self.__lock_count == 0:
            if self.__join_lock.locked():
                self.__join_lock.release()
            self.__lock.acquire()

    def unlock(self):
        '''Release every thread blocked in lock()'''
        if self.__lock_count > 0:
            self.__lock.release()

    def locker_count(self):
        '''Number of threads currently blocked'''
        return self.__lock_count

    def join(self):
        '''Block until every thread that called lock() has been released'''
        if self.__lock_count > 0:
            self.__join_lock.acquire()
            self.__join_lock.release()

if __name__ == '__main__':
    print('Testing the single-thread blocker')

    def fun():
        for i in range(5):
            time.sleep(1)
            a.unlock()

    import threading
    import time

    a = BLock()
    threading.Thread(target=fun).start()
    for i in range(5):
        print('locking', i, time.strftime('%H:%M:%S', time.localtime()))
        a.lock()  # blocks here
        print(' released', i, time.strftime('%H:%M:%S', time.localtime()))
    print('done\n\n')

if __name__ == '__main__':
    print('Testing the multi-thread blocker')

    def fun1(value):
        print('blocking', value)
        a.lock()
        print(' released', value)

    def fun2():
        print('--releasing all blocks in 2 seconds--', time.strftime('%H:%M:%S', time.localtime()))
        time.sleep(2)
        print('--about to release all blocks--', time.strftime('%H:%M:%S', time.localtime()))
        a.unlock()

    import threading
    import time

    a = BLock_more()
    for i in range(5):
        threading.Thread(target=fun1, args=(i,)).start()
    threading.Thread(target=fun2).start()
    a.join()
    print('done')
| zblocker | /zblocker-1.0.2-py3-none-any.whl/zblocker.py | zblocker.py |
zbox
====
.. image:: https://pypip.in/version/zbox/badge.svg
    :target: https://pypi.python.org/pypi/zbox/
    :alt: Latest Version

.. image:: https://pypip.in/py_versions/zbox/badge.svg
    :target: https://pypi.python.org/pypi/zbox/
    :alt: Supported Python versions

.. image:: https://pypip.in/wheel/zbox/badge.svg
    :target: https://pypi.python.org/pypi/zbox/
    :alt: Wheel Status

.. image:: https://travis-ci.org/jiffyclub/zbox.svg?branch=master
    :target: https://travis-ci.org/jiffyclub/zbox
    :alt: Travis-CI Status
zbox is a tiny library to help me use toolz_ and cytoolz_.
I frequently use ``toolz`` and would like to use ``cytoolz`` if it's
available, but don't want to put a ``try``/``except`` in
all my projects. By importing ``toolz`` from ``zbox`` I always
get ``cytoolz`` if ``cytoolz`` is installed and otherwise I get
``toolz``.
Installation
------------
zbox is on PyPI, install it with: ``pip install zbox``.
zbox works on Python 2 and Python 3.
Usage
-----
.. code::

    from zbox import toolz
If cytoolz_ is installed ``toolz`` will be ``cytoolz``,
otherwise it will be ``toolz``.
gen
~~~
.. code::

    from zbox import gen
``gen`` is a function that converts any iterable into a Python
generator object. I use this with Pandas, which sometimes doesn't
expand iterables unless they are first converted to a generator.
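A minimal sketch of such a conversion (the real zbox implementation may differ):

.. code::

    def gen(iterable):
        # Wrap any iterable in a true generator object.
        return (item for item in iterable)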
.. _toolz: http://toolz.readthedocs.org/
.. _cytoolz: https://github.com/pytoolz/cytoolz/
| zbox | /zbox-1.2.0.zip/zbox-1.2.0/README.rst | README.rst |
import os
import shutil
import sys
import tempfile
import zipfile
import optparse
import subprocess
import platform
import textwrap
import contextlib
import warnings

from distutils import log

try:
    from urllib.request import urlopen
except ImportError:
    from urllib2 import urlopen

try:
    from site import USER_SITE
except ImportError:
    USER_SITE = None

DEFAULT_VERSION = "14.0"
DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"
DEFAULT_SAVE_DIR = os.curdir

def _python_cmd(*args):
    """
    Execute a command.

    Return True if the command succeeded.
    """
    args = (sys.executable,) + args
    return subprocess.call(args) == 0

def _install(archive_filename, install_args=()):
    """Install Setuptools."""
    with archive_context(archive_filename):
        # installing
        log.warn('Installing Setuptools')
        if not _python_cmd('setup.py', 'install', *install_args):
            log.warn('Something went wrong during the installation.')
            log.warn('See the error message above.')
            # exitcode will be 2
            return 2

def _build_egg(egg, archive_filename, to_dir):
    """Build Setuptools egg."""
    with archive_context(archive_filename):
        # building an egg
        log.warn('Building a Setuptools egg in %s', to_dir)
        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
    # returning the result
    log.warn(egg)
    if not os.path.exists(egg):
        raise IOError('Could not build the egg.')

class ContextualZipFile(zipfile.ZipFile):
    """Supplement ZipFile class to support context manager for Python 2.6."""

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    def __new__(cls, *args, **kwargs):
        """Construct a ZipFile or ContextualZipFile as appropriate."""
        if hasattr(zipfile.ZipFile, '__exit__'):
            return zipfile.ZipFile(*args, **kwargs)
        return super(ContextualZipFile, cls).__new__(cls)

@contextlib.contextmanager
def archive_context(filename):
    """
    Unzip filename to a temporary directory, set to the cwd.

    The unzipped target is cleaned up after.
    """
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        with ContextualZipFile(filename) as archive:
            archive.extractall()
        # going in the directory
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)
        yield
    finally:
        os.chdir(old_wd)
        shutil.rmtree(tmpdir)

def _do_download(version, download_base, to_dir, download_delay):
    """Download Setuptools."""
    egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
                       % (version, sys.version_info[0], sys.version_info[1]))
    if not os.path.exists(egg):
        archive = download_setuptools(version, download_base,
                                      to_dir, download_delay)
        _build_egg(egg, archive, to_dir)
    sys.path.insert(0, egg)

    # Remove previously-imported pkg_resources if present (see
    # https://bitbucket.org/pypa/setuptools/pull-request/7/ for details).
    if 'pkg_resources' in sys.modules:
        del sys.modules['pkg_resources']

    import setuptools
    setuptools.bootstrap_install_from = egg

def use_setuptools(
        version=DEFAULT_VERSION, download_base=DEFAULT_URL,
        to_dir=DEFAULT_SAVE_DIR, download_delay=15):
    """
    Ensure that a setuptools version is installed.

    Return None. Raise SystemExit if the requested version
    or later cannot be installed.
    """
    to_dir = os.path.abspath(to_dir)

    # prior to importing, capture the module state for
    # representative modules.
    rep_modules = 'pkg_resources', 'setuptools'
    imported = set(sys.modules).intersection(rep_modules)

    try:
        import pkg_resources
        pkg_resources.require("setuptools>=" + version)
        # a suitable version is already installed
        return
    except ImportError:
        # pkg_resources not available; setuptools is not installed; download
        pass
    except pkg_resources.DistributionNotFound:
        # no version of setuptools was found; allow download
        pass
    except pkg_resources.VersionConflict as VC_err:
        if imported:
            _conflict_bail(VC_err, version)
        # otherwise, unload pkg_resources to allow the downloaded version to
        # take precedence.
        del pkg_resources
        _unload_pkg_resources()

    return _do_download(version, download_base, to_dir, download_delay)

def _conflict_bail(VC_err, version):
    """
    Setuptools was imported prior to invocation, so it is
    unsafe to unload it. Bail out.
    """
    conflict_tmpl = textwrap.dedent("""
        The required version of setuptools (>={version}) is not available,
        and can't be installed while this script is running. Please
        install a more recent version first, using
        'easy_install -U setuptools'.

        (Currently using {VC_err.args[0]!r})
        """)
    msg = conflict_tmpl.format(**locals())
    sys.stderr.write(msg)
    sys.exit(2)

def _unload_pkg_resources():
    del_modules = [
        name for name in sys.modules
        if name.startswith('pkg_resources')
    ]
    for mod_name in del_modules:
        del sys.modules[mod_name]

def _clean_check(cmd, target):
    """
    Run the command to download target.

    If the command fails, clean up before re-raising the error.
    """
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError:
        if os.access(target, os.F_OK):
            os.unlink(target)
        raise

def download_file_powershell(url, target):
    """
    Download the file at url to target using Powershell.

    Powershell will validate trust.
    Raise an exception if the command cannot complete.
    """
    target = os.path.abspath(target)
    ps_cmd = (
        "[System.Net.WebRequest]::DefaultWebProxy.Credentials = "
        "[System.Net.CredentialCache]::DefaultCredentials; "
        "(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)"
        % vars()
    )
    cmd = [
        'powershell',
        '-Command',
        ps_cmd,
    ]
    _clean_check(cmd, target)

def has_powershell():
    """Determine if Powershell is available."""
    if platform.system() != 'Windows':
        return False
    cmd = ['powershell', '-Command', 'echo test']
    with open(os.path.devnull, 'wb') as devnull:
        try:
            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
        except Exception:
            return False
    return True
download_file_powershell.viable = has_powershell

def download_file_curl(url, target):
    cmd = ['curl', url, '--silent', '--output', target]
    _clean_check(cmd, target)

def has_curl():
    cmd = ['curl', '--version']
    with open(os.path.devnull, 'wb') as devnull:
        try:
            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
        except Exception:
            return False
    return True
download_file_curl.viable = has_curl

def download_file_wget(url, target):
    cmd = ['wget', url, '--quiet', '--output-document', target]
    _clean_check(cmd, target)

def has_wget():
    cmd = ['wget', '--version']
    with open(os.path.devnull, 'wb') as devnull:
        try:
            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
        except Exception:
            return False
    return True
download_file_wget.viable = has_wget

def download_file_insecure(url, target):
    """Use Python to download the file, without connection authentication."""
    src = urlopen(url)
    try:
        # Read all the data in one block.
        data = src.read()
    finally:
        src.close()

    # Write all the data in one block to avoid creating a partial file.
    with open(target, "wb") as dst:
        dst.write(data)
download_file_insecure.viable = lambda: True

def get_best_downloader():
    downloaders = (
        download_file_powershell,
        download_file_curl,
        download_file_wget,
        download_file_insecure,
    )
    viable_downloaders = (dl for dl in downloaders if dl.viable())
    return next(viable_downloaders, None)

def download_setuptools(
        version=DEFAULT_VERSION, download_base=DEFAULT_URL,
        to_dir=DEFAULT_SAVE_DIR, delay=15,
        downloader_factory=get_best_downloader):
    """
    Download setuptools from a specified location and return its filename.

    `version` should be a valid setuptools version number that is available
    as an sdist for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download
    attempt.

    ``downloader_factory`` should be a function taking no arguments and
    returning a function for downloading a URL to a target.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    zip_name = "setuptools-%s.zip" % version
    url = download_base + zip_name
    saveto = os.path.join(to_dir, zip_name)
    if not os.path.exists(saveto):  # Avoid repeated downloads
        log.warn("Downloading %s", url)
        downloader = downloader_factory()
        downloader(url, saveto)
    return os.path.realpath(saveto)

def _build_install_args(options):
    """
    Build the arguments to 'python setup.py install' on the setuptools package.

    Returns list of command line arguments.
    """
    return ['--user'] if options.user_install else []

def _parse_args():
    """Parse the command line for options."""
    parser = optparse.OptionParser()
    parser.add_option(
        '--user', dest='user_install', action='store_true', default=False,
        help='install in user site package (requires Python 2.6 or later)')
    parser.add_option(
        '--download-base', dest='download_base', metavar="URL",
        default=DEFAULT_URL,
        help='alternative URL from where to download the setuptools package')
    parser.add_option(
        '--insecure', dest='downloader_factory', action='store_const',
        const=lambda: download_file_insecure, default=get_best_downloader,
        help='Use internal, non-validating downloader'
    )
    parser.add_option(
        '--version', help="Specify which version to download",
        default=DEFAULT_VERSION,
    )
    parser.add_option(
        '--to-dir',
        help="Directory to save (and re-use) package",
        default=DEFAULT_SAVE_DIR,
    )
    options, args = parser.parse_args()
    # positional arguments are ignored
    return options

def _download_args(options):
    """Return args for download_setuptools function from cmdline args."""
    return dict(
        version=options.version,
        download_base=options.download_base,
        downloader_factory=options.downloader_factory,
        to_dir=options.to_dir,
    )

def main():
    """Install or upgrade setuptools and EasyInstall."""
    options = _parse_args()
    archive = download_setuptools(**_download_args(options))
    return _install(archive, _build_install_args(options))

if __name__ == '__main__':
    sys.exit(main())
| zbox | /zbox-1.2.0.zip/zbox-1.2.0/ez_setup.py | ez_setup.py |
# Create your Zetabase account
```
from zbpy import client
%createaccount
```
# Create a pandas DataFrame
```
import pandas as pd
from sklearn import datasets
import numpy as np
data = pd.read_csv('datasets/train.csv')
```
---
# Set up your Zetabase client with either ECDSA or JWT security:
### ECDSA
```
pub_key = client.import_key('./zb/zetabase.1591621160.pub', public=True)
priv_key = client.import_key('./zb/zetabase.1591621160.priv', public=False)
zb = client.ZetabaseClient('18259baf-b9e7-4cbd-9027-ca6a4dae1af1')
zb.connect()
zb.set_id_key(priv_key, pub_key)
```
### JWT
```
zb = client.ZetabaseClient('18259baf-b9e7-4cbd-9027-ca6a4dae1af1')
zb.connect()
zb.set_id_password('test_user', 'test_pass')
zb.auth_login_jwt()
```
---
# Insert your DataFrames into an existing Zetabase table or create a new one with indexed fields based on the columns of your dataframe!
### Inserting into existing table
```
zb.put_dataframe('titanic', data, 'raw')
```
### Inserting into new table
```
zb.put_dataframe_new_table('titanic_a14', data, 'raw', allow_jwt=True)
```
---
# List keys from your table, retrieve your data, and convert it back to pandas DataFrames
```
list_keys = zb.list_keys('titanic_a14')
keys = [key for key in list_keys]
output_data = zb.get('titanic_a14', keys)
df = output_data.to_dataframe()
```
---
# Engineer your features
```
df['FamSize'] = df['SibSp'] + df['Parch']
mapping_fam_size = {0: 0, 1: .25, 2: .5, 3: .75, 4: 1, 5: 1.25, 6: 1.5, 7: 1.75, 8: 2, 9: 2.25, 10: 2.5, 11: 2.75, 12: 3}
mapping_sex = {'male': 0, 'female': 1}
mapping_cabin = {'A': 0, 'B': .5, 'C': 1, 'D': 1.5, 'E': 2, 'F': 2.5, 'G': 3, 'T': 3.5}
df['Sex'] = df['Sex'].map(mapping_sex)
df['Cabin'] = df['Cabin'].str[:1]
df['Cabin'] = df['Cabin'].map(mapping_cabin)
df['FamSize'] = df['FamSize'].map(mapping_fam_size)
df.loc[df['Fare'] <= 20, 'Fare'] = 0
df.loc[(df['Fare'] > 20) & (df['Fare'] <= 40), 'Fare'] = 1
df.loc[(df['Fare'] > 40) & (df['Fare'] <= 100), 'Fare'] = 2
df.loc[df['Fare'] > 100, 'Fare'] = 3
df.loc[df['Age'] <= 17, 'Age'] = 0
df.loc[(df['Age'] > 17) & (df['Age'] <= 30), 'Age'] = 1
df.loc[(df['Age'] > 30) & (df['Age'] <= 40), 'Age'] = 2
df.loc[(df['Age'] > 40) & (df['Age'] <= 50), 'Age'] = 3
df.loc[(df['Age'] > 50) & (df['Age'] <= 60), 'Age'] = 4
df.loc[df['Age'] > 60, 'Age'] = 5
df['Cabin'].fillna(df.groupby('Pclass')['Cabin'].transform('median'), inplace=True)
df.fillna(2, inplace=True)
pass_ids = df['PassengerId']
features_to_drop = ['Ticket', 'SibSp', 'Parch', 'Name', 'Embarked', 'PassengerId']
df = df.drop(features_to_drop, axis=1)
```
---
# Save your featurized data back into Zetabase
```
zb.put_dataframe('titanic_a14', df, 'feat')
```
# Retrieve only the featurized data and split it into training and testing groups
```
from sklearn.model_selection import train_test_split
list_keys = zb.list_keys_with_pattern('titanic_a14', 'feat/%')
keys = [key for key in list_keys]
data_feat = zb.get('titanic_a14', keys)
df_new = data_feat.to_dataframe()
x = df_new[['Pclass', 'Sex', 'Age', 'Fare', 'Cabin', 'FamSize']]
y = df_new['Survived']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25)
```
---
# Create and fit your model to the training data
```
from sklearn import ensemble
gb_clf = ensemble.GradientBoostingClassifier()
gb_clf.fit(x_train, y_train)
```
---
# Save your model to Zetabase
```
import _pickle as cPickle
model_to_save = cPickle.dumps(gb_clf)
zb.put_data('mlModels', 'titanic_gb_clf', model_to_save, overwrite=True)
```
---
# Reload your pre-trained model
```
key = 'titanic_gb_clf'
get_model = zb.get('mlModels', [key])
pickled_model = get_model.data()
titanic_clf = cPickle.loads(pickled_model[key])
```
# Make predictions with your model
```
df_new = df_new.drop(['Survived'], axis=1)
prediction = titanic_clf.predict(df_new).copy()
results = pd.DataFrame({
    'PassengerId': pass_ids,
    'Survived': prediction
})
print(results)
```
# Save your predictions to a new table
```
zb.put_dataframe_new_table('titanic_preds1', results, 'preds')
```
| zbpy | /zbpy-0.1.1.tar.gz/zbpy-0.1.1/Example Project.ipynb | Example Project.ipynb |
# zbpy: official Zetabase client for Python
The `zbpy` package provides a pure-Python Zetabase client and reference implementation of the Zetabase protocol, along with integrations for commonly used Python tools like Numpy/Pandas.
For more complete documentation, please refer to the main documentation section on the Zetabase website:
1. [Python quick start](https://zetabase.io/docs/#/pysetup)
2. [Python library reference](https://zetabase.io/static/docs-python/html/zbpy.html)
3. [Zetabase main documentation](https://zetabase.io/docs/#/)
## Prerequisites and external dependencies
1. Python 3.6 or higher
2. `gcc` or equivalent compiler (except Windows)
3. `gmp` library and headers (except Windows)
**Note**: a C compiler is not required when running `zbpy` on Windows. However, all requests made with ECDSA on Windows will be
slightly slower when compared to other operating systems due to limitations of the platform. We recommend that heavy workloads
on Windows use JWT authentication when possible.
### Installing gmp (if needed)
1. OSX: `brew install gmp`
2. Ubuntu: `apt-get install libgmp3-dev`
3. Centos: `yum install gmp gmp-devel gmp-status`
Not required for Windows OS.
## Installation
Run the following to install:
```bash
pip3 install zbpy
```
You may get an error indicating you need to install `cython`. In this case, simply run the following:
```bash
pip3 install cython
```
And then re-run `pip3 install zbpy`.
## Creating an account
If you do not have an account already you can easily create one through the Python client module. If you are using Juptyer notebooks, simply use the Jupyter account creation magic:
```python
from zbpy import client
%createaccount
```
The `%createaccount` magic will run you through an interactive wizard to create a new Zetabase user identity.
Otherwise, run the following code within the Python interactive shell to go through the same wizard
on the console:
```python
from zbpy import util
util.new_account_interactive()
```
Answer the prompts that will appear, and if the account is created successfully, three files will be created in your current directory. These are:
1. your private key;
2. your public key; and
3. an identity file containing both keys along with your user ID.
## Test your installation
To test that everything has installed correctly run the `test_zbpy` method from `zbpy.util` in Jupyter or the Python interactive shell:
```python
from zbpy import util
util.test_zbpy()
```
## Library usage
### Creating a Zetabase client
When you created your identity, you were assigned a user ID (a UUID, a random-looking string of letters and numbers). Use this to instantiate your client.
```python
from zbpy import client
zb_client = client.ZetabaseClient('YOUR USER ID')
```
## Connecting your client to Zetabase
### To use JWT authentication for all requests
When you created your identity, you created a "name" (handle) and administrator password. You can use these instead of your public and private keys if your tables are configured to allow it.
```python
zb_client.login_jwt('YOUR USERNAME', 'YOUR PASSWORD')
```
### To use ECDSA authentication for all requests
```python
zb_client.setup_ecdsa('FILEPATH TO PRIVATE KEY', 'FILEPATH TO PUBLIC KEY')
```
### Creating Tables
#### With Pandas
**Note**: There are two methods for creating tables with zbpy, and both take the same two optional parameters:
1. `perms`: used to specify the permissions of the table (permissions can also be added to an existing table with the `add_permission()` method)
2. `allow_jwt`: if true, allows data to be put into the table using JWT authentication.
If you are creating a table to hold a Pandas dataframe, the easiest way is to use the following function. This will create a table with indexed fields that match the names and types of the columns of your dataframe, and then it inserts your dataframe into the given table using some given "dataframe key" to identify it.
```python
zb_client.put_dataframe_new_table('TABLE ID', YOUR DATAFRAME, 'YOUR DF KEY')
```
If you would like only a subset of the DataFrame's columns to be turned into indexed fields in the table, use the `specify_fields` parameter.
```python
zb_client.put_dataframe_new_table('TABLE ID', YOUR DATAFRAME, 'YOUR DF KEY', specify_fields=['age', 'height'])
```
This field can be `[]` to not index any fields (i.e. if you have no intention of querying the table based on field values).
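For example, to create the table without indexing any columns (reusing the placeholder names above):
```python
zb_client.put_dataframe_new_table('TABLE ID', YOUR DATAFRAME, 'YOUR DF KEY', specify_fields=[])
```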
#### Custom tables (no Pandas)
In this case, we create a new table by passing in a set of zero or more fields to index and some given list of permissions, e.g.:
```python
from zbpy.indexedfieldentity import IndexedField
from zbpy import zb_protocol_pb2 as zb
index_age = IndexedField('age', zb.QueryOrdering.INTEGRAL_NUMBERS)
index_height = IndexedField('height', zb.QueryOrdering.REAL_NUMBERS)
zb_client.create_table('TABLE ID', zb.TableDataFormat.JSON, [index_age, index_height], [OPTIONAL PERMS], allow_jwt=True)
```
### Creating permissions and adding them to existing tables
```python
from zbpy.permissionentity import PermEntry
from zbpy import zb_protocol_pb2 as zb
perm = PermEntry(zb.PermissionLevel.READ, zb.PermissionAudienceType.PUBLIC, '')
zb_client.add_permission('TABLE ID', perm)
```
### Retrieving data and Pagination
When using the functions `list_keys()`, `get()`, and `query()`, the data is returned as a `GetPages` object. A `GetPages` objects can be iterated over or turned into a Pandas dataframes using the `to_dataframe()` method (both demonstrated below).
#### Retrieving keys from table
```python
list_keys = zb_client.list_keys('TABLE ID')
keys = [key for key in list_keys]
```
#### Retrieving data by key
```python
result = zb_client.get('TABLE ID', ['KEY 1', 'KEY 2', 'KEY 3', 'etc.'])
dataframe = result.to_dataframe()
```
### Retrieving data as objects
The `return_pretty` method will pre-parse JSON objects for you.
```python
result = zb_client.get('TABLE ID', ['KEY 1', 'KEY 2', 'KEY 3', 'etc.'])
result.return_pretty()
for i in result:
print(i)
```
#### Retrieving data by query
To query data from Zetabase, we have a Python-based DSL ("domain-specific language") that allows you to express queries. The idea is to use `Field` objects to represent indexed fields and to build queries based on them. We can then use comparison operators on each field to create a subquery, and we can combine subquery with logical operators. See [the documentation for more information](https://zetabase.io/docs/#/keyvalue).
The example below assumes that a table exists with indexed fields 'age' and 'name'. Queries use '&' and '|' for 'and' and 'or' operators -- for that reason, use parentheses to avoid operator precedence issues.
```python
from zbpy.queries import Field
age = Field('age')
name = Field('name')
query = ((age == 19) | ((age > 25) & (age <= 27))) & (name == 'Austin')
result = zb_client.query('TABLE ID', query)
for i in result:
print(i)
```
### Inserting data
To insert a Pandas dataframe into an existing table, use the `put_dataframe()` method. Each row of the dataframe will be inserted as its own object, the collection of which is identified by a key: the `df_key` parameter. Dataframes can be appended to one another by simply storing a new dataframe using the same `df_key` on the same table as an existing dataframe.
```python
zb_client.put_dataframe('TABLE ID', YOUR DATAFRAME, 'YOUR DF KEY')
```
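For example, appending a second dataframe to one stored earlier (placeholder names as above; the second call appends because it reuses the same table and `df_key`):
```python
zb_client.put_dataframe('TABLE ID', FIRST DATAFRAME, 'YOUR DF KEY')
zb_client.put_dataframe('TABLE ID', SECOND DATAFRAME, 'YOUR DF KEY')
```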
To insert data without Pandas, we can use `put_data` for a single object, or `put_multi` for a list of objects:
```python
zb_client.put_data('TABLE ID', 'DATA KEY', DATA AS BYTES)
zb_client.put_multi('TABLE ID', ['KEY 1', 'KEY 2', 'KEY 3', 'etc.'], [DATA1 AS BYTES, DATA2 AS BYTES, etc.])
```
#### Notes
1. For performance reasons, to insert multiple pieces of data, it is suggested to use the `put_multi()` method.
2. When possible, if storing large quantities of data, it is faster to use JWT over ECDSA if possible.
| zbpy | /zbpy-0.1.1.tar.gz/zbpy-0.1.1/README.md | README.md |
# zBrac : A multilanguage tool for zTree
Created by Ali Seyhun Saral and Anna Schroeter
Licensed under GNU General Public License v3.0
## About the project
zBrac is a tool designed for easy modification of the text in z-Tree treatment files. With zBrac, you can export specified text into *a language file* and import it back after the text modification or translation.
The main advantage of using zBrac for z-Tree experiment development is that the text in z-Tree files can be modified easily at any time, which means the coding process and the text design/translation process can be done independently.
zBrac's design is specifically tailored for multicultural studies: by using the software, you can code your experiment once and send the original language file for translation. zBrac also tries to tackle encoding issues with special characters in z-Tree by offering different encoding options.
zBrac is particularly useful when the treatment file contains the same piece of text several times. Such duplicate text is very common in z-Tree programming, as stage tree elements often need to be copy-pasted. zBrac recognizes each unique key as a single key, so it is enough to provide the replacement text for a key once; see the Holt and Laury measure example below.
**zBrac is free/open-source software (see the GNU GPL-3.0 license). You can use, modify, and distribute it. Citing the software is not obligatory, although it is highly appreciated (see below about citing the paper).**
### Citing the software
Currently, a paper about zBrac has been submitted to a scientific journal. If you use the software and would like to cite the paper, please visit this page or contact us before you share your manuscript to check the status of the paper.
For more information, visit project page: www.zbrac.com
| zbrac | /zbrac-1.0.10.tar.gz/zbrac-1.0.10/README.md | README.md |
# Zest
A function-oriented testing framework for Python 3.
Written by Zack Booth Simpson, 2020
Available as a pip package: `pip install zbs.zest`
# Motivation
Python's default unittest module is a class-oriented approach that
does not lend itself well to recursive setup and teardown.
Zest uses a recursive function-based approach best demonstrated
with examples.
```python
##########################################
# some_module.py

def _say_hello():
    print("Hello")

def unit_under_test(a):
    if a <= 0:
        raise ValueError("a should be positive")
    _say_hello()
    return a + 1

##########################################
# zest_some_module.py

from zest import zest
import some_module

def zest_unit_under_test():
    # This is a root-level zest because it starts with "zest_"

    def it_raises_on_non_positive():
        def it_raises_on_negative():
            with zest.raises(ValueError):
                some_module.unit_under_test(-1)

        def it_raises_on_zero():
            with zest.raises(ValueError):
                some_module.unit_under_test(0)

        zest()  # Note this call which tells zest to run the above two tests

    def it_calls_say_hello_once():
        with zest.mock(some_module._say_hello) as m_say_hello:
            some_module.unit_under_test(1)  # 1, not 0: a non-positive argument would raise before _say_hello runs
            assert m_say_hello.called_once()

    zest()  # Same here, this will cause it_raises_on_non_positive and it_calls_say_hello_once to run
```
The zest() function uses stack reflection to call each function that
it finds in the caller's stack-frame. However, it only calls functions
that do not start with an underscore.
Two special functions are reserved: _before() and _after()
which are called before/after _each_ test function in the scope.
For example, often you may want to set up some complex state.
```python
def zest_my_test():
    state = None

    def _before():
        nonlocal state
        state = State(1, 2, 3)

    def it_raises_if_bad():
        with zest.raises(Exception):
            unit_under_test(state)

    def it_modifies_state_on_1():
        unit_under_test(state, 1)
        assert state.foo == 1

    def it_modifies_state_on_2():
        unit_under_test(state, 2)
        assert state.foo == 2

    zest()
# Examples
See `./zests/zest_examples.py` for more examples.
# Usage
Search recursively all directories for def zest_*() functions and execute them.
```bash
$ zest
```
Show progress
```bash
$ zest --verbose=0 # Show no progress
$ zest --verbose=1 # Show "dot" progress (default)
$ zest --verbose=2 # Show hierarchical full progress
```
Search only inside the specific dirs
```bash
$ zest --include_dirs=./abc:./def
```
Run only tests that are in the "integration" or "slow" groups
```bash
$ zest --groups=integration:slow
```
Run only tests that contain the string "foobar". This will also
run any parent test needed to execute the match.
```bash
$ zest foobar
```
Disable test order shuffling, which is on by default to increase the
likelihood that accidental order dependencies become manifest.
```bash
$ zest --disable_shuffle
```
# Helpers
## Expected exceptions
```python
def zest_foobar_should_raise_on_no_arguments():
    with zest.raises(ValueError):
        foobar()
```
Sometimes you may wish to check a property of the trapped exception
```python
def zest_foobar_should_raise_on_no_arguments():
    with zest.raises(ValueError) as e:
        foobar()
    assert e.exception.args == ("bad juju",)
```
Often you may wish to check only that some property of the trapped exception contains a given string, in which case you can use the `in_*` arguments to `raises`.
```python
def zest_foobar_should_raise_on_no_arguments():
    with zest.raises(ValueError, in_args="bad juju") as e:
        foobar()
```
## Mocks
```python
import unit_under_test

def zest_foobar():
    with zest.mock(unit_under_test.bar) as m_bar:
        # Suppose unit_under_test.foobar() calls bar()
        m_bar.returns(0)
        unit_under_test.foobar()
        assert m_bar.called_once_with(0)
```
See `zest.MockFunction` for a complete MockFunction API.
# Gotchas
Don't forget to put the zest() call at each level of the test.
If you forget, the zest runner will throw an error along the lines of:
"function did not terminate with a call to zest()..."
```python
def zest_something():
    def it_foos():
        foo()

    def it_bars():
        bar()

    # WRONG! zest() wasn't called here. Error will be thrown when the test is run.
```
Do not mock outside of test functions:
```python
def zest_something():
    with zest.mock(...):
        def it_does_something():
            assert something

        def it_does_something_else():
            assert something

    # The zest() will execute outside of the above "with" statement so
    # the two tests will not inherit the mock as expected.
    zest()
```
Rather, put the zest() inside the "with mock":
```python
def zest_something():
    with zest.mock(...):
        def it_does_something():
            assert something

        def it_does_something_else():
            assert something

        # This is fine because zest() was called INSIDE the with
        zest()
```
Don't have more than one zest() call in the same scope.
```python
def zest_something():
    with zest.mock(...):
        def it_does_something():
            assert something

        def it_does_something_else():
            assert something

        # Like above example; so far, so good, but watch out...
        zest()

    with zest.mock(...):
        def it_does_yet_another_thing():
            assert something

        # WRONG! A second call to zest() will RE-EXECUTE the above two tests
        # (it_does_something and it_does_something_else) because this
        # second call to zest() doesn't know that it is inside of a with statement.
        # The "with" scope makes it look different but really the following
        # call to zest() and the call to zest above are actually in the same scope.
        zest()
```
When asserting on properties of an expected exception,
be sure to do the assert outside the scope of the "with", as demonstrated:
Wrong:
```python
with zest.raises(SomeException) as e:
    something_that_raises()
    assert e.exception.property == "something"
    # The above "assert" will NOT be run because the exception thrown by
    # something_that_raises() will be caught and never get to execute the assert!
```
Right:
```python
with zest.raises(SomeException) as e:
    something_that_raises()

assert e.exception.property == "something"
# (Note the reference to "e.exception." as opposed to "e.")
```
Remember that the exception returned from a zest.raises() is
*not* of the type you are expecting but rather of a wrapper
class called `TrappedException`. To get to the properties
of interest you need to use `e.exception.*`.
Wrong:
```python
with zest.raises(SomeException) as e:
    something_that_raises()

assert e.property == "something"
# Wrong! e is of type TrappedException, therefore the above will not work as expected.
```
Right:
```python
with zest.raises(SomeException) as e:
    something_that_raises()

assert e.exception.property == "something"
# Correct: .exception gets the original exception from the `e` TrappedException wrapper.
```
# Development
## Run in development mode
```bash
pipenv shell
pipenv sync
# Run all the example tests (which actually test the tester itself).
$ ./zest.sh
```
## Deploy
```bash
$ ./deploy.sh
```
You will need the user and password and credentials for Pypi.org
# TODO
* Convert over to using logging correctly
* When debug mode is on in ui, and a test runs, you don't see the success increment
* --ui fails is broken
* When match string matches nothing it is confusing. Need "nothing was run"
* Add a "slowest last" to UI
* Add "raises" to mock and stack mock. And error if BOTh returns and raises are set
* Add --rng_seed option
* Make searches more clear -- currently hard-coded to only search "zests" directories
* Harden failed imports on zest runner AST import
* Mirror debugger-like stack introspection into a set of check-like helpers for type, arrays, etc.
* Add a zest internal test that _after is called even if the subsequent test exceptions (that is, _after is in a finally block)
* Add coverage
| zbs.zest | /zbs.zest-1.1.32.tar.gz/zbs.zest-1.1.32/README.md | README.md |
import os
import ast
import pkgutil
import sys
from typing import List
from dataclasses import dataclass
from importlib import util

from zest.zest import log, check_allow_to_run

def _walk_include_dirs(root, include_dirs):
    """
    Generator to walk from root through all include_dirs
    finding any folder that is called "/zests/"

    Arguments:
        root: String
            Root folder
        include_dirs: String
            Colon-delimited list of paths to search relative to root
    """
    for folder in (include_dirs or "").split(":"):
        for curr, dirs, _ in os.walk(os.path.abspath(os.path.join(root, folder))):
            # os.walk allows modifying the dirs. In this case, skip hidden
            dirs[:] = [d for d in dirs if d[0] != "."]
            if curr.endswith("/zests"):
                yield curr

@dataclass
class FoundZest:
    name: str
    groups: []
    errors: []
    children: []
    skip: str = None

def _recurse_ast(path, lineno, body, func_name=None, parent_name=None):
    """
    TODO

    body:
        The body of the module or function that is being analyzed
    parent_name:
        If not a module, this will contain the name of the parent

    For example:

        some_module:
            def zest_root_1():
                def it_foos():
                    pass

                @zest.group("group1")
                def it_bars():
                    pass

                zest()

            def zest_root_2():
                def it_goos():
                    pass

                @zest.skip("Ignore me")
                def it_zoos():
                    pass

                zest()

                def it_bad_zest_declared_after_the_call_to_zest():
                    pass

        _recurse_ast("./path/some_module.py", 0, body_of_some_module, None, None)

        Which will return:
            [
                FoundZest(
                    name="zest_root_1",
                    groups=[],
                    errors=[],
                    children=[
                        FoundZest("it_foos"),
                        FoundZest("it_bars", groups=["group1"]),
                    ],
                    skip=None,
                ),
                FoundZest(
                    name="zest_root_2",
                    groups=[],
                    errors=["it_bad_zest_declared_after_the_call_to_zest was declared...."],
                    children=[
                        FoundZest("it_goos"),
                        FoundZest("it_zoos", skip="Ignore me"),
                    ],
                    skip=None,
                ),
            ]

    The tricky thing here is that _recurse_ast is called in two contexts:
        1. At a module level where parent_name and func_name are None
        2. On a function where func_name is not None and parent_name
           might be None if this is a root-level test.

    In other words:
        If func_name is None then "body" is a MODULE.
        If func_name is not None and parent_name is None then "body" is a root-level FUNCTION
        If func_name is not None and parent_name is not None then "body" is a child FUNCTION

    Errors apply only to FUNCTIONS, not to modules.
    """
    is_module_level = func_name is None
    is_root_zest = func_name is not None and parent_name is None

    # Will be incremented for each def that is found in this context
    # that does not start with an underscore.
    n_test_funcs = 0

    # Flag that will be set if any zest() call is found (there should be only one, at the end!)
    found_zest_call = False

    # Flag that will be set if any test function is declared AFTER the call to zest()
    found_zest_call_before_final_func_def = False

    # The zests found in this context
    found_zests = []

    errors = []

    for i, part in enumerate(body):
        if isinstance(part, ast.With):
            _found_zests, _errors = _recurse_ast(
                path, part.lineno, part.body, func_name, parent_name
            )
            found_zests += _found_zests
            errors += _errors

        if isinstance(part, ast.FunctionDef):
            this_zest_groups = []
            this_zest_skip_reason = None

            if (is_module_level and part.name.startswith("zest_")) or (
                not is_module_level and not part.name.startswith("_")
            ):
                this_zest_name = part.name

                if part.decorator_list:
                    for dec in part.decorator_list:
                        if isinstance(dec, ast.Call):
                            if isinstance(dec.func, ast.Attribute):
                                if dec.func.attr == "group":
                                    group = dec.args[0].s
                                    this_zest_groups += [group]
                                elif dec.func.attr == "skip":
                                    if len(dec.args) == 1:
                                        reason = dec.args[0].s
                                    else:
                                        reason = dec.keywords[0].value.s
                                    this_zest_skip_reason = reason

                # RECURSE
                n_test_funcs += 1
                this_zest_children, this_zest_errors = _recurse_ast(
                    path, part.lineno, part.body, this_zest_name, parent_name
                )

                found_zests += [
                    FoundZest(
                        name=this_zest_name,
                        groups=this_zest_groups,
                        errors=this_zest_errors,
                        children=this_zest_children,
                        skip=this_zest_skip_reason,
                    )
                ]

                if found_zest_call:
                    # A call to zest() has already been seen previously in this context,
                    # therefore it is an error to define another function after this point,
                    # so we set the following flag
                    found_zest_call_before_final_func_def = True

        # Check for the call to "zest()"
        if (
            isinstance(part, ast.Expr)
            and isinstance(part.value, ast.Call)
            and isinstance(part.value.func, ast.Name)
            and part.value.func.id == "zest"
        ):
            found_zest_call = True

    if n_test_funcs > 0 and not is_module_level:
        if found_zest_call_before_final_func_def:
            error_message = "called zest() before all functions were defined."
            errors += [(func_name, path, lineno, error_message)]
        elif not found_zest_call:
            error_message = "did not terminate with a call to zest()"
            errors += [(func_name, path, lineno, error_message)]

    return found_zests, errors

def load_module(root_name, module_name, full_path):
    # TODO: Add cache here?
    spec = util.spec_from_file_location(module_name, full_path)
    mod = util.module_from_spec(spec)
    sys.modules[spec.name] = mod
    spec.loader.exec_module(mod)
    return getattr(mod, root_name)

def _flatten_found_zests(
    found_zests_tree: List[FoundZest], parent_name, parent_groups
) -> List[FoundZest]:
    """
    Convert a tree of found_zests into a flat list, converting
    the names to full names using a dot delimiter.
    """
    ret_list = []
    for found_zest in found_zests_tree or []:
        found_zest.name = (
            parent_name + "." if parent_name is not None else ""
        ) + found_zest.name
        _parent_groups = set(parent_groups) | set(found_zest.groups)
        found_zest.groups = list(_parent_groups)
        children = _flatten_found_zests(
            found_zest.children, found_zest.name, _parent_groups
        )
        found_zest.children = None
        ret_list += [found_zest]
        ret_list += children
    return ret_list

def find_zests(
    root,
    include_dirs,
    allow_to_run=None,
    allow_files=None,
    match_string=None,
    exclude_string=None,
    bypass_skip=None,
    groups=None,
    exclude_groups=None,
):
    """
    Traverses the tree looking for /zests/ folders and opens and parses any file found

    Arguments:
        root:
            Root path
        include_dirs: String
            Colon-delimited folders to search
        allow_to_run:
            If not None: a list of full test names (dot-delimited) that will be included,
            plus two specials: "__all__" and "__failed__".
            If the name ends in "." then all children run too.
        allow_files:
            If not None: a list of filenames (without directory) that will be included.
        match_string:
            If not None then any zest full name that *contains* this string will be included.
            Note that match_string only narrows the scope from allow_to_run.
        exclude_string:
            If not None then any zest full name that *contains* these strings will be excluded.
        groups:
            Run only this (colon-delimited) set of groups.
        exclude_groups:
            Do not run this (colon-delimited) set of groups.

    Returns:
        dict of root zests by name -> (module_name, package, path)
        set of full names allowed to run (not all tests under a root have to be allowed)
        list of all errors

    Note, when a zest is identified all of its ancestors are also added to the list.
    Example:
        full_name = "zest_test1.it_does_y.it_does_y1"
        match_string = "it_does_y1"
        Then return_allow_to_run == set(
            "zest_test1",
            "zest_test1.it_does_y",
            "zest_test1.it_does_y.it_does_y1",
        )
    """
    if root is None:
        return {}, {}, []

    n_root_parts = len(root.split(os.sep))

    if groups is not None:
        groups = set(groups)

    if exclude_groups is not None:
        exclude_groups = set(exclude_groups)

    exclude_strings = {}
    if exclude_string is not None:
        exclude_strings = set([i[0] for i in exclude_string])

    return_allow_to_run = set()  # Full names (dot delimited) of all tests to run

    # root_zest_funcs is a dict of entrypoints (root zests) -> (module_name, package, path)
    root_zest_funcs = {}

    errors_to_show = []

    match_string_parts = match_string.split(".") if match_string is not None else []

    for curr in _walk_include_dirs(root, include_dirs):
        for _, module_name, _ in pkgutil.iter_modules(path=[curr]):
            if allow_files is not None:
                if module_name not in allow_files:
                    continue

            path = os.path.join(curr, module_name + ".py")

            # HACK!
            # global debug_hack
            # if path == "/erisyon/plaster/plaster/run/sigproc_v2/zests/zest_sigproc_v2_worker.py":
            #     debug_hack = True
            # else:
            #     debug_hack = False

            with open(path) as file:
                source = file.read()

            try:
                module_ast = ast.parse(source)
            except SyntaxError as e:
                # parent_name, path, lineno, error_message
                errors_to_show += [
                    ("", curr + "/" + module_name, e.lineno, f"Syntax error in {module_name}")
                ]
                break

            found_zests, errors = _recurse_ast(path, 0, module_ast.body)
            assert len(errors) == 0
            found_zests = _flatten_found_zests(found_zests, None, set())

            for found_zest in found_zests:
                full_name = found_zest.name
                full_name_parts = full_name.split(".")
                package = ".".join(curr.split(os.sep)[n_root_parts:])

                allow = check_allow_to_run(allow_to_run, full_name_parts)
                if allow:
                    # If running all or the full_name matches or if the
                    # match_string contains an ancestor match
                    # Eg: match_string == "foo.bar" we have to match on
                    # foo and foo.bar
                    any_parent = all([
                        match_string_parts[i] == full_name_parts[i]
                        for i in range(min(len(match_string_parts), len(full_name_parts)))
                    ])

                    if match_string is None or match_string in full_name or any_parent:
                        # So that you can terminate a match_string like "it_foobars."
                        # we add an extra "." to the end of full_name in this comparison
                        if any([e in full_name + "." for e in exclude_strings]):
                            continue

                        # IGNORE skips
                        if found_zest.skip is not None:
                            # possible skip unless bypassed
                            if bypass_skip is None or bypass_skip != full_name:
                                continue

                        # IGNORE groups not in the groups list or in exclude_groups
                        if found_zest.groups is not None:
                            # If CLI groups is specified and there is no
                            # group in common between the CLI groups and the
                            # groups of this test then skip it.
                            if groups is not None and not set.intersection(
                                set(found_zest.groups), groups
                            ):
                                continue

                            # If CLI exclude_groups is specified and there *is*
                            # a group in common then skip it.
                            if exclude_groups is not None and set.intersection(
                                set(found_zest.groups), exclude_groups
                            ):
                                continue

                        # FIND any errors from this zest:
                        for error in found_zest.errors:
                            errors_to_show += [error]

                        # Include this and all ancestors in the list
                        for i in range(len(full_name_parts)):
                            name = ".".join(full_name_parts[0 : i + 1])
                            return_allow_to_run.update({name})

                        root_zest_funcs[full_name_parts[0]] = (module_name, package, path)

    return root_zest_funcs, return_allow_to_run, errors_to_show

if __name__ == "__main__":
    zests = find_zests(
        ".", "./zests", allow_files="zest_basics.py", allow_to_run="__all__"
    )
    for z in zests:
        print(z)
| zbs.zest | /zbs.zest-1.1.32.tar.gz/zbs.zest-1.1.32/zest/zest_finder.py | zest_finder.py |
import itertools
import copy
import time
import sys
import os
import re
import curses
import json
import traceback
import logging
from collections import OrderedDict
from pathlib import Path
from collections import defaultdict
from zest.zest import log, strip_ansi, zest
from zest.zest_display import colorful_exception, traceback_match_filename
from zest.zest_runner_single_thread import ZestRunnerSingleThread
from zest.zest_runner_multi_thread import (
ZestRunnerMultiThread,
read_zest_result_line,
clear_output_folder,
)
from . import __version__
if os.name == "nt":
import msvcrt
else:
import select
scr = None
ansi_escape = re.compile(r"(?:\x1B[@-_]|[\x80-\x9F])[0-?]*[ -/]*[@-~]")
def _kbhit():
"""
Returns True if a keypress is waiting to be read in stdin, False otherwise.
Base on: https://stackoverflow.com/a/55692274
"""
if os.name == "nt":
return msvcrt.kbhit()
else:
dr, dw, de = select.select([sys.stdin], [], [], 0.1)
return dr != []
def _num_key_to_int(key):
return ord(key) - ord("0")
# States
# ----------------------------------------------------------------------------
STOPPED = 0
LOADING = 1
RUNNING = 2
STOPPING = 3
WATCHING = 4
run_state_strs = [
"Stopped",
"Loading",
"Running",
"Stopping (^C to force)",
"Watching",
]
# Draw
# ----------------------------------------------------------------------------
PAL_NONE = 0
PAL_MENU = 1
PAL_MENU_KEY = 2
PAL_MENU_TITLE = 3
PAL_MENU_RUN_STATUS = 4
PAL_NAME = 5
PAL_NAME_SELECTED = 6
PAL_STATUS = 7
PAL_SUCCESS = 8
PAL_FAIL = 9
PAL_SKIPPED = 10
PAL_FAIL_KEY = 11
PAL_ERROR_LIB = 12
PAL_ERROR_PATHNAME = 13
PAL_ERROR_FILENAME = 14
PAL_ERROR_CONTEXT = 15
PAL_ERROR_MESSAGE = 16
PAL_ERROR_BASE = 17
PAL_ERROR_LINENO = 18
PAL_LINE = 19
PAL_STDOUT = 20
PAL_STDERR = 21
PAL_STATUS_KEY = 22
PAL_SUCCESS_BOX = 23
PAL_ERROR_BOX = 24
pal = [
# PAL_NONE
(-1, -1, 0),
# PAL_MENU
(curses.COLOR_BLACK, curses.COLOR_WHITE, 0),
# PAL_MENU_KEY
(curses.COLOR_RED, curses.COLOR_WHITE, curses.A_BOLD),
# PAL_MENU_TITLE
(curses.COLOR_BLUE, curses.COLOR_WHITE, 0),
# PAL_MENU_RUN_STATUS
(curses.COLOR_CYAN, curses.COLOR_WHITE, 0),
# PAL_NAME
(curses.COLOR_CYAN, -1, 0),
# PAL_NAME_SELECTED
(curses.COLOR_CYAN, -1, curses.A_BOLD),
# PAL_STATUS
(curses.COLOR_CYAN, -1, 0),
# PAL_SUCCESS
(curses.COLOR_GREEN, -1, 0),
# PAL_FAIL
(curses.COLOR_RED, -1, curses.A_BOLD),
# PAL_SKIPPED
(curses.COLOR_YELLOW, -1, 0),
# PAL_FAIL_KEY
(curses.COLOR_RED, -1, curses.A_BOLD),
# PAL_ERROR_LIB
(curses.COLOR_BLACK, -1, curses.A_BOLD),
# PAL_ERROR_PATHNAME
(curses.COLOR_YELLOW, -1, 0),
# PAL_ERROR_FILENAME
(curses.COLOR_YELLOW, -1, curses.A_BOLD),
# PAL_ERROR_CONTEXT
(curses.COLOR_MAGENTA, -1, curses.A_BOLD),
# PAL_ERROR_MESSAGE
(curses.COLOR_RED, -1, curses.A_BOLD),
# PAL_ERROR_BASE
(curses.COLOR_WHITE, -1, 0),
# PAL_ERROR_LINENO
(curses.COLOR_YELLOW, -1, curses.A_BOLD),
# PAL_LINE
(curses.COLOR_RED, -1, curses.A_BOLD),
# PAL_STDOUT
(curses.COLOR_YELLOW, -1, 0),
# PAL_STDERR
(curses.COLOR_YELLOW, -1, curses.A_BOLD),
# PAL_STATUS_KEY
(curses.COLOR_RED, -1, curses.A_BOLD),
# PAL_SUCCESS_BOX
(curses.COLOR_GREEN, curses.COLOR_WHITE, curses.A_BOLD),
# PAL_ERROR_BOX
(curses.COLOR_RED, curses.COLOR_WHITE, curses.A_BOLD),
]
def addstr(y, x, txt, mode):
try:
scr.addstr(y, x, txt, mode)
except Exception:
pass
def _print(y, x, *args):
def words_and_spaces(s):
# Inspired by http://stackoverflow.com/a/8769863/262271
return list(
itertools.chain.from_iterable(zip(s.split(), itertools.repeat(" ")))
)[:-1]
height = curses.LINES
width = curses.COLS
_y = y
_x = x
mode = pal[PAL_MENU][2] | curses.color_pair(PAL_MENU)
for arg in args:
if isinstance(arg, int):
mode = pal[arg][2] | curses.color_pair(arg)
elif isinstance(arg, tuple):
# this is a single line with formatting, so no word wrapping
_x = x
for a in arg:
if isinstance(a, int):
mode = pal[a][2] | curses.color_pair(a)
else:
a = strip_ansi(str(a))
addstr(_y, _x, a, mode)
_x += len(a)
_y += 1
_x = x
else:
arg = str(arg)
lines = arg.split("\n")
for line_i, line in enumerate(lines):
line = strip_ansi(line)
if _y >= height:
break
len_line = len(line)
if _x + len_line <= width:
addstr(_y, _x, line, mode)
_x += len_line
else:
# Word-wrap
for word in words_and_spaces(line):
if len(word) + _x <= width:
addstr(_y, _x, word, mode)
else:
_y += 1
_x = x
if y >= height - 1:
# Can't go down another line
break
addstr(_y, _x, word, mode)
_x += len(word)
if line_i > 0:
_x = x
_y += 1
_y += 1
return _y, _x
def _up_to_first_space(s):
return s.split(" ", 1)[0]
def draw_menu_fill_to_end_of_line(y, length):
rows, cols = scr.getmaxyx()
if cols - length > 0:
addstr(y, length, f"{' ':<{cols - length}}", curses.color_pair(PAL_MENU))
def draw_title_bar(debug_mode):
y = 0
_, length = _print(
0,
0,
PAL_MENU_TITLE,
f"Zest-Runner v{__version__} ",
PAL_MENU_KEY,
"q",
PAL_MENU,
"uit ",
PAL_MENU,
"run ",
PAL_MENU_KEY,
"a",
PAL_MENU,
"ll ",
PAL_MENU,
"run ",
PAL_MENU_KEY,
"f",
PAL_MENU,
"ails ",
PAL_MENU_KEY,
"c",
PAL_MENU,
"lear ",
PAL_MENU_KEY,
"d",
PAL_MENU,
f"ebug mode:",
PAL_MENU_RUN_STATUS,
'ON' if debug_mode else 'OFF',
" ",
)
draw_menu_fill_to_end_of_line(0, length)
y += 1
return y
def draw_status(y, run_state, match_string, current_running_tests_by_worker_i, n_workers):
_print(
y,
0,
PAL_STATUS_KEY,
"M",
PAL_NONE,
"atch : ",
PAL_STATUS,
match_string or "",
PAL_NONE,
"",
)
y += 1
# _print(
# y, 0, PAL_STATUS_KEY, "C", PAL_NONE, "apture : ", PAL_STATUS, str(capture),
# )
# y += 1
worker_iz = sorted(current_running_tests_by_worker_i.keys())
state_color = PAL_STATUS if run_state == STOPPED else PAL_NAME_SELECTED
_print(
y, 0, PAL_NONE, "Status : ", state_color, run_state_strs[run_state] + " ",
)
y += 1
for worker_i in range(n_workers):
_print(y, 0, PAL_ERROR_LIB, f"{worker_i:>2}) ")
if worker_i < len(current_running_tests_by_worker_i):
name_stack = (current_running_tests_by_worker_i[worker_i] or "").split(".")
_print(
y,
4,
PAL_NAME_SELECTED,
name_stack[0],
PAL_NAME,
"" if len(name_stack[1:]) == 0 else ("." + ".".join(name_stack[1:])),
)
y += 1
return y
def draw_summary(y, n_success, n_errors, n_skips):
_print(
y,
0,
PAL_NONE,
"Last run: ",
PAL_SUCCESS,
"Success",
PAL_NONE,
" + ",
PAL_FAIL,
"Fails",
PAL_NONE,
" + ",
PAL_SKIPPED,
"Skipped",
PAL_NONE,
" = ",
PAL_SUCCESS,
str(n_success),
PAL_NONE,
" + ",
PAL_FAIL,
str(n_errors),
PAL_NONE,
" + ",
PAL_SKIPPED,
str(n_skips),
PAL_NONE,
" = ",
PAL_NONE,
str(n_success + n_errors + n_skips),
)
y += 1
return y
def _errors_from_results(zest_results_by_full_name):
return [res for res in zest_results_by_full_name.values() if res.error is not None]
def draw_fail_lines(y, fails_panel_page, zest_results_by_full_name, root, show_result_full_name):
errors = _errors_from_results(zest_results_by_full_name)
n_errors = len(errors)
if n_errors > 0:
_print(y, 0, PAL_NONE, f"Failed tests {fails_panel_page=}:")
y += 1
for i, error in enumerate(errors[9*fails_panel_page:]):
if i >= 9:
break
name = error.full_name
formatted = error.error_formatted
lines = []
for line in formatted:
lines += [sub_line for sub_line in line.strip().split("\n")]
if len(lines) >= 3:
last_filename_line = lines[-3]
split_line = traceback_match_filename(root, last_filename_line)
if not split_line:
split_line = ("", "", "", "", False)
leading, basename, lineno, context, is_libs = split_line
selected = (
show_result_full_name is not None and show_result_full_name == name
)
_print(
y,
0,
PAL_FAIL_KEY,
str(i + 1),
PAL_NONE,
" ",
PAL_NAME_SELECTED if selected else PAL_NAME,
name,
PAL_ERROR_BASE,
" raised: ",
PAL_ERROR_MESSAGE,
_up_to_first_space(error.error),
PAL_ERROR_BASE,
" ",
PAL_ERROR_PATHNAME,
basename,
PAL_ERROR_BASE,
":",
PAL_ERROR_PATHNAME,
str(lineno),
)
y += 1
if n_errors > 9:
_print(
y, 0,
PAL_ERROR_BASE, f"page {fails_panel_page+1} of {(n_errors // 9) + 1} ",
PAL_STATUS_KEY, "n", PAL_ERROR_BASE, "ext ",
PAL_STATUS_KEY, "p", PAL_ERROR_BASE, "revious", )
y += 1
return y
def draw_warnings(y, warnings):
for i, warn in enumerate(warnings):
_print(
y, 0, PAL_ERROR_BASE, f"WARNING {i}: {warn}",
)
y += 1
return y
def draw_result_details(y, detail_panel_scroll_top, root, zest_result):
log(f"IN draw_result_details {detail_panel_scroll_top=}")
if zest_result is None:
return y
# if run_state == WATCHING:
# _, length = _print(
# y, 0, PAL_MENU, "Watching: ", PAL_MENU_RUN_STATUS, watch_file,
# )
# draw_menu_fill_to_end_of_line(y, length)
# y += 1
_, length = _print(
y, 0, PAL_MENU, "Test result: ", PAL_MENU_RUN_STATUS, zest_result.full_name,
)
draw_menu_fill_to_end_of_line(y, length)
y += 1
_, length = _print(
y,
0,
PAL_MENU_KEY,
"r",
PAL_MENU,
"e-run this test ",
# TODO
# PAL_MENU_KEY,
# "w",
# PAL_MENU,
# "atch test file (auto-re-run) ",
PAL_MENU_KEY,
"h",
PAL_MENU,
"ide this view ",
PAL_MENU_KEY,
"up/down ",
PAL_MENU,
"scroll below ",
)
draw_menu_fill_to_end_of_line(y, length)
y += 1
if zest_result.is_running is True:
_print(y, 0, PAL_NONE, "Runnning...")
y += 1
elif zest_result.error is None:
_print(y, 0, PAL_SUCCESS, "Passed!")
y += 1
else:
formatted = zest_result.error_formatted
# Render all of the details lines list
# Some of those lines will be tuples when we want _print to
# render with formatting otherwise they will be strings
# when the _print is stripping out escape characters
lines = []
formatted_split_lines = []
for line in formatted:
formatted_split_lines += [sub_line for sub_line in line.strip().split("\n")]
is_libs = False
for line in formatted_split_lines:
# s is a single line with formatting
s = []
split_line = traceback_match_filename(root, line)
if split_line is None:
s += [PAL_ERROR_LIB if is_libs else PAL_ERROR_BASE, line]
else:
leading, basename, lineno, context, is_libs = split_line
if is_libs:
s += [PAL_ERROR_LIB, "File ", leading, "/", basename]
s += [PAL_ERROR_LIB, ":", str(lineno)]
s += [PAL_ERROR_LIB, " in function "]
s += [PAL_ERROR_LIB, context]
else:
s += [
PAL_ERROR_BASE,
"File ",
PAL_ERROR_PATHNAME,
leading,
PAL_ERROR_BASE,
"/ ",
PAL_ERROR_FILENAME,
basename,
PAL_ERROR_BASE,
":",
PAL_ERROR_LINENO,
str(lineno),
PAL_ERROR_BASE,
" in function ",
]
s += [(PAL_ERROR_MESSAGE, context)]
lines += [tuple(s)]
error_message = str(zest_result.error).strip()
if error_message != "":
lines += [(PAL_ERROR_MESSAGE, error_message)]
if zest_result.stdout is not None and zest_result.stdout != "":
lines += [(PAL_NONE, "\n")]
lines += [(PAL_NONE, "Stdout:")]
for l in zest_result.stdout.split("\n"):
lines += [(PAL_STDOUT, l)]
if zest_result.stderr is not None and zest_result.stderr != "":
lines += [(PAL_NONE, "\n")]
lines += [(PAL_NONE, "Stderr:")]
for l in zest_result.stderr.split("\n"):
lines += [(PAL_STDERR, l)]
if zest_result.logs is not None and zest_result.logs != "":
lines += [(PAL_NONE, "\n")]
lines += [(PAL_NONE, "Logs:")]
for l in zest_result.logs.split("\n"):
lines += [(PAL_STDERR, l)]
# RENDER those lines starting at scroll offset
for line in lines[detail_panel_scroll_top:-1]:
_print(y, 0, line)
y += 1
return y
def load_results(zest_results_path):
"""
Returns: zest_results_by_full_name
"""
zest_results = []
paths = sorted(Path(zest_results_path).iterdir(), key=os.path.getmtime)
for res_path in paths:
with open(res_path) as fd:
for zest_result in read_zest_result_line(fd):
zest_results += [(zest_result.full_name, zest_result)]
zest_results_by_full_name = OrderedDict(zest_results)
return zest_results_by_full_name
def _run(
_scr, **kwargs,
):
global scr
scr = _scr
num_keys = [str(i) for i in range(1, 10)]
run_state = None
dirty = True
current_running_tests_by_worker_i = {}
n_success = 0
n_errors = 0
n_skips = 0
show_result_full_name = None
run_state = STOPPED
warnings = []
runner = None
failed_to_run = []
debug_mode = kwargs.get("debug_mode", False)
request_run = None
request_stop = False # Stop the current run
request_end = False # Stop current app (is set concurrently with request_stop)
zest_results_path = Path(kwargs.pop("output_folder", ".zest_results"))
root = kwargs["root"]
match_string = kwargs["match_string"]
state_filename = ".zest_state.json"
show_result_box = False
go = kwargs.get("go", False)
detail_panel_scroll_top = 0
fails_panel_page = 0
n_workers = kwargs.get("n_workers", 1)
n_allowed_to_run = 0
def save_state():
try:
with open(state_filename, "w") as f:
f.write(json.dumps(dict(debug_mode=debug_mode, match_string=match_string)))
except Exception:
pass
def load_state():
try:
with open(state_filename, "r") as f:
state = json.loads(f.read())
nonlocal debug_mode, match_string
if state.get("debug_mode", None) is not None:
debug_mode = state.get("debug_mode")
if state.get("match_string", None) is not None and match_string is None:
match_string = state.get("match_string")
except Exception:
pass
load_state()
if go:
clear_output_folder(zest_results_path)
request_run = "__all__"
dirty = True
def render():
nonlocal dirty
if not dirty:
return
dirty = False
n_run = n_success + n_errors + n_skips
scr.clear()
y = draw_title_bar(debug_mode)
y = draw_status(y, run_state, match_string, current_running_tests_by_worker_i, n_workers)
y = draw_summary(y, n_success, n_errors, n_skips)
y = draw_warnings(y, warnings)
draw_fail_lines(y + 1, fails_panel_page, zest_results_by_full_name, root, show_result_full_name)
y = draw_result_details(
y + 13, detail_panel_scroll_top, root, zest_results_by_full_name.get(show_result_full_name),
)
scr.refresh()
if show_result_box:
scr_h = curses.LINES
scr_w = curses.COLS
h = 7
w = 40
y = 11
x = (scr_w - w) // 2
win2 = scr.subwin(h, w, y, x)
win2.clear()
n_failed_to_run = len(failed_to_run)
if n_allowed_to_run != n_run:
assert n_failed_to_run == n_allowed_to_run - n_run
h = n_failed_to_run + 7
w = 80
y = 11
x = (scr_w - w) // 2
win2 = scr.subwin(h, w, y, x)
win2.clear()
win2.attrset(curses.color_pair(PAL_ERROR_BOX))
win2.box()
msg = f"WARNING! {n_allowed_to_run-n_run} failed to run."
win2.addstr(2, (w - len(msg)) // 2, msg, curses.color_pair(PAL_ERROR_BOX))
for i, fail in enumerate(failed_to_run):
msg = fail
win2.addstr(4 + i, 4, msg, curses.color_pair(PAL_ERROR_BOX))
win2.bkgd(' ', curses.color_pair(PAL_ERROR_BOX))
elif n_errors == 0:
win2.attrset(curses.color_pair(PAL_SUCCESS_BOX))
win2.box()
msg = f"SUCCESS!"
win2.addstr(h // 2, (w - len(msg)) // 2, msg, curses.color_pair(PAL_SUCCESS_BOX))
win2.bkgd(' ', curses.color_pair(PAL_SUCCESS_BOX))
else:
win2.attrset(curses.color_pair(PAL_ERROR_BOX))
win2.box()
msg = "ERRORS!"
win2.addstr(h // 2, (w - len(msg)) // 2, msg, curses.color_pair(PAL_ERROR_BOX))
win2.bkgd(' ', curses.color_pair(PAL_ERROR_BOX))
win2.refresh()
def callback(zest_result):
nonlocal dirty, current_running_tests_by_worker_i, n_errors, n_success
dirty = True
worker_i = zest_result.worker_i
if zest_result.is_starting:
state_message = "START"
elif zest_result.is_running:
state_message = "RUN"
else:
state_message = "DONE"
current_running_tests_by_worker_i[
worker_i
] = f"{state_message:<6s}: {zest_result.full_name}"
if not zest_result.is_running and not zest_result.is_starting:
if zest_result.error is not None:
nonlocal zest_results_by_full_name
zest_results_by_full_name = load_results(zest_results_path)
n_errors += 1
else:
n_success += 1
def update_run_state():
"""
This is the state machine that is called by the main ui thread "zest_ui_thread".
All transitions of state are made here. Other code can set the "request_*" properties
but the state only changes here.
"""
def new_state(state):
nonlocal run_state, dirty
run_state = state
dirty = True
def start_run(allow_to_run):
nonlocal runner, n_errors, n_success, n_skips, dirty, run_state
assert runner is None
# Loading can block for a while, so render an update here before it starts
run_state = LOADING
dirty = True
render()
n_errors, n_success, n_skips = 0, 0, 0
kwargs.pop("capture", None)
kwargs.pop("match_string", None)
kwargs.pop("allow_to_run", None)
runner = ZestRunnerMultiThread(
output_folder=zest_results_path,
callback=callback,
match_string=match_string,
capture=True,
allow_to_run=allow_to_run,
allow_output=False,
**kwargs,
)
run_state = RUNNING
dirty = True
render()
nonlocal request_run, request_stop, runner
nonlocal zest_results_by_full_name
nonlocal n_allowed_to_run, failed_to_run
if run_state == STOPPED:
# Tests are done. The runner_thread should be stopped
# Ways out:
# * request_end can terminate
# * request_run can start a new run
request_stop = False
if request_end:
return False
if request_run is not None:
start_run(request_run)
request_run = None
new_state(RUNNING)
elif run_state == RUNNING:
# Tests are running.
# Ways out:
# * request_stop: Goto STOPPING
# * the "runner_thread" has terminated. Goto STOPPED
# * a new run is requested before the current run has terminated. Goto STOPPING
running = runner.poll(request_stop)
time.sleep(0.05)
if not running or request_stop or request_run is not None:
new_state(STOPPING)
elif run_state == STOPPING:
# Trying to stop.
# Ways out:
# * The runner has terminated. Goto STOPPED
running = runner.poll(True)
if not running:
ran_names = {
result.full_name
for result in runner.results
}
failed_to_run = [
name
for name in runner.allow_to_run
if name not in ran_names
]
n_allowed_to_run = len(runner.allow_to_run)
runner = None
new_state(STOPPED)
zest_results_by_full_name = load_results(zest_results_path)
if not request_end:
nonlocal show_result_box
show_result_box = True
# elif run_state == WATCHING:
# if watch_timestamp != os.path.getmtime(watch_file):
# request_run = ".".join(request_watch[2])
# if request_run is not None:
# run_state = STOPPED
# watch_timestamp = None
# watch_file = None
# request_watch = None
# dirty = True
return True # True means keep running
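# The transitions above, summarized:
# STOPPED -> RUNNING (request_run) or exit (request_end)
# RUNNING -> STOPPING (runner done, request_stop, or a new request_run)
# STOPPING -> STOPPED (the runner thread has terminated)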
curses.use_default_colors()
for i, p in enumerate(pal):
if i > 0:
curses.init_pair(i, pal[i][0], pal[i][1])
zest_results_path.mkdir(parents=True, exist_ok=True)
zest_results_by_full_name = load_results(zest_results_path)
while True:
try:
if not update_run_state():
break
render()
if _kbhit():
key = scr.getkey()
log(f"{key=}")
if show_result_box:
# Any key to exit box
show_result_box = False
dirty = True
if key in num_keys:
errors = _errors_from_results(zest_results_by_full_name)
error_i = (
fails_panel_page * 9 + _num_key_to_int(key) - 1
) # Because they press '1' but mean index '0'
if 0 <= error_i < len(errors):
error = errors[error_i]
if show_result_full_name == error.full_name:
# Already showing, hide it
show_result_full_name = None
else:
show_result_full_name = error.full_name
dirty = True
if key == "h":
show_result_full_name = None
dirty = True
if key == "c":
clear_output_folder(zest_results_path)
zest_results_by_full_name = load_results(zest_results_path)
show_result_full_name = None
dirty = True
if key == "q" or key == "\x1b":
request_stop = True
request_end = True
if key == "a":
request_run = "__all__"
dirty = True
if key == "f":
zest_results_by_full_name = {}
request_run = "__failed__"
dirty = True
if key == "r":
request_run = show_result_full_name
dirty = True
if key == "d":
debug_mode = not debug_mode
dirty = True
if key == "w":
if show_result_full_name is not None:
request_watch = show_result_full_name
dirty = True
if key == "m":
curses.echo()
scr.move(1, 10)
scr.clrtoeol()
s = scr.getstr(1, 10, 15).decode("ascii")
curses.noecho()
match_string = s
dirty = True
# if key == "z":
# show_result_box = not show_result_box
# dirty = True
# TODO Set this correctly
page_size = 20
if key == "KEY_DOWN":
detail_panel_scroll_top += 1
dirty = True
if key == "KEY_UP":
detail_panel_scroll_top -= 1
dirty = True
if key == "KEY_NPAGE":
# Page down
detail_panel_scroll_top += page_size
dirty = True
if key == "KEY_PPAGE":
detail_panel_scroll_top -= page_size
dirty = True
# BOUND scroll
# TODO: Defer this in render time?
# detail_panel_scroll_top = max(0, detail_panel_scroll_top - 1)
if key == "n":
fails_panel_page += 1
dirty = True
if key == "p":
fails_panel_page = max(0, fails_panel_page - 1)
dirty = True
if request_run is not None and debug_mode:
# This is the special debug mode which returns out of the
# curses-based function and tells the caller to
# run this request outside of curses in single threaded mode
return request_run, match_string, 0 # retcode 0; the run resumes after the debug pass
except KeyboardInterrupt:
# First press ^C asks for a graceful shutdown of child processes
# so "request_stop" is set True.
# Second press of ^C force-kill all children and exit
if not request_stop:
# First press of ^C
request_stop = True
else:
if runner:
runner.kill()
# This break exits out of the main UI thread
break
save_state()
retcode = n_errors != 0
return None, None, retcode # Not debug_request
def run(**kwargs):
"""
This is the entrypoint for the runner and uses the curses wrapper
to handle reset of terminal and exception handling.
But when _run returns a non-None debug_request, that means we are in "debug_mode",
meaning that we wish to run that test WITHOUT the curses console.
"""
retcode = 0
while True:
try:
debug_request, match_string, retcode = curses.wrapper(_run, **kwargs)
if debug_request:
# This is a request to run the test in debug_request without curses
# and then start curses back up again
orig_allow_to_run = kwargs.get("allow_to_run", None)
orig_verbose = kwargs.get("verbose", None)
orig_match_string = kwargs.get("match_string", None)
try:
kwargs["allow_to_run"] = debug_request
kwargs["match_string"] = match_string
kwargs["verbose"] = 1
ZestRunnerSingleThread(**kwargs)
finally:
kwargs["match_string"] = orig_match_string
kwargs["verbose"] = orig_verbose
kwargs["allow_to_run"] = orig_allow_to_run
else:
# Clear screen
print("\033c\033[3J\033[2J\033[0m\033[H")
break
except Exception as e:
print("\033c\033[3J\033[2J\033[0m\033[H")
formatted = traceback.format_exception(
etype=type(e), value=e, tb=e.__traceback__
)
colorful_exception(e, formatted, gray_libs=False)
break
return retcode | zbs.zest | /zbs.zest-1.1.32.tar.gz/zbs.zest-1.1.32/zest/zest_console_ui.py | zest_console_ui.py |
import logging
import os
import sys
import time
import inspect
import types
import traceback
import io
import re
import json
import tempfile
import shutil
import dataclasses
import ctypes
import errno
from tempfile import NamedTemporaryFile
from functools import wraps
from contextlib import contextmanager
from random import shuffle
log_fp = None
log_last_time = None
def log(*args):
global log_fp, log_last_time
if log_fp is None:
log_fp = open("zest_log.txt", "a")
delta = 0
if log_last_time is not None:
delta = time.time() - log_last_time
log_last_time = time.time()
#log_fp.write(f"{delta:3.1f} " + "".join([str(i) + " " for i in args]) + "\n")
log_fp.write("".join([str(i) + " " for i in args]) + "\n")
log_fp.flush()
ansi_escape = re.compile(r"(?:\x1B[@-_]|[\x80-\x9F])[0-?]*[ -/]*[@-~]")
# Redirection is re-entrant and pauseable
libc = ctypes.CDLL(None)
redirect_depth = 0
so_orig_fd = sys.stdout.fileno() # The root level handle for stdout (typically == 1)
so_root_save_fd = None # Will be set for the root level stdout so that it can be used in pause
so_c_fd = None # The libc handle so that it can be flushed
so_curr_tmpfile = None # The top of the so stack tmpfile which is needed by pause/resume
se_orig_fd = sys.stderr.fileno() # The root level handle for stderr (typically == 2)
se_root_save_fd = None # Will be set for the root level stderr so that it can be used in pause
se_c_fd = None # The libc handle so that it can be flushed
se_curr_tmpfile = None # The top of the se stack tmpfile which is needed by pause/resume
try:
# Linux
so_c_fd = ctypes.c_void_p.in_dll(libc, 'stdout')
except ValueError:
# OSX
so_c_fd = ctypes.c_void_p.in_dll(libc, '__stdoutp')
try:
# Linux
se_c_fd = ctypes.c_void_p.in_dll(libc, 'stderr')
except ValueError:
# OSX
se_c_fd = ctypes.c_void_p.in_dll(libc, '__stderrp')
def _redirect_stdout(to_fd):
libc.fflush(so_c_fd)
sys.stdout.close()
os.dup2(to_fd, so_orig_fd)
sys.stdout = io.TextIOWrapper(os.fdopen(so_orig_fd, "wb"))
def _redirect_stderr(to_fd):
libc.fflush(se_c_fd)
sys.stderr.close()
os.dup2(to_fd, se_orig_fd)
sys.stderr = io.TextIOWrapper(os.fdopen(se_orig_fd, "wb"))
def human_readable_type_and_value(arg):
"""
Examine the type of arg and emit it in friendly ways.
"""
type_str = str(type(arg).__name__)
val_str = str(arg)
return type_str, val_str
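# A couple of concrete examples of the mapping above:
# human_readable_type_and_value([1, 2]) -> ("list", "[1, 2]")
# human_readable_type_and_value(None) -> ("NoneType", "None")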
class LogTrapFormatter(logging.Formatter):
def format(self, record: logging.LogRecord) -> str:
if hasattr(record, "spy_variable_name"):
# This record contains spy variable fields which are
# colorized differently. See def spy() to see how these fields
# go into this record.
type_str, val_str = human_readable_type_and_value(record.msg)
return f"{record.name}] {record.spy_variable_name}:{type_str} = {val_str}\n"
else:
return f"{record.name}] {record.msg}\n"
log_trap_formatter = LogTrapFormatter()
@contextmanager
def stdio_and_log_capture(should_capture):
"""
Capture stdout in a re-entrant manner. See pause_stdio_capture().
If should_capture is False it simply yields (stdout, stderr, None)
which simplifies conditional "with" clauses. Ie:
with stdio_and_log_capture(should_capture) as (so, se, lg):
important_stuff(so, se)
as opposed to:
if should_capture:
with stdio_and_log_capture(should_capture) as (so, se, lg):
important_stuff(so, se)
else:
# repeating the above
important_stuff(sys.stdout, sys.stderr)
"""
if not should_capture:
yield sys.stdout, sys.stderr, None
else:
global redirect_depth
global so_root_save_fd, so_curr_tmpfile
global se_root_save_fd, se_curr_tmpfile
so_save_fd = os.dup(so_orig_fd)
se_save_fd = os.dup(se_orig_fd)
if redirect_depth == 0:
so_root_save_fd = so_save_fd
se_root_save_fd = se_save_fd
so_tmpfile = NamedTemporaryFile(mode="w+", delete=False)
se_tmpfile = NamedTemporaryFile(mode="w+", delete=False)
lg_tmpfile = NamedTemporaryFile(mode="w+", delete=False)
so_prev_tmpfile = so_curr_tmpfile
se_prev_tmpfile = se_curr_tmpfile
so_curr_tmpfile = so_tmpfile
se_curr_tmpfile = se_tmpfile
root_logger = logging.getLogger("")
orig_root_logger_handlers = root_logger.handlers
trap_handler = logging.StreamHandler(lg_tmpfile)
trap_handler.setLevel(0)
trap_handler.setFormatter(log_trap_formatter)
root_logger.handlers = [trap_handler]
redirect_depth += 1
try:
_redirect_stdout(so_tmpfile.fileno())
_redirect_stderr(se_tmpfile.fileno())
yield (so_tmpfile, se_tmpfile, lg_tmpfile)
_redirect_stderr(se_save_fd)
_redirect_stdout(so_save_fd)
finally:
redirect_depth -= 1
so_tmpfile.close()
se_tmpfile.close()
lg_tmpfile.close()
so_curr_tmpfile = so_prev_tmpfile
se_curr_tmpfile = se_prev_tmpfile
os.close(so_save_fd)
os.close(se_save_fd)
root_logger.handlers = orig_root_logger_handlers
@contextmanager
def pause_stdio_capture():
if redirect_depth > 0:
_redirect_stdout(so_root_save_fd)
_redirect_stderr(se_root_save_fd)
yield
_redirect_stdout(so_curr_tmpfile.fileno())
_redirect_stderr(se_curr_tmpfile.fileno())
else:
yield
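# A minimal sketch of how the re-entrant capture and pause nest (assumed
# usage; the real call sites are zest.do() and the runners, which pause
# only long enough to emit progress output to the real terminal):
#
# with stdio_and_log_capture(True) as (so, se, lg):
# print("lands in so's tmpfile")
# with pause_stdio_capture():
# print("lands on the real terminal")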
def strip_ansi(line):
return ansi_escape.sub("", line)
def get_class_or_module_that_defined_method(meth):
# From https://stackoverflow.com/questions/3589311/get-defining-class-of-unbound-method-object-in-python-3/25959545#25959545
if inspect.ismethod(meth):
for cls in inspect.getmro(meth.__self__.__class__):
if cls.__dict__.get(meth.__name__) is meth:
return cls
meth = meth.__func__
if inspect.isfunction(meth):
cls = getattr(
inspect.getmodule(meth),
meth.__qualname__.split(".<locals>", 1)[0].rsplit(".", 1)[0],
)
if isinstance(cls, type):
return cls
else:
return inspect.getmodule(meth)
class TrappedException(Exception):
"""
This will be passed back from a with zest.raises(SomeException) as e.
It has one parameter: exception.
Example:
with zest.raises(SomeException) as e:
something()
assert e.exception.property == 1
"""
pass
class SkipException(Exception):
def __init__(self, full_name, reason):
self.full_name = full_name
self.reason = reason
class MockFunction:
def __init__(self, replacing_func=None):
if replacing_func is not None:
self.arg_spec = inspect.getfullargspec(replacing_func)
else:
self.arg_spec = None
self.list_of_exceptions_to_raise = None
self.exception_to_raise = None
self.list_of_values_to_return = None
self.value_to_return = None
self.hook_to_call = None
self.calls = []
def __call__(self, *args, **kwargs):
self.calls += [(args, kwargs)]
if self.hook_to_call is not None:
return self.hook_to_call(*args, **kwargs)
# EXCEPTION from series or repeatedly if requested
if self.list_of_exceptions_to_raise is not None:
if len(self.list_of_exceptions_to_raise) == 0:
raise AssertionError(
"mock was called more times than the list_of_exceptions_to_raise had elements"
)
raise self.list_of_exceptions_to_raise.pop(0)
if self.exception_to_raise is not None:
raise self.exception_to_raise
# RETURN from series or repeatedly
if self.list_of_values_to_return is not None:
if len(self.list_of_values_to_return) == 0:
raise AssertionError(
"mock was called more times than the list_of_values_to_return had elements"
)
return self.list_of_values_to_return.pop(0)
return self.value_to_return
@property
def n_calls(self):
return len(self.calls)
def reset(self):
self.calls = []
def hook(self, fn_to_call):
self.hook_to_call = fn_to_call
def returns(self, value_to_return):
self.value_to_return = value_to_return
def returns_serially(self, list_of_values_to_return):
self.list_of_values_to_return = list_of_values_to_return
def exceptions(self, exception_to_raise):
self.exception_to_raise = exception_to_raise
def exceptions_serially(self, list_of_exceptions_to_raise):
self.list_of_exceptions_to_raise = list_of_exceptions_to_raise
def called_once_with(self, *args, **kwargs):
return (
len(self.calls) == 1
and self.calls[0][0] == args
and self.calls[0][1] == kwargs
)
def called(self):
return len(self.calls) > 0
def called_once(self):
return len(self.calls) == 1
def not_called(self):
return len(self.calls) == 0
def normalized_calls(self):
"""
Converts the calls into a list of kwargs by combining the args and kwargs.
This simplifies assert handling in many cases where you don't care if
the arguments were passed by position or by name.
"""
arg_spec = [arg for arg in self.arg_spec.args if arg != "self"]
# arg_spec is now a list of all positional argument names that the real function
# expects (excluding special *, **)
normalized_calls = []
for by_pos, by_keyword in self.calls:
# CONVERT all the arguments that were passed in without keywords...
normalized_args = {
arg_spec[i]: passed_value for i, passed_value in enumerate(by_pos)
}
# ADD in those arguments that were passed by keyword.
normalized_args.update(by_keyword)
normalized_calls += [normalized_args]
return normalized_calls
def normalized_call(self):
"""Used when you expect only one call and are checking some argument"""
assert self.n_calls == 1
return self.normalized_calls()[0]
def called_once_with_kws(self, **kws):
"""
Returns True if the mock was called only once and with its args and kwargs
normalized into the kws specified as the arguments to this func.
"""
if self.n_calls != 1:
return False
return kws == self.normalized_calls()[0]
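# A sketch of the normalization above, using a hypothetical function:
# def send(host, port, retries=0): ...
# m = MockFunction(send)
# m("example.com", port=80)
# assert m.normalized_calls() == [{"host": "example.com", "port": 80}]
# assert m.called_once_with_kws(host="example.com", port=80)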
class JSONDataClassEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, BaseException):
return f"{o.__class__.__name__}(\"{str(o)}\")"
# if isinstance(o, ZestResult):
# if o.error is not None:
# try:
# dataclasses.asdict(o)
# except Exception as e:
# # If it can not be encoded convert to str
# o.error = Exception(f"{o.error.__class__.__name__}: \"{str(o.error)}\"")
if dataclasses.is_dataclass(o):
return dataclasses.asdict(o)
return super().default(o)
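# For example, json.dumps(ValueError("boom"), cls=JSONDataClassEncoder)
# produces the JSON string "ValueError(\"boom\")", and any dataclass
# (such as ZestResult below) is encoded via dataclasses.asdict().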
@dataclasses.dataclass
class ZestResult:
call_stack: list
full_name: str
short_name: str
error: str = None
error_formatted: str = None
elapsed: float = None
skip: str = None
child_skip: bool = False
stdout: str = None
stderr: str = None
logs: str = None
source: str = None
pid: int = None
is_running: bool = False
is_starting: bool = False
worker_i: int = 0
def dumps(self):
return json.dumps(self, cls=JSONDataClassEncoder)
@classmethod
def loads(cls, s):
return ZestResult(**json.loads(s))
def check_allow_to_run(allow_list, test_name_parts):
full_name = ".".join(test_name_parts)
if allow_list is None:
allow = True
else:
allow = False
for allow_name in allow_list:
if allow_name == "__all__":
allow = True
break
elif allow_name.endswith("."):
allow_parts = allow_name.split(".")[0:-1]
n_allow_parts = len(allow_parts)
allow = all([
test_name_parts[i] == allow_parts[i]
for i in range(n_allow_parts)
])
if allow:
break
elif full_name == allow_name:
allow = True
break
return allow
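# Concrete examples of the allow-list semantics above:
# check_allow_to_run(None, ["a", "b"]) -> True (no list allows everything)
# check_allow_to_run(["__all__"], ["a", "b"]) -> True
# check_allow_to_run(["a."], ["a", "b", "c"]) -> True (trailing "." allows the whole subtree)
# check_allow_to_run(["a.b"], ["a", "b"]) -> True (exact full-name match)
# check_allow_to_run(["a.b"], ["a", "c"]) -> False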
class zest:
"""
This is a helper to make calling a little bit cleaner.
Example:
from plaster.tools.zest.zest import zest
def some_test():
def test_1():
with zest.raises(TypeError):
example.call_some_global_func()
def test_2():
with zest.mock(example.some_global_func):
example.some_global_func()
zest()
zest(some_test)
"""
# TODO: Convert these to just use a list of zest_results
_call_log = []
_call_stack = []
_call_errors = []
_call_warnings = []
_call_tree = []
_test_start_callback = None
_test_stop_callback = None
_mock_stack = []
_allow_to_run = None
_disable_shuffle = False
_capture = False
_bypass_skip = []
_current_error = None # Useful for checks by _after
_common_tmp = None
_tmp_root = None
# _bubble_exceptions must default to True so that if you naively call
# a zest from outside of the zest runner errors will bubble
# the zest_runner will overload this to False
_bubble_exceptions = True
@staticmethod
def reset(disable_shuffle=False, bypass_skip=None, common_tmp=None, tmp_root=None, capture=None):
zest._call_log = []
zest._call_stack = []
zest._call_errors = []
zest._call_warnings = []
zest._call_tree = []
zest._test_start_callback = None
zest._test_stop_callback = None
zest._mock_stack = []
zest._allow_to_run = None
zest._capture = capture
zest._disable_shuffle = disable_shuffle
zest._bypass_skip = [] if bypass_skip is None else bypass_skip.split(":")
zest._common_tmp = common_tmp
zest._tmp_root = tmp_root
@staticmethod
def current_test_name():
return zest._call_stack[-1]
@staticmethod
def current_test_full_name():
return ".".join(zest._call_stack)
@staticmethod
def current_test_error():
"""
Current error is a useful state to check in _after()
"""
return zest._current_error
# TODO: Sort out all the naming conventions for this
# @staticmethod
# def parameter_list(params_list):
# """
# Params list is a list of tuples that will be passed to the *args.
# If param_list is not a list of tuples they will be converted to tuples
# """
#
# def decorator(fn):
# @wraps(fn)
# def wrapper(*params, **kwargs):
# fn(*params, **kwargs)
#
# _params_list = [
# params if isinstance(params, tuple) else (params,)
# for params in params_list
# ]
# setattr(wrapper, "params_list", _params_list)
#
# return wrapper
#
# return decorator
@staticmethod
def run_in_subprocess():
"""
Use this decorator when the child zest might die or get killed
abnormally, such as when a C module exits without allowing
Python to do proper exit handling.
"""
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
child_pid = os.fork()
if child_pid == 0:
# child process
fn(*args, **kwargs)
sys.exit(0)
pid, ret_code = os.waitpid(child_pid, 0)
if ret_code != 0:
full_name = ".".join(zest._call_stack)
raise Exception(f"Child subprocess '{full_name}' died unexpectedly")
return wrapper
return decorator
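# Sketch of intended use (hypothetical test body):
# @zest.run_in_subprocess()
# def it_calls_into_a_crashy_c_module():
# some_c_binding() # may abort() without unwinding Python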
@staticmethod
def skip(reason=None):
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
full_name = ".".join(zest._call_stack)
if full_name not in zest._bypass_skip:
raise SkipException(full_name, reason)
else:
fn(*args, **kwargs)
setattr(wrapper, "skip", True)
setattr(wrapper, "skip_reason", reason)
return wrapper
return decorator
@staticmethod
def group(name):
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
return fn(*args, **kwargs)
setattr(wrapper, "group", name)
return wrapper
return decorator
@staticmethod
def retry(n_tries):
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
tries = n_tries
while tries > 0:
try:
return fn(*args, **kwargs)
except Exception as e:
tries -= 1
if tries == 0:
raise e
return wrapper
return decorator
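# Sketch: re-run a flaky inner test up to 3 times; the last exception
# propagates only if every try fails:
# @zest.retry(3)
# def it_survives_a_flaky_network():
# ...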
@staticmethod
def _setup_mock(symbol, substitute_fn=None):
if not callable(symbol):
raise AssertionError(f"Unmockable symbol {symbol} (must be callable)")
old = None
klass = None
if substitute_fn is not None:
new = substitute_fn
else:
new = MockFunction(symbol)
klass = get_class_or_module_that_defined_method(symbol)
# if isinstance(klass, types.ModuleType):
# frame = inspect.currentframe()
# module = inspect.getmodule(frame.f_back.f_back)
# for name, obj in inspect.getmembers(module):
# if (
# hasattr(obj, "__qualname__")
# and obj.__qualname__ == symbol.__qualname__
# ):
# raise AssertionError(
# f"You are mocking the module-level symbol {symbol.__qualname__} which "
# f"is imported directly into the test module. You should instead "
# f"import the containing module and then mock the sub-symbol."
# )
old = getattr(klass, symbol.__name__)
setattr(klass, symbol.__name__, new)
return old, klass, new
@staticmethod
def _clear_stack_mocks():
for (klass, symbol, old, new, reset_before_each) in zest._mock_stack[-1]:
setattr(klass, symbol.__name__, old)
@staticmethod
def stack_mock(
symbol,
reset_before_each=True,
returns=None,
returns_serially=None,
substitute_fn=None,
):
old, klass, new = zest._setup_mock(symbol, substitute_fn=substitute_fn)
if returns is not None:
new.returns(returns)
elif returns_serially is not None:
new.returns_serially(returns_serially)
zest._mock_stack[-1] += [(klass, symbol, old, new, reset_before_each)]
return new
@staticmethod
@contextmanager
def mock(symbol, returns=None, hook=None):
old, klass, new = None, None, None
try:
old, klass, new = zest._setup_mock(symbol)
if returns is not None:
new.returns(returns)
if hook is not None:
new.hook(hook)
yield new
finally:
if klass and old:
setattr(klass, symbol.__name__, old)
@staticmethod
@contextmanager
def raises(expected_exception=Exception, **kwargs):
"""
Use this in the inner most test. Do not attempt to encapsulate
more than one test with this context. See README.
The kwargs can include statements about the exception
in_args=value
in_{key}=substring
key=val
"""
got_expected_exception = False
trapped_exception = TrappedException()
try:
yield trapped_exception
except expected_exception as actual_e:
trapped_exception.exception = actual_e
got_expected_exception = True
if got_expected_exception:
# Check keys in the exception
for key, val in kwargs.items():
if key.startswith("in_args"):
for arg in trapped_exception.exception.args:
if val in arg:
break
else:
raise AssertionError(
f"expected exception to have '{val}' in some arg but not found in "
f"{trapped_exception.exception.args}"
)
elif key.startswith("in_"):
key = key[3:]
if val not in getattr(trapped_exception.exception, key):
raise AssertionError(
f"expected exception to have '{val}' in key '{key}'. "
f"Found '{getattr(trapped_exception.exception, key)}'"
)
else:
if val != getattr(trapped_exception.exception, key):
raise AssertionError(
f"expected exception to have '{key}' == '{val}'. "
f"Found '{getattr(trapped_exception.exception, key)}'"
)
else:
raise AssertionError(
f"expected {expected_exception} but nothing was raised."
)
# If some other exception was raised that should just bubble as usual
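# Sketches of the keyword checks above (hypothetical raise sites):
# with zest.raises(ValueError, in_args="bad"):
# raise ValueError("a bad value") # passes: "bad" is in args[0]
# with zest.raises(SkipException, in_reason="flaky"):
# raise SkipException("full.name", "flaky on CI") # passes: substring on .reason
# with zest.raises(SkipException, reason="flaky"):
# raise SkipException("full.name", "flaky") # passes: exact attribute match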
@staticmethod
def do(
*funcs, test_start_callback=None, test_stop_callback=None, allow_to_run=None
):
"""
This is the entrypoint of any zest at any depth.
It is called by zest_runner in the case of "root" level
tests. But each of those tests can call this recursively.
Eg:
def zest_test1(): # <-- This is the root level recursion called from zest_runner
def it_does_x():
a = b()
assert a == 1
def it_does_y():
a = c()
assert a == 2
def it_does_y1():
assert something()
zest() # <-- This is the second sub-root level recursion
zest() # <-- This is the first sub-root level recursion
This function works by looking up the call stack and analyzing
the caller's scope to find functions that do not start with underscore
and for two special underscore functions: _before and _after...
Call _before() (if defined) before each test
Call _after() (if defined) after each test
The class member _allow_to_run potentially contains a list of
zests that are allowed to execute in dotted form. Eg using above:
["zest_test1.it_does_y.it_does_y1"]
This example would mean that "zest_test1", "zest_test1.it_does_y",
and "zest_test1.it_does_y.it_does_y1" would all run.
When a parent level is given, all its children will run too.
Eg: ["zest_test1.it_does_y"] means that it_does_y1 will run too.
"""
# log(f"STARTING ZEST {zest._test_stop_callback=}")
prev_test_start_callback = None
prev_test_stop_callback = None
prev_allow_to_run = None
if test_start_callback is not None:
prev_test_start_callback = zest._test_start_callback
zest._test_start_callback = test_start_callback
if test_stop_callback is not None:
prev_test_stop_callback = zest._test_stop_callback
zest._test_stop_callback = test_stop_callback
if allow_to_run is not None:
prev_allow_to_run = zest._allow_to_run
zest._allow_to_run = allow_to_run
try:
callers_special_local_funcs = {}
if len(funcs) > 0:
funcs_to_call = [
(func.__name__, func)
for func in funcs
if isinstance(func, types.FunctionType)
and not func.__name__.startswith("_")
]
else:
# Extract test functions from caller's scope
frame = inspect.currentframe()
try:
zest_module_name = inspect.getmodule(frame).__name__
try:
while inspect.getmodule(frame).__name__ == zest_module_name:
frame = frame.f_back
except AttributeError:
# Handle the case in a notebook of running a test directly
# such as:
# from zest import zest
# def zest_centering():
# def it_centers_no_noise():
# print("RAN1")
#
# zest()
# zest_centering()
#
# This raises AttributeError when the __name__ is examined
# from a None module.
pass
context = frame.f_locals
callers_special_local_funcs = {
name: func
for name, func in context.items()
if isinstance(func, types.FunctionType)
and name.startswith("_")
and not isinstance(func, MockFunction)
}
funcs_to_call = [
(name, func)
for name, func in context.items()
if isinstance(func, types.FunctionType)
and not name.startswith("_")
and not isinstance(func, MockFunction)
]
finally:
del frame
# Randomly shuffle test order to reveal accidental order dependencies.
# TASK: make this a flag that is called during staging (w/ multi-run)
funcs_to_call = sorted(funcs_to_call, key=lambda x: x[0])
if len(funcs_to_call) > 1:
if not zest._disable_shuffle:
shuffle(funcs_to_call)
_begin = callers_special_local_funcs.get("_begin")
if _begin is not None:
raise ValueError(
"A _begin function was declared. Maybe you meant _before?"
)
for name, func in funcs_to_call:
if len(zest._mock_stack) > 0:
for mock_tuple in zest._mock_stack[-1]:
if mock_tuple[4]: # if reset_before_each is set
mock_tuple[3].reset() # Tell the mock to reset
with stdio_and_log_capture(zest._capture) as (so, se, lg):
zest._call_stack += [name]
zest._current_error = None
try:
full_name = ".".join(zest._call_stack)
allow = check_allow_to_run(zest._allow_to_run, zest._call_stack)
if not allow:
# if (
# zest._allow_to_run is not None
# and full_name not in zest._allow_to_run
# and zest._allow_to_run != "__all__"
# ):
zest._call_stack.pop()
continue
except Exception as e:
log(f"EXCEPTION during allow to check run. NAME {name} e {e}")
zest._call_stack.pop()
continue
pre_cwd = os.getcwd()
remove_tmp_dir = None
try:
if zest._common_tmp is not None:
cwd = zest._common_tmp
else:
# Create a tmp folder per test
tmp_root = zest._tmp_root or "/tmp"
cwd = tempfile.mkdtemp(dir=tmp_root)
remove_tmp_dir = cwd
# Set each test into the correct tmp folder
os.chdir(cwd)
# for params in params_list:
_before = callers_special_local_funcs.get("_before")
if _before:
try:
_before()
except Exception as e:
zest._call_errors += [(e, zest._call_stack.copy())]
s = (
f"There was an exception while running '_before()' in test '{name}'. "
f"This may mean that the sub-tests are not enumerated and therefore can not be run."
)
zest._call_warnings += [s]
if zest._bubble_exceptions:
raise e
try:
zest._call_tree += [full_name]
zest._call_log += [full_name]
if zest._test_start_callback:
with pause_stdio_capture():
zest._test_start_callback(
ZestResult(
call_stack=zest._call_stack,
full_name=full_name,
short_name=zest._call_stack[-1],
source=func.__code__.co_filename,
pid=os.getpid(),
is_running=True,
is_starting=False,
)
)
error = None
error_formatted = None
skip_reason = None
start_time = time.time()
try:
zest._mock_stack += [[]]
try:
func()
except SkipException as e:
skip_reason = e.reason
zest._clear_stack_mocks()
zest._mock_stack.pop()
except Exception as e:
error = e
error_formatted = traceback.format_exception(
etype=type(error), value=error, tb=error.__traceback__
)
zest._call_errors += [1]
# zest._call_errors += [
# (e, error_formatted, zest._call_stack.copy())
# ]
zest._current_error = e
if zest._bubble_exceptions:
raise e
finally:
stop_time = time.time()
try:
sys.stdout.flush()
sys.stderr.flush()
so.flush()
se.flush()
if lg is not None:
lg.flush()
except OSError:
pass
captured_so = None
try:
so.seek(0, io.SEEK_SET)
captured_so = so.read()
except io.UnsupportedOperation:
# This happens if so is actually sys.stdout
pass
except OSError:
pass
captured_se = None
try:
se.seek(0, io.SEEK_SET)
captured_se = se.read()
except io.UnsupportedOperation:
# This happens if se is actually sys.stderr
pass
captured_lg = None
if lg is not None:
try:
lg.seek(0, io.SEEK_SET)
captured_lg = lg.read()
except io.UnsupportedOperation:
# This happens if so is actually sys.stdout
pass
except OSError:
pass
if zest._test_stop_callback:
if error is not None:
error = f"{error.__class__.__name__}: \"{str(error)}\""
zest_result = ZestResult(
call_stack=zest._call_stack,
full_name=".".join(zest._call_stack),
short_name=zest._call_stack[-1],
error=error,
error_formatted=error_formatted,
elapsed=stop_time - start_time,
skip=skip_reason,
stdout=captured_so if captured_so is not None else None,
stderr=captured_se if captured_se is not None else None,
logs=captured_lg if captured_lg is not None else None,
source=func.__code__.co_filename,
pid=os.getpid(),
is_running=False,
is_starting=False,
)
with pause_stdio_capture():
zest._test_stop_callback(zest_result)
# If a test was skipped we need to mark its possibly
# allowed children as skipped too
if skip_reason is not None: #and allow_to_run is not None:
for allowed in (zest._allow_to_run or []):
if allowed.startswith(full_name + "."):
zest_result = ZestResult(
call_stack=allowed.split("."),
full_name=allowed,
short_name=allowed.split(".")[-1],
error=None,
error_formatted=None,
elapsed=0.0,
skip=f"parent was skipped",
child_skip=True,
stdout=None,
stderr=None,
logs=None,
source=func.__code__.co_filename,
pid=os.getpid(),
is_running=False,
is_starting=False,
)
with pause_stdio_capture():
zest._test_stop_callback(zest_result)
_after = callers_special_local_funcs.get("_after")
if _after:
_after()
finally:
zest._call_stack.pop()
finally:
# Clean up tmp folders if needed
if remove_tmp_dir:
try:
shutil.rmtree(remove_tmp_dir)
except OSError as exc:
if exc.errno != errno.ENOENT: # ENOENT - no such file or directory
raise # re-raise exception
os.chdir(pre_cwd)
finally:
if prev_test_start_callback is not None:
zest._test_start_callback = prev_test_start_callback
if prev_test_stop_callback is not None:
zest._test_stop_callback = prev_test_stop_callback
if prev_allow_to_run is not None:
zest._allow_to_run = prev_allow_to_run
def __init__(self, *args, **kwargs):
self.do(*args, **kwargs) | zbs.zest | /zbs.zest-1.1.32.tar.gz/zbs.zest-1.1.32/zest/zest.py | zest.py |
import sys
import os
import io
import ctypes
from tempfile import NamedTemporaryFile
import time
from dataclasses import dataclass
from contextlib import contextmanager
log_fp = None
log_last_time = None
def log(*args):
global log_fp, log_last_time
if log_fp is None:
log_fp = open("log.txt", "a")
delta = 0
if log_last_time is not None:
delta = time.time() - log_last_time
log_last_time = time.time()
log_fp.write(f"{delta:3.1f} " + "".join([str(i) + " " for i in args]) + "\n")
log_fp.flush()
# Redirection is re-entrant and pauseable
libc = ctypes.CDLL(None)
redirect_depth = 0
so_orig_fd = sys.stdout.fileno() # The root level handle for stdout (typically == 1)
so_root_save_fd = None # Will be set for the root level stdout so that it can be used in pause
so_c_fd = None # The libc handle so that it can be flushed
so_curr_tmpfile = None # The top of the so stack tmpfile which is needed by pause/resume
se_orig_fd = sys.stderr.fileno() # The root level handle for stderr (typically == 2)
se_root_save_fd = None # Will be set for the root level stderr so that it can be used in pause
se_c_fd = None # The libc handle so that it can be flushed
se_curr_tmpfile = None # The top of the se stack tmpfile which is needed by pause/resume
try:
# Linux
so_c_fd = ctypes.c_void_p.in_dll(libc, 'stdout')
except ValueError:
# OSX
so_c_fd = ctypes.c_void_p.in_dll(libc, '__stdoutp')
try:
# Linux
se_c_fd = ctypes.c_void_p.in_dll(libc, 'stderr')
except ValueError:
# OSX
se_c_fd = ctypes.c_void_p.in_dll(libc, '__stderrp')
def _redirect_stdout(to_fd):
libc.fflush(so_c_fd)
sys.stdout.close()
os.dup2(to_fd, so_orig_fd)
sys.stdout = io.TextIOWrapper(os.fdopen(so_orig_fd, "wb"))
def _redirect_stderr(to_fd):
libc.fflush(se_c_fd)
sys.stderr.close()
os.dup2(to_fd, se_orig_fd)
sys.stderr = io.TextIOWrapper(os.fdopen(se_orig_fd, "wb"))
@contextmanager
def stdio_capture(should_capture):
"""
Capture stdout in a re-entrant manner. See pause_stdio_capture().
If should_capture is False it simply returns (stdout, stderr)
which simplifies conditional "with" clauses. Ie:
with stdio_capture(should_capture) as (so, se):
important_stuff(so, se)
as opposed to:
if should_capture:
with stdio_capture(should_capture) as (so, se):
important_stuff(so, se)
else:
# repeating the above
important_stuff(sys.stdout, sys.stderr)
"""
if not should_capture:
yield sys.stdout, sys.stderr
else:
global redirect_depth
global so_root_save_fd, so_curr_tmpfile
global se_root_save_fd, se_curr_tmpfile
so_save_fd = os.dup(so_orig_fd)
se_save_fd = os.dup(se_orig_fd)
if redirect_depth == 0:
so_root_save_fd = so_save_fd
se_root_save_fd = se_save_fd
so_tmpfile = NamedTemporaryFile(mode="w+b")
se_tmpfile = NamedTemporaryFile(mode="w+b")
so_prev_tmpfile = so_curr_tmpfile
se_prev_tmpfile = se_curr_tmpfile
so_curr_tmpfile = so_tmpfile
se_curr_tmpfile = se_tmpfile
redirect_depth += 1
try:
_redirect_stdout(so_tmpfile.fileno())
_redirect_stderr(se_tmpfile.fileno())
yield (so_tmpfile, se_tmpfile)
_redirect_stderr(se_save_fd)
_redirect_stdout(so_save_fd)
finally:
redirect_depth -= 1
so_tmpfile.close()
se_tmpfile.close()
so_curr_tmpfile = so_prev_tmpfile
se_curr_tmpfile = se_prev_tmpfile
os.close(so_save_fd)
os.close(se_save_fd)
@contextmanager
def pause_stdio_capture():
if redirect_depth > 0:
log(f" pause redirect")
_redirect_stdout(so_root_save_fd)
_redirect_stderr(se_root_save_fd)
yield
log(f" resume redirect")
_redirect_stdout(so_curr_tmpfile.fileno())
_redirect_stderr(se_curr_tmpfile.fileno())
else:
yield
# The tricky thing is that I have to support recursion
depth = 0
def start_test():
global depth
with stdio_capture(True) as (so, se):
with pause_stdio_capture():
print(f"Running {depth}")
print(f"This is the test stdout at {depth=}", file=sys.stdout)
print(f"This is the test stderr at {depth=}", file=sys.stderr)
if depth < 1:
depth += 1
start_test()
depth -= 1
sys.stdout.flush()
sys.stderr.flush()
so.flush()
se.flush()
so.seek(0, io.SEEK_SET)
se.seek(0, io.SEEK_SET)
captured_so = ""
try:
captured_so = so.read()
except io.UnsupportedOperation:
# This happens if so is actually sys.stdout
pass
captured_se = ""
try:
captured_se = se.read()
except io.UnsupportedOperation:
# This happens if se is actually sys.stderr
pass
with pause_stdio_capture():
print(f"Back {depth}")
print(f"Capture stdout was '{captured_so}'")
print(f"Capture stderr was '{captured_se}'")
if __name__ == "__main__":
start_test() | zbs.zest | /zbs.zest-1.1.32.tar.gz/zbs.zest-1.1.32/zest/explore.py | explore.py |
import logging
import time
import json
import os
import re
import io
import random
import sys
import signal
import multiprocessing
import multiprocessing.pool
import traceback
import pathlib
from multiprocessing import Queue
from queue import Empty
from collections import deque
from pathlib import Path
from zest import zest
from zest.zest import ZestResult
from zest.zest_runner_base import ZestRunnerBase, emit_zest_result, open_event_stream
from zest import zest_finder
from zest.zest import log
from subprocess import Popen, DEVNULL
from dataclasses import dataclass
from contextlib import redirect_stdout, redirect_stderr
from zest import colors
from zest.zest_display import (
s,
display_complete,
display_timings,
display_warnings,
display_start,
display_stop,
display_error,
error_header,
)
# Nondaemonic
# based on https://stackoverflow.com/a/53180921
class NoDaemonProcess(multiprocessing.Process):
@property
def daemon(self):
return False
@daemon.setter
def daemon(self, value):
pass
class NoDaemonContext(type(multiprocessing.get_context())):
Process = NoDaemonProcess
class NestablePool(multiprocessing.pool.Pool):
def __init__(self, *args, **kwargs):
kwargs["context"] = NoDaemonContext()
super(NestablePool, self).__init__(*args, **kwargs)
def read_zest_result_line(fd):
while True:
line = fd.readline()
if not line:
break
if not isinstance(line, str):
line = line.decode()
yield ZestResult.loads(line)
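# Sketch: replaying a result stream that a worker wrote (hypothetical path):
# with open(".zest_results/zest_test1.evt") as fd:
# for result in read_zest_result_line(fd):
# print(result.full_name, result.error)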
def clear_output_folder(output_folder):
"""
Delete all results in the output folder
"""
output_folder = Path(output_folder)
assert output_folder is not None and str(output_folder) != "/"
for res_path in os.listdir(output_folder):
res_path = output_folder / res_path
if str(res_path).endswith(".evt"):
res_path.unlink()
def _do_work_order(
root_name,
module_name,
full_path,
output_folder,
capture,
allow_to_run,
disable_shuffle,
bypass_skip,
):
zest_result_to_return = None
# In a new process, reset the _bubble_exceptions
zest._bubble_exceptions = False
zest.reset(disable_shuffle, bypass_skip)
with open_event_stream(output_folder, root_name) as event_stream:
# It may be very slow to have the load_module here in the child
# processes as it means that each child will have to load_module
# and get no benefit from caching of modules. It might be better
# to move this in to the parent process
def event_callback(zest_result):
"""
This callback occurs anytime a sub-zest starts or stops.
"""
emit_zest_result(zest_result, event_stream)
_do_work_order.queue.put(zest_result)
nonlocal zest_result_to_return
zest_result_to_return = zest_result
try:
event_callback(ZestResult(full_name=root_name, is_starting=True, call_stack=[], short_name=root_name, pid=os.getpid()))
root_zest_func = zest_finder.load_module(root_name, module_name, full_path)
zest._capture = capture
zest.do(
root_zest_func,
test_start_callback=event_callback,
test_stop_callback=event_callback,
allow_to_run=allow_to_run,
)
except Exception as e:
e._formatted = traceback.format_exception(
etype=type(e), value=e, tb=e.__traceback__
)
e._root_name = root_name
_do_work_order.queue.put(e)
return zest_result_to_return
def _do_worker_init(queue):
_do_work_order.queue = queue
class ZestRunnerMultiThread(ZestRunnerBase):
state_messages = ["DONE", "RUNNING"]
def n_live_procs(self):
return len([proc for proc in self.pool._pool if proc.exitcode is None])
def kill(self):
"""
Force-kill all children do not wait for them to terminate
"""
if self.pool:
for proc in self.pool._pool:
if proc.exitcode is None:
try:
os.kill(proc.pid, signal.SIGKILL)
except ProcessLookupError:
log(f"KILL failed {proc.pid}")
def poll(self, request_stop):
"""
Check the status of all running threads
Returns:
True if there's more to do
False if everything is done
Usage:
def callback(zest_result):
...
runner = ZestRunnerMultiThread(callback=callback, ...)
while runner.poll(request_stop):
if ...: request_stop = True
"""
if request_stop and self.pool is not None:
self.pool.terminate()
self.pool.join()
return False
try:
while True:
zest_result = self.queue.get_nowait()
if isinstance(zest_result, Exception):
raise zest_result
assert isinstance(zest_result, ZestResult)
# The child only knows its pid but we need to know which pool
# process that pid maps to. The pids change as the multiprocess
# pool logic may kill off child processes and re-use others.
# So here we monitor the self.pool._pool which is a list
# of Process objects that contain the pids
for i, p in enumerate(self.pool._pool):
self.pid_to_worker_i[p.pid] = i
worker_i = self.pid_to_worker_i.get(zest_result.pid)
if worker_i is not None:
zest_result.worker_i = self.pid_to_worker_i[zest_result.pid]
# else:
# log("Unknown zest_result.worker_i", zest_result.pid, self.pid_to_worker_i)
self.worker_status[zest_result.worker_i] = zest_result
if not zest_result.is_running and not zest_result.is_starting:
self.results += [zest_result]
if zest_result.skip is not None:
self.n_skips += 1
elif zest_result.error is not None:
self.n_errors += 1
else:
self.n_successes += 1
if self.callback is not None:
self.callback(zest_result)
except Empty:
pass
if (
self.map_results is not None
and self.map_results.ready()
and self.queue.empty()
):
self.pool.join()
return False
return True
def draw_status(self):
"""
Draw worker status one line per worker with a Clear to EOL.
If run us complete, then clear all those lines
Return the cursor to the start line
"""
def cursor_move_up(n_lines):
sys.stdout.write(f"\033[{n_lines}A")
def cursor_clear_to_eol_and_newline():
sys.stdout.write("\033[K\n")
def write_line(line):
if len(line) > 0:
assert line[-1] != "\n"
sys.stdout.write(line)
cursor_clear_to_eol_and_newline()
for i, worker in enumerate(self.worker_status):
if self.run_complete:
write_line("")
else:
if worker is not None:
write_line(
f"{i:2d}: {self.state_messages[worker.is_running]:<8s} {worker.full_name}"
)
else:
write_line(f"{i:2d}: NOT STARTED")
write_line(
f"{colors.green}{self.n_successes} {colors.red}{self.n_errors} {colors.yellow}{self.n_skips} {colors.reset}"
)
cursor_move_up(len(self.worker_status) + 1)
def draw_complete(self):
display_complete("", self.results, self.allow_to_run)
if self.verbose > 1:
# When verbose, only AFTER the worker threads have all had a chance
# to run do we dump the run logs.
# This is particularly important for the advanced tests so that
# they can see what ran.
for result in self.results:
display_start(result.full_name, None, None, self.add_markers)
display_stop(result.error, result.elapsed, result.skip, None, None)
def message_pump(self):
if self.retcode != 0:
# CHECK that zest_find did not fail
return self
request_stop = False
last_draw = 0.0
while True:
try:
# TODO: allow the caller to set request_stop mid-run
if self.allow_output and time.time() - last_draw > 0.5:
self.draw_status()
last_draw = time.time()
if not self.poll(request_stop):
self.retcode = self.n_errors
self.run_complete = True
break
except KeyboardInterrupt:
request_stop = True
self.retcode = 1
if self.allow_output:
self.draw_status()
self.draw_complete()
def __init__(self, n_workers=2, allow_output=True, **kwargs):
super().__init__(**kwargs)
if self.retcode != 0:
# CHECK that zest_find did not fail
return
self.n_workers = n_workers
self.pid_to_worker_i = {}
self.worker_status = [None] * self.n_workers
self.pool = None
self.queue = Queue()
self.map_results = None
self.allow_output = allow_output
self.run_complete = False
self.n_errors = 0
self.n_successes = 0
self.n_skips = 0
work_orders = []
for (root_name, (module_name, package, full_path),) in self.root_zests.items():
work_orders += [
(
root_name,
module_name,
full_path,
self.output_folder,
self.capture,
self.allow_to_run,
self.disable_shuffle,
self.bypass_skip,
)
]
if self.is_unlimited_run():
# Clear evt caches
clear_output_folder(self.output_folder)
# multiprocessing.Queue can only be passed via the pool initializer, not as an arg.
self.pool = NestablePool(self.n_workers, _do_worker_init, [self.queue])
self.map_results = self.pool.starmap_async(_do_work_order, work_orders)
self.pool.close() | zbs.zest | /zbs.zest-1.1.32.tar.gz/zbs.zest-1.1.32/zest/zest_runner_multi_thread.py | zest_runner_multi_thread.py |
import time
import sys
import os
import re
import tempfile
import io
from zest import zest
from zest.zest import log
from zest import zest_finder
from zest.zest_runner_base import ZestRunnerBase, emit_zest_result, open_event_stream
from zest import zest_display
from zest import colors
from zest.zest_display import (
s,
display_complete,
display_timings,
display_warnings,
display_start,
display_stop,
display_error,
display_abbreviated,
)
class ZestRunnerSingleThread(ZestRunnerBase):
def __init__(self, **kwargs):
super().__init__(**kwargs)
if self.retcode != 0:
# CHECK that zest_find did not fail
return
last_depth = 0
curr_depth = 0
event_stream = None
# Event functions are callbacks from zest
# ---------------------------------------------------------------------------------
def event_test_start(zest_result):
nonlocal last_depth, curr_depth
if self.verbose >= 2:
curr_depth = len(zest_result.call_stack) - 1
display_start(
zest_result.short_name, last_depth, curr_depth, self.add_markers
)
last_depth = curr_depth
def event_test_stop(zest_result):
nonlocal last_depth, curr_depth
emit_zest_result(zest_result, event_stream)
self.results += [zest_result]
curr_depth = len(zest_result.call_stack) - 1
if self.verbose >= 2 and not zest_result.child_skip:
display_stop(
zest_result.error,
zest_result.elapsed,
zest_result.skip,
last_depth,
curr_depth,
)
elif self.verbose == 1:
display_abbreviated(zest_result.error, zest_result.skip)
# LAUNCH root zests
for (root_name, (module_name, package, full_path)) in self.root_zests.items():
with open_event_stream(self.output_folder, root_name) as event_stream:
root_zest_func = zest_finder.load_module(root_name, module_name, full_path)
zest.do(
root_zest_func,
test_start_callback=event_test_start,
test_stop_callback=event_test_stop,
allow_to_run=self.allow_to_run,
)
# COMPLETE
if self.verbose > 0:
display_complete(self.root, self.results, self.allow_to_run)
if self.verbose > 1:
display_timings(self.results)
if self.verbose > 0:
display_warnings(zest._call_warnings)
self.retcode = 0 if len(zest._call_errors) == 0 else 1 | zbs.zest | /zbs.zest-1.1.32.tar.gz/zbs.zest-1.1.32/zest/zest_runner_single_thread.py | zest_runner_single_thread.py |
import time
import os
import sys
import argparse
import pathlib
import json
import logging
import logging.config
from pathlib import Path
from zest import zest_finder
from zest.zest_runner_single_thread import ZestRunnerSingleThread
from zest.zest_runner_multi_thread import ZestRunnerMultiThread
from zest.zest_display import display_find_errors, display_complete
from zest import zest_console_ui
from zest.zest import log
from . import __version__
def main():
log("CLI MAIN START")
parser = argparse.ArgumentParser()
# fmt: off
parser.add_argument("--version", action="store_true",
help="Show version and exit",
)
parser.add_argument("--output_folder", nargs="?", default=".zest_results",
help="Where to store results",
)
parser.add_argument("--root", nargs="?", default=os.getcwd(),
help="Optional root directory to search (default: cwd).",
)
parser.add_argument("--include_dirs", nargs="?", default=".",
help="Optional colon-delimited list of directories to search.",
)
parser.add_argument("--allow_files", nargs="?",
help=(
"Optional colon-delimited list of filenames "
"that will be allowed to run. Special: '__all__'."
)
)
parser.add_argument("--allow_to_run", nargs="?", default="__all__",
help=(
"Optional colon-delimited list of full test names (eg: 'zest_name.it_tests') "
"that will be allowed to run. Specials: '__all__', '__failed__'."
)
)
parser.add_argument("match_string", type=str, nargs="?",
help="Optional substring that must be present in a test to run."
)
parser.add_argument("--exclude_string", type=str, action="append", nargs="*",
help="Optional substring that must be absent in a test to run."
)
parser.add_argument("--verbose", default=1, type=int,
help="0=silent, 1=dot-mode, 2=run-trace 3=full-trace",
)
parser.add_argument("--disable_shuffle", action="store_true",
help="Disable the shuffling of test order.",
)
parser.add_argument("--n_workers", default=1, type=int,
help="Number of parallel processes.",
)
parser.add_argument("--capture", action="store_true",
help="Capture all stdio.",
)
parser.add_argument("--ui", action="store_true",
help="Use console UI.",
)
parser.add_argument("--no_ui", action="store_true",
help="Suppress the console UI.",
)
parser.add_argument("--go", action="store_true",
help="Use console UI and start the run upon entry.",
)
parser.add_argument("--debug_mode", action="store_true",
help="Start console in debug_mode.",
)
parser.add_argument("--add_markers", action="store_true",
help="For internal debugging."
)
parser.add_argument("--bypass_skip", nargs="?", default="",
help="For internal debugging."
)
parser.add_argument("--groups", nargs="?",
help="Optional colon-delimited list of groups to run.",
)
parser.add_argument("--exclude_groups", nargs="?",
help="Optional colon-delimited list of groups to exclude.",
)
parser.add_argument("--common_tmp", nargs="?", type=str,
help="If specified, use this folder as CWD for all. Default is a folder per-test",
)
parser.add_argument("--tmp_root", nargs="?", type=str, default="/tmp",
help="If specified, use this folder as the root for all per-tests",
)
parser.add_argument("--hook_start", nargs="?", type=str, default=None,
help="If specified, the module.function() will be called before run. Good for log setup. In form full_path/to.py:func()",
)
parser.add_argument("--preview", action="store_true",
help="Show tests that would run.",
)
# fmt: on
kwargs = vars(parser.parse_args())
if kwargs.pop("version", None):
print(__version__)
sys.exit(0)
# zest needs a way to ask the application to setup logging
hook = kwargs.get("hook_start")
if hook is not None:
hook_file, func_name = hook.split(":")
hook_start_func = zest_finder.load_module(func_name, "", hook_file)
hook_start_func()
if not kwargs.pop("no_ui", False) and (kwargs.pop("ui", False) or kwargs.get("go", False)):
retcode = zest_console_ui.run(**kwargs)
else:
if kwargs.get("n_workers") > 1:
runner = ZestRunnerMultiThread(**kwargs)
runner.message_pump()
else:
runner = ZestRunnerSingleThread(**kwargs)
retcode = runner.retcode
sys.exit(retcode)
if __name__ == "__main__":
allow_reentrancy = True
if allow_reentrancy:
main()
else:
pidfile = f"{Path.home()}/zest_runner.pid"
pid = str(os.getpid())
if os.path.isfile(pidfile):
print(f"{pidfile} already exists {sys.argv}", file=sys.stderr)
sys.exit(1)
with open(pidfile, "w") as f:
f.write(pid)
try:
main()
finally:
found_pid = 0
with open(pidfile) as f:
try:
found_pid = f.read()
except Exception as e:
pass
if str(found_pid) == str(pid):
os.unlink(pidfile) | zbs.zest | /zbs.zest-1.1.32.tar.gz/zbs.zest-1.1.32/zest/zest_cli.py | zest_cli.py |
import glob
import time
import json
import os
import re
import io
import random
import sys
import signal
import multiprocessing
import traceback
import pathlib
from contextlib import contextmanager
from zest.zest import ZestResult
from multiprocessing import Queue
from queue import Empty
from collections import deque
from pathlib import Path
from zest import zest
from zest.zest import log
from zest import zest_finder
from zest import zest_display
@contextmanager
def open_event_stream(output_folder, root_name):
f = None
try:
f = open(f"{output_folder}/{root_name}.evt", "a+b", buffering=0)
yield f
finally:
if f is not None:
f.close()
def emit_zest_result(zest_result, stream):
assert isinstance(zest_result, ZestResult)
msg = (zest_result.dumps() + "\n").encode()
stream.write(msg)
stream.flush()
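# Sketch of the write side that read_zest_result_line() replays: each root
# zest appends newline-delimited JSON ZestResults to "<output_folder>/<root_name>.evt":
# with open_event_stream(".zest_results", "zest_test1") as stream:
# emit_zest_result(zest_result, stream)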
class ZestRunnerBase:
def __init__(
self,
output_folder=Path(".zest_results"),
callback=None,
root=None,
include_dirs=None,
allow_to_run="__all__",
allow_files=None,
match_string=None,
exclude_string=None,
bypass_skip=None,
capture=False,
verbose=1,
disable_shuffle=False,
add_markers=False,
groups=None,
exclude_groups=None,
common_tmp=None,
tmp_root="/tmp",
preview=False,
**kwargs,
):
"""
output_folder:
The directory where results will be written
callback:
If not None, callback on each zest
root:
The directory under which should be searched for zests and outside of which
will be considered "library references" (greayed out in error messages)
include_dirs:
The folders (relative to root) that should be included in recursive search
allow_to_run:
If not None: A colon-delimited list of full test names (dot-delimited) that will be allowed to run
Special values:
__all__: Consider all zests to run
__failed__: Consider previous failed zests
allow_files:
If not None: A colon-delimited list of filenames (without paths or extension) that will be allowed
match_string:
If not None: A substring that if found in a zest name will include it
Note: If allow_to_run includes only a subset of zests then this match_string
can only further restrict the set. A match_string of None does not further restrict
the list at all.
exclude_string:
If not None: A substring that if found in a zest name will exclude it
bypass_skip:
Used for debugging. Ignore.
capture:
If True, capture all stdio
verbose:
0: no output
1: normal output (dots notation)
2: full test output (with names)
3: debugging traces
disable_shuffle:
True: runs zests in consistent order.
False (default): shuffles zests to root out order dependencies
add_markers:
Used for debugging. Ignore.
common_tmp:
If non-None, use this one folder as the CWD for all tests.
Otherwise a tmp folder will be created for each test
(and removed if the test passes)
tmp_root: (default "/tmp")
The root of any auto-generated tmp files
"""
zest._bubble_exceptions = False
self.callback = callback
self.output_folder = pathlib.Path(output_folder)
self.n_run = 0
self.capture = capture
self.results = []
self.retcode = 0
self.verbose = verbose
self.add_markers = add_markers
self.allow_to_run = allow_to_run
self.output_folder.mkdir(parents=True, exist_ok=True)
self.disable_shuffle = disable_shuffle
self.bypass_skip = bypass_skip
self.groups = groups
self.exclude_groups = exclude_groups
self.common_tmp = common_tmp
self.tmp_root = tmp_root
zest.reset(disable_shuffle, bypass_skip, common_tmp, tmp_root, capture)
# zest runner must start in the root of the project
# so that modules may be loaded appropriately.
self.root = root or os.getcwd()
assert self.root[0] == os.sep
allow_list = self.allow_to_run.split(":")
for r in allow_list:
if r == "__failed__":
# load_previous is a bit slow so we only want to do it if requested
prev_fails = self.load_previous()
allow_list += prev_fails
break
self.root_zests, self.allow_to_run, find_errors = zest_finder.find_zests(
root,
include_dirs,
allow_list,
allow_files.split(":") if allow_files is not None else None,
match_string,
exclude_string,
bypass_skip,
groups.split(":") if groups is not None else None,
exclude_groups.split(":") if exclude_groups is not None else None,
)
if preview:
for i in sorted(self.allow_to_run):
print(i)
print(f"\nWould have run {len(self.allow_to_run)} tests")
self.retcode = 1
return
self.handle_find_errors(find_errors)
def handle_find_errors(self, find_errors):
if len(find_errors) > 0:
zest_display.display_find_errors(find_errors)
self.retcode = 1
def is_unlimited_run(self):
"""
An unlimited run is one that has no constraints -- ir run everything.
Int that case subclass code may choose the clear all caches.
"""
return (
self.allow_to_run == "__all__"
and self.allow_files is None
and self.match_string is None
and self.groups is None
)
def load_previous(self):
fails = {}
for file in glob.glob(str(self.output_folder / "*")):
with open(file) as f:
for line in f:
res = json.loads(line)
# There can be multiple records from previous runs,
# accept the LASt state of the error run
fails[res.get("full_name")] = True if res.get("error") is not None else False
return list(set([key for key, val in fails.items() if val])) | zbs.zest | /zbs.zest-1.1.32.tar.gz/zbs.zest-1.1.32/zest/zest_runner_base.py | zest_runner_base.py |
from zest import colors
import sys
import os
import re
import io
import traceback
from zest.zest import log
def s(*strs):
for str_ in strs:
if str_ is not None:
sys.stdout.write(str_)
sys.stdout.write(colors.reset)
sys.stdout.flush()
_tb_pat = re.compile(r"^.*File \"([^\"]+)\", line (\d+), in (.*)")
def traceback_match_filename(root, line):
m = _tb_pat.match(line)
if m:
file = m.group(1)
lineno = m.group(2)
context = m.group(3)
file = os.path.relpath(os.path.realpath(file))
is_libs = True
real_path = os.path.realpath(file)
if real_path.startswith(root) and os.path.exists(real_path):
is_libs = False
if "/site-packages/" in file:
# Treat these long but commonly occurring path differently
file = re.sub(r".*/site-packages/", ".../", file)
leading, basename = os.path.split(file)
leading = f"{'./' if len(leading) > 0 and leading[0] != '.' else ''}{leading}"
return leading, basename, lineno, context, is_libs
return None
def error_header(edge, edge_style, label, width=None):
term_width = tty_size()[1]
if width is None:
width = term_width
width = min(width, term_width)
return (
edge_style
+ (edge * 5)
+ " "
+ label
+ " "
+ colors.reset
+ edge_style
+ (edge * (width - 10 - len(label)))
)
_tty_size_cache = None
def tty_size():
global _tty_size_cache
if _tty_size_cache is None:
process = os.popen("stty size", "r")
lines = process.read()
retcode = process.close()
if retcode is None:
rows, cols = lines.split()
else:
rows, cols = 50, 80
_tty_size_cache = (int(rows), int(cols))
return _tty_size_cache
def display_find_errors(errors):
s(colors.reset, colors.red, "Zest Finder Errors:\n")
for error in errors:
parent_name, path, lineno, error_message = error
s(
colors.reset,
colors.bold,
colors.red,
" ",
parent_name,
colors.reset,
colors.yellow,
f" (@ {path}:{lineno}) ",
colors.red,
f"{error_message}\n",
)
s(
colors.yellow,
f"Reminder: If you are using local functions that are not tests, prefix them with underscore.\n",
)
def display_error(root, zest_result):
stack = zest_result.full_name.split(".")
leaf_test_name = stack[-1]
formatted_test_name = " . ".join(stack[0:-1]) + colors.bold + " . " + leaf_test_name
s("\n\n", error_header("=", colors.cyan, formatted_test_name), "\n")
if zest_result.error is not None:
s("\n", error_header("-", colors.yellow, "stdout", 40), "\n")
s(zest_result.stdout)
s("\n", error_header("-", colors.yellow, "stderr", 40), "\n")
s(zest_result.stderr)
lines = []
for line in zest_result.error_formatted or [""]:
lines += [sub_line for sub_line in line.strip().split("\n")]
is_libs = False
for line in lines[1:-1]:
split_line = traceback_match_filename(root, line)
if split_line is None:
s(colors.gray if is_libs else "", line, "\n")
else:
leading, basename, lineno, context, is_libs = split_line
if is_libs:
s(colors.gray, "File ", leading, "/ ", basename)
s(colors.gray, ":", lineno)
s(colors.gray, " in function ")
s(colors.gray, context, "\n")
else:
s(
"File ",
colors.yellow,
leading,
"/ ",
colors.yellow,
colors.bold,
basename,
)
s(":", colors.yellow, lineno)
s(" in function ")
if leaf_test_name == context:
s(colors.red, colors.bold, context, "\n")
else:
s(colors.magenta, colors.bold, context, "\n")
s(
colors.red,
"raised: ",
colors.red,
colors.bold,
zest_result.error.__class__.__name__,
"\n",
)
error_message = str(zest_result.error).strip()
if error_message != "":
s(colors.red, error_message, "\n")
s()
def display_start(name, last_depth, curr_depth, add_markers):
if last_depth is not None and curr_depth is not None:
if last_depth < curr_depth:
s("\n")
if curr_depth is None:
curr_depth = 0
marker = "+" if add_markers else ""
s(" " * curr_depth, colors.yellow, marker + name, colors.reset, ": ")
# Note, no \n on this line because it will be added on the display_stop call
def display_stop(error, elapsed, skip, last_depth, curr_depth):
if elapsed is None:
elapsed = 0.0
if last_depth is not None and curr_depth is not None:
if curr_depth < last_depth:
s(f"{' ' * curr_depth}")
if isinstance(error, str) and error.startswith("skipped"):
s(colors.bold, colors.yellow, error)
elif skip is not None:
s(colors.bold, colors.yellow, "SKIPPED (reason: ", skip, ")")
elif error:
s(
colors.bold,
colors.red,
"ERROR",
colors.gray,
f" (in {int(1000.0 * elapsed)} ms)",
)
else:
s(colors.green, "SUCCESS", colors.gray, f" (in {int(1000.0 * elapsed)} ms)")
s("\n")
def display_abbreviated(error, skip):
if error:
s(colors.bold, colors.red, "F")
elif skip:
s(colors.yellow, "s")
else:
s(colors.green, ".")
def display_complete(root, zest_results, allow_to_run):
results_with_errors = [res for res in zest_results if res.error]
n_errors = len(results_with_errors)
if n_errors > 0:
for res in results_with_errors:
display_error(root, res)
s(f"\nRan {len(zest_results)} tests of the {len(allow_to_run)} that were allowed to run. ")
if n_errors == 0:
s(colors.green, "SUCCESS\n")
else:
s(colors.red, colors.bold, f"{n_errors} ERROR(s)\n")
if len(allow_to_run) != len(zest_results):
s(colors.red, colors.bold, f"WARNING: some allowed tests did not run.\n")
ran_names = {
res.full_name
for res in zest_results
}
for allowed in allow_to_run:
if allowed not in ran_names:
print(f"{allowed} did not run")
def display_timings(results):
s("Slowest 5%\n")
n_timings = len(results)
timings = [(result.full_name, result.elapsed) for result in results]
timings.sort(key=lambda tup: tup[1])
ninety_percentile = 95 * n_timings // 100
for i in range(n_timings - 1, ninety_percentile, -1):
name = timings[i]
s(" ", name[0], colors.gray, f" {int(1000.0 * name[1])} ms)\n")
def display_warnings(call_warnings):
for warn in call_warnings:
s(colors.yellow, warn, "\n")
def colorful_exception(
error=None,
formatted=None,
write_to_stderr=True,
show_raised=True,
compact=False,
gray_libs=True,
):
accum = ""
def s(*strs):
nonlocal accum
accum += "".join(strs) + colors.reset
tb_pat = re.compile(r"^.*File \"([^\"]+)\", line (\d+), in (.*)")
def _traceback_match_filename(line):
is_libs = False
m = tb_pat.match(line)
if m:
file = m.group(1)
lineno = m.group(2)
context = m.group(3)
real_path = os.path.realpath(file)
relative_path = os.path.relpath(real_path)
root = os.environ.get("ERISYON_ROOT")
if root is not None:
is_libs = True
if real_path.startswith(root):
relative_path = re.sub(r".*/" + root, "./", real_path)
is_libs = False
# Treat these long but commonly occurring path differently
if "/site-packages/" in relative_path:
relative_path = re.sub(r".*/site-packages/", ".../", relative_path)
if "/dist-packages/" in relative_path:
relative_path = re.sub(r".*/dist-packages/", ".../", relative_path)
leading, basename = os.path.split(relative_path)
# if leading and len(leading) > 0:
# leading = f"{'./' if leading[0] != '.' else ''}{leading}"
return leading, basename, lineno, context, is_libs
return None
if not compact:
s("\n")
if hasattr(error, "_root_name"):
s(colors.red, colors.bold, f"DURING ATTEMPT TO RUN {error._root_name}\n")
if formatted is None:
formatted = traceback.format_exception(
etype=type(error), value=error, tb=error.__traceback__
)
if hasattr(error, "_formatted"):
formatted = error._formatted
lines = []
for line in formatted:
lines += [sub_line for sub_line in line.strip().split("\n")]
is_libs = False
for line in lines[1:-1]:
split_line = _traceback_match_filename(line)
if split_line is None:
s(colors.gray if is_libs else "", line, "\n")
else:
leading, basename, lineno, context, is_libs = split_line
if not gray_libs:
is_libs = False
if is_libs:
s(colors.gray, "File ", leading, "/ ", basename)
s(colors.gray, ":", lineno)
s(colors.gray, " in function ")
s(colors.gray, context, "\n")
else:
s(
"File ",
colors.yellow,
leading,
"/ ",
colors.yellow,
colors.bold,
basename,
)
s(":", colors.yellow, colors.bold, lineno)
s(" in function ")
s(colors.magenta, colors.bold, context, "\n")
if show_raised:
s(
colors.red,
"raised: ",
colors.red,
colors.bold,
error.__class__.__name__,
"\n",
)
error_message = str(error).strip()
if error_message != "":
s(colors.red, error_message, "\n")
if write_to_stderr:
sys.stderr.write(accum)
return accum | zbs.zest | /zbs.zest-1.1.32.tar.gz/zbs.zest-1.1.32/zest/zest_display.py | zest_display.py |
# Zbuilder: Building VMs and applying ansible playbooks
[](https://pypi.org/project/zbuilder/)
[](https://pypistats.org/packages/zbuilder)
[](https://github.com/hasiotis/zbuilder/actions?query=workflow%3A%22Build+status%22)
[](https://zbuilder.readthedocs.io/en/develop/?badge=develop)
[](https://github.com/hasiotis/zbuilder/blob/master/LICENSE)
ZBuilder is a tool to help you build VMs ready to be transfered to ansible.
By using ansible as a library, it has access to all ansible variables. This
way it achieves high integration with ansible.
## Installation
Install and update using:
```
pip3 install --user --upgrade zbuilder
```
## Links
* [Documentation](https://zbuilder.readthedocs.io/en/stable/?badge=stable)
* [Releases](https://pypi.org/project/zbuilder/)
* [Code](https://github.com/hasiotis/zbuilder)
| zbuilder | /zbuilder-0.0.45.tar.gz/zbuilder-0.0.45/README.md | README.md |
# zbus命令行调试工具
## 下载或更新
+ 下载: sudo pip install zbus_cli
+ 更新: sudo pip install --upgrade zbus_cli
## 上传python包
1. python3 -m build
2. python3 -m twine upload dist/*'
3. 上传
+ username: token(首尾需加双下划线)
+ password: pypi-AgEIcHlwaS5vcmcCJDM4ZjYxODhhLWExZDktNGM4Yi1hYTY1LTc3OTc5ODM0ZDNiMwACKlszLCI2MzEwNTE3NS00OGZkLTRhZTctOTkwOS0wYzkxNzcxODY4ODYiXQAABiDSZYHFo8sACiQKY8puYVcQGdfV-TG5FPPmvLwUIQQRhg
## 主要版本记录
+ 0.0.6: mod json to yaml
+ 0.0.5: service call显示响应时间
+ 0.0.3: 新增topic delay功能
+ 0.0.1: 具备topic echo, hz, pub和service call功能 | zbus-cli | /zbus_cli-0.0.7.tar.gz/zbus_cli-0.0.7/README.md | README.md |
from collections import OrderedDict
import logging
from pkg_resources import parse_version
from zbus_cli.helper.entry_points import load_entry_points
PLUGIN_SYSTEM_VERSION = '0.1'
logger = logging.getLogger(__name__)
class PluginException(Exception):
"""Base class for all exceptions within the plugin system."""
pass
_extension_instances = {}
def instantiate_extensions(
group_name, *, exclude_names=None, unique_instance=False
):
extension_types = load_entry_points(
group_name, exclude_names=exclude_names)
extension_instances = {}
for extension_name, extension_class in extension_types.items():
extension_instance = _instantiate_extension(
group_name, extension_name, extension_class,
unique_instance=unique_instance)
if extension_instance is None:
continue
extension_instances[extension_name] = extension_instance
return extension_instances
def _instantiate_extension(
group_name, extension_name, extension_class, *, unique_instance=False
):
global _extension_instances
if not unique_instance and extension_class in _extension_instances:
return _extension_instances[extension_class]
try:
extension_instance = extension_class()
except PluginException as e: # noqa: F841
logger.warning(
f"Failed to instantiate '{group_name}' extension "
f"'{extension_name}': {e}")
return None
except Exception as e: # noqa: F841
logger.error(
f"Failed to instantiate '{group_name}' extension "
f"'{extension_name}': {e}")
return None
if not unique_instance:
_extension_instances[extension_class] = extension_instance
return extension_instance
def order_extensions(extensions, key_function, *, reverse=False):
return OrderedDict(
sorted(extensions.items(), key=key_function, reverse=reverse))
def order_extensions_by_name(extensions):
return order_extensions(extensions, lambda pair: pair[0])
def satisfies_version(version, caret_range):
assert caret_range.startswith('^'), 'Only supports caret ranges'
extension_point_version = parse_version(version)
extension_version = parse_version(caret_range[1:])
next_extension_version = get_upper_bound_caret_version(
extension_version)
if extension_point_version < extension_version:
raise PluginException(
'Extension point is too old (%s), the extension requires '
"'%s'" % (extension_point_version, extension_version))
if extension_point_version >= next_extension_version:
raise PluginException(
'Extension point is newer (%s), than what the extension '
"supports '%s'" % (extension_point_version, extension_version))
def get_upper_bound_caret_version(version):
parts = version.base_version.split('.')
if len(parts) < 2:
parts += [0] * (2 - len(parts))
major, minor = [int(p) for p in parts[:2]]
if major > 0:
major += 1
minor = 0
else:
minor += 1
return parse_version('%d.%d.0' % (major, minor)) | zbus-cli | /zbus_cli-0.0.7.tar.gz/zbus_cli-0.0.7/zbus_cli/helper/plugin_system.py | plugin_system.py |
from collections import defaultdict
import logging
from pkg_resources import iter_entry_points
from pkg_resources import WorkingSet
"""
The group name for entry points identifying extension points.
While all entry points in this package start with ``ros2cli.`` other
distributions might define entry points with a different prefix.
Those need to be declared using this group name.
"""
EXTENSION_POINT_GROUP_NAME = 'ros2cli.extension_point'
logger = logging.getLogger(__name__)
def get_all_entry_points():
"""
Get all entry points related to ``ros2cli`` and any of its extensions.
:returns: mapping of entry point names to ``EntryPoint`` instances
:rtype: dict
"""
extension_points = get_entry_points(EXTENSION_POINT_GROUP_NAME)
entry_points = defaultdict(dict)
working_set = WorkingSet()
for dist in sorted(working_set):
entry_map = dist.get_entry_map()
for group_name in entry_map.keys():
# skip groups which are not registered as extension points
if group_name not in extension_points:
continue
group = entry_map[group_name]
for entry_point_name, entry_point in group.items():
entry_points[group_name][entry_point_name] = \
(dist, entry_point)
return entry_points
def get_entry_points(group_name):
"""
Get the entry points for a specific group.
:param str group_name: the name of the ``entry_point`` group
:returns: mapping of group name to dictionaries which map entry point names
to ``EntryPoint`` instances
:rtype: dict
"""
entry_points = {}
for entry_point in iter_entry_points(group=group_name):
entry_points[entry_point.name] = entry_point
return entry_points
def load_entry_points(group_name, *, exclude_names=None):
"""
Load the entry points for a specific group.
:param str group_name: the name of the ``entry_point`` group
:param iterable exclude_names: the names of the entry points to exclude
:returns: mapping of entry point name to loaded entry point
:rtype: dict
"""
extension_types = {}
for entry_point in get_entry_points(group_name).values():
if exclude_names and entry_point.name in exclude_names:
continue
try:
extension_type = entry_point.load()
except Exception as e: # noqa: F841
logger.warning(
f"Failed to load entry point '{entry_point.name}': {e}")
continue
extension_types[entry_point.name] = extension_type
return extension_types
def get_first_line_doc(any_type):
if not any_type.__doc__:
return ''
lines = any_type.__doc__.splitlines()
if not lines:
return ''
if lines[0]:
line = lines[0]
elif len(lines) > 1:
line = lines[1]
return line.strip().rstrip('.') | zbus-cli | /zbus_cli-0.0.7.tar.gz/zbus_cli-0.0.7/zbus_cli/helper/entry_points.py | entry_points.py |
import argparse
import inspect
import os
import types
from zbus_cli.helper.entry_points import get_entry_points
from zbus_cli.helper.entry_points import get_first_line_doc
from zbus_cli.helper.plugin_system import instantiate_extensions
class CommandExtension:
"""
The extension point for 'command' extensions.
The following properties must be defined:
* `NAME` (will be set to the entry point name)
The following methods must be defined:
* `main`
The following methods can be defined:
* `add_arguments`
"""
NAME = None
EXTENSION_POINT_VERSION = '0.1'
def __init__(self):
super(CommandExtension, self).__init__()
def add_arguments(self, parser, cli_name, *, argv=None):
pass
def main(self, *, parser, args, addr):
raise NotImplementedError()
def get_command_extensions(group_name, *, exclude_names=None):
extensions = instantiate_extensions(group_name,
exclude_names=exclude_names)
for name, extension in extensions.items():
extension.NAME = name
return extensions
def add_subparsers(parser,
cli_name,
dest,
command_extensions,
hide_extensions=None,
required=True):
"""
Create argparse subparser for each extension.
The ``cli_name`` is used for the title and description of the
``add_subparsers`` function call.
For each extension a subparser is created.
If the extension has an ``add_arguments`` method it is being called.
This method is deprecated.
Use the function ``add_subparsers_on_demand`` instead.
Their signatures are almost identical.
Instead of passing the extensions the new function expects the group name
of these extensions.
:param parser: the parent argument parser
:type parser: :py:class:`argparse.ArgumentParser`
:param str cli_name: name of the command line command to which the
subparsers are being added
:param str dest: name of the attribute under which the selected extension
will be stored
:param dict command_extensions: dict of command extensions by their name
where each contributes a command with specific arguments
"""
import warnings
warnings.warn(
"'ros2cli.command.add_subparsers' is deprecated, use "
"'ros2cli.command.add_subparsers_on_demand' instead",
stacklevel=2)
# add subparser with description of available subparsers
description = ''
if command_extensions:
max_length = max(
len(name) for name in command_extensions.keys()
if hide_extensions is None or name not in hide_extensions)
for name in sorted(command_extensions.keys()):
if hide_extensions is not None and name in hide_extensions:
continue
extension = command_extensions[name]
description += '%s %s\n' % (name.ljust(max_length),
get_first_line_doc(extension))
subparser = parser.add_subparsers(
title='Commands',
description=description,
metavar=f'Call `{cli_name} <command> -h` for more detailed usage.')
# use a name which doesn't collide with any argument
# but is readable when shown as part of the the usage information
subparser.dest = ' ' + dest.lstrip('_')
subparser.required = required
# add extension specific sub-parser with its arguments
for name in sorted(command_extensions.keys()):
extension = command_extensions[name]
command_parser = subparser.add_parser(
extension.NAME,
description=get_first_line_doc(extension),
formatter_class=argparse.RawDescriptionHelpFormatter)
command_parser.set_defaults(**{dest: extension})
if hasattr(extension, 'add_arguments'):
extension.add_arguments(command_parser, f'{cli_name} {name}')
return subparser
class MutableString:
"""Behave like str with the ability to change the value of an instance."""
def __init__(self):
self.value = ''
def __getattr__(self, name):
return getattr(self.value, name)
def __iter__(self):
return self.value.__iter__()
def add_subparsers_on_demand(parser,
cli_name,
dest,
group_name,
hide_extensions=None,
required=True,
argv=None):
"""
Create argparse subparser for each extension on demand.
The ``cli_name`` is used for the title and description of the
``add_subparsers`` function call.
For each extension a subparser is created is necessary.
If no extension has been selected by command line arguments all first level
extension must be loaded and instantiated.
If a specific extension has been selected by command line arguments the
sibling extension can be skipped and only that one extension (as well as
potentially its recursive extensions) are loaded and instantiated.
If the extension has an ``add_arguments`` method it is being called.
:param parser: the parent argument parser
:type parser: :py:class:`argparse.ArgumentParser`
:param str cli_name: name of the command line command to which the
subparsers are being added
:param str dest: name of the attribute under which the selected extension
will be stored
:param str group_name: the name of the ``entry_point`` group identifying
the extensions to be added
:param list hide_extensions: an optional list of extension names which
should be skipped
:param bool required: a flag if the command is a required argument
:param list argv: the list of command line arguments (default:
``sys.argv``)
"""
# add subparser without a description for now
mutable_description = MutableString()
subparser = parser.add_subparsers(
title='Commands',
description=mutable_description,
metavar=f'Call `{cli_name} <command> -h` for more detailed usage.')
# use a name which doesn't collide with any argument
# but is readable when shown as part of the the usage information
subparser.dest = ' ' + dest.lstrip('_')
subparser.required = required
# add entry point specific sub-parsers but without a description and
# arguments for now
entry_points = get_entry_points(group_name)
command_parsers = {}
for name in sorted(entry_points.keys()):
command_parser = subparser.add_parser(
name, formatter_class=argparse.RawDescriptionHelpFormatter)
command_parsers[name] = command_parser
# temporarily attach root parser to each command parser
# in order to parse known args
root_parser = getattr(parser, '_root_parser', parser)
with SuppressUsageOutput({parser} | set(command_parsers.values())):
args = argv
# for completion use the arguments provided by the argcomplete env var
if _is_completion_requested():
from argcomplete import split_line
_, _, _, comp_words, _ = split_line(os.environ['COMP_LINE'])
args = comp_words[1:]
try:
known_args, _ = root_parser.parse_known_args(args=args)
except SystemExit:
if not _is_completion_requested():
raise
# if the partial arguments can't be parsed use no known args
known_args = argparse.Namespace(**{subparser.dest: None})
# check if a specific subparser is selected
name = getattr(known_args, subparser.dest)
if name is None:
# add description for all command extensions to the root parser
command_extensions = get_command_extensions(group_name)
if command_extensions:
description = ''
max_length = max(
len(name) for name in command_extensions.keys()
if hide_extensions is None or name not in hide_extensions)
for name in sorted(command_extensions.keys()):
if hide_extensions is not None and name in hide_extensions:
continue
extension = command_extensions[name]
description += '%s %s\n' % (name.ljust(max_length),
get_first_line_doc(extension))
command_parser = command_parsers[name]
command_parser.set_defaults(**{dest: extension})
mutable_description.value = description
else:
# add description for the selected command extension to the subparser
command_extensions = get_command_extensions(
group_name, exclude_names=set(entry_points.keys() - {name}))
extension = command_extensions[name]
command_parser = command_parsers[name]
command_parser.set_defaults(**{dest: extension})
command_parser.description = get_first_line_doc(extension)
# add the arguments for the requested extension
if hasattr(extension, 'add_arguments'):
command_parser = command_parsers[name]
command_parser._root_parser = root_parser
signature = inspect.signature(extension.add_arguments)
kwargs = {}
if 'argv' in signature.parameters:
kwargs['argv'] = argv
extension.add_arguments(command_parser, f'{cli_name} {name}',
**kwargs)
del command_parser._root_parser
return subparser
class SuppressUsageOutput:
"""Context manager to suppress help action during `parse_known_args`."""
def __init__(self, parsers):
"""
Construct a SuppressUsageOutput.
:param parsers: The parsers
"""
self._parsers = parsers
self._callbacks = {}
def __enter__(self): # noqa: D105
for p in self._parsers:
self._callbacks[p] = p.print_help, p.exit
# temporary prevent printing usage early if help is requested
p.print_help = lambda: None
# temporary prevent help action to exit early,
# but keep exiting on invalid arguments
p.exit = types.MethodType(_ignore_zero_exit(p.exit), p)
return self
def __exit__(self, *args): # noqa: D105
for p, callbacks in self._callbacks.items():
p.print_help, p.exit = callbacks
def _ignore_zero_exit(original_exit_handler):
def exit_(self, status=0, message=None):
nonlocal original_exit_handler
if status == 0:
return
return original_exit_handler(status=status, message=message)
return exit_
def _is_completion_requested():
return os.environ.get('_ARGCOMPLETE') == '1' | zbus-cli | /zbus_cli-0.0.7.tar.gz/zbus_cli-0.0.7/zbus_cli/command/__init__.py | __init__.py |
from zbus_cli.verb import VerbExtension
import zmq
import time
import msgpack
import yaml
class CallVerb(VerbExtension):
"""Call a service."""
def add_arguments(self, parser, cli_name):
arg = parser.add_argument(
'service_name',
help="Name of the ZBUS service to call to (e.g. '/add_two_ints')")
arg = parser.add_argument('values',
nargs='?',
default='{}',
help='Values to fill the message')
parser.add_argument('--raw',
action='store_true',
help='Show raw messagge')
parser.add_argument('--rate',
metavar='N',
type=float,
help='Repeat the call at a specific rate in Hz')
def main(self, *, args, addr):
if args.rate is not None and args.rate <= 0:
raise RuntimeError('rate must be greater than zero')
period = 1. / args.rate if args.rate else None
return requester(args.service_name, args.values, period, args.raw,
addr)
def zbus_name_check(name: str):
if name == "" or name == "/" or '-' in name:
return None
if name[0] != '/':
name = '/' + name
return name
def requester(service_name, values, period, raw, addr):
service_name = zbus_name_check(service_name)
if service_name == None:
return "service name invalid"
context = zmq.Context()
socket: zmq.sugar.context.ST = context.socket(zmq.REQ)
socket.connect(addr['client'])
print("Connecting to server...")
try:
request = yaml.safe_load(values)
except Exception as e:
return 'the value must be yaml type'
msg2send = msgpack.dumps(request)
while True:
try:
request_time = time.time()
socket.send_multipart(
[str.encode("MDPC01"),
str.encode(service_name), msg2send])
# Get the reply.
response = socket.recv_multipart()
if len(response) != 4:
return "unrecognized msg receive!frame size:{} ".format(
len(response))
if response[1].decode('utf8') != "MDPC01":
return "unkown protocl"
if response[2].decode('utf8') != service_name:
return "service name frame size error"
response_time = time.time()
msg = msgpack.loads(response[3])
print("------------")
if raw == True:
print("RAW: ", end="")
for x in response:
print("0x%x" % x, end=" ")
print()
print("response: ", msg)
print("respond in %.3f seconds" % (response_time - request_time))
if period == None:
break
time.sleep(period)
except Exception as e:
return e
return | zbus-cli | /zbus_cli-0.0.7.tar.gz/zbus_cli-0.0.7/zbus_service/verb/call.py | call.py |
from zbus_cli.verb import VerbExtension
from argparse import ArgumentTypeError
import zmq
import time
import threading
import math
DEFAULT_WINDOW_SIZE = 10000
def positive_int(string):
try:
value = int(string)
except ValueError:
value = -1
if value <= 0:
raise ArgumentTypeError('value must be a positive integer')
return value
class HzVerb(VerbExtension):
"""Print the average publishing rate to screen."""
def add_arguments(self, parser, cli_name):
arg = parser.add_argument(
'topic_name',
help="Name of the ZBUS topic to listen to (e.g. '/chatter')")
parser.add_argument(
'--window',
'-w',
dest='window_size',
type=positive_int,
default=DEFAULT_WINDOW_SIZE,
help='window size, in # of messages, for calculating rate '
'(default: %d)' % DEFAULT_WINDOW_SIZE,
metavar='WINDOW')
def main(self, *, args, addr):
topic = args.topic_name
window_size = args.window_size
topic_hz = TopicHz(topic=topic, window_size=window_size)
topic_hz.loop(addr)
class TopicHz(object):
"""TopicHz receives messages for a topic and computes frequency."""
def __init__(self, topic, window_size):
self.lock = threading.Lock()
self.msg_t0 = -1
self.msg_tn = 0
self.times = []
self.window_size = window_size
self.topic = topic
def loop(self, addr):
context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect(addr["sub"])
socket.subscribe(self.topic)
while True:
timeout = 1 * 1e9
enter_t = time.time_ns()
while time.time_ns() - enter_t < timeout:
message = socket.recv_multipart()
topic = message[0].decode('utf8')
if topic == self.topic:
self.callback_hz()
self.print_hz()
def callback_hz(self):
"""Calculate interval time."""
curr = time.time_ns()
if self.msg_t0 < 0 or self.msg_t0 > curr:
self.msg_t0 = curr
self.msg_tn = curr
self.times = []
else:
self.times.append(curr - self.msg_tn)
self.msg_tn = curr
if len(self.times) > self.window_size:
self.times.pop(0)
def get_hz(self):
"""
Calculate the average publising rate.
:param topic: topic name, ``list`` of ``str``
:returns: tuple of stat results
(rate, min_delta, max_delta, standard deviation, window number)
None when waiting for the first message or there is no new one
"""
# Get frequency every one minute
if len(self.times) == 0:
return
times = self.times
n = len(times)
mean = sum(times) / n
rate = 1. / mean if mean > 0. else 0
# std dev
std_dev = math.sqrt(sum((x - mean)**2 for x in times) / n)
# min and max
max_delta = max(times)
min_delta = min(times)
return rate, min_delta, max_delta, std_dev, n
def print_hz(self):
"""Print the average publishing rate to screen."""
ret = self.get_hz()
if ret is None:
return
rate, min_delta, max_delta, std_dev, window = ret
print(
'average rate: %.3f\n\tmin: %.3fs max: %.3fs std dev: %.5fs window: %s'
% (rate * 1e9, min_delta * 1e-9, max_delta * 1e-9, std_dev * 1e-9,
window))
return | zbus-cli | /zbus_cli-0.0.7.tar.gz/zbus_cli-0.0.7/zbus_topic/verb/hz.py | hz.py |
from zbus_cli.verb import VerbExtension
import zmq
import time
import msgpack
import yaml
def nonnegative_int(inval):
ret = int(inval)
if ret < 0:
# The error message here gets completely swallowed by argparse
raise ValueError('Value must be positive or zero')
return ret
def positive_float(inval):
ret = float(inval)
if ret <= 0.0:
# The error message here gets completely swallowed by argparse
raise ValueError('Value must be positive')
return ret
class PubVerb(VerbExtension):
"""Publish a message to a topic."""
def add_arguments(self, parser, cli_name):
arg = parser.add_argument(
'topic_name',
help="Name of the ZBUS topic to publish to (e.g. '/chatter')")
arg = parser.add_argument('values',
nargs='?',
default='{}',
help='Values to fill the message')
group = parser.add_mutually_exclusive_group()
group.add_argument('-r',
'--rate',
metavar='N',
type=positive_float,
default=1.0,
help='Publishing rate in Hz (default: 1)')
group.add_argument('-1',
'--once',
action='store_true',
help='Publish one message and exit')
group.add_argument('-t',
'--times',
type=nonnegative_int,
default=0,
help='Publish this number of times and then exit')
def main(self, *, args, addr):
times = args.times
if args.once:
times = 1
return publisher(args.topic_name, args.values, 1. / args.rate, times,
addr)
def publisher(topic, msg, period, times, addr):
try:
context = zmq.Context()
socket: zmq.sugar.context.ST = context.socket(zmq.PUB)
socket.connect(addr["pub"])
time.sleep(1)
print('publisher: beginning loop')
count = 0
while times == 0 or count < times:
json_msg = yaml.safe_load(msg)
msg2send = msgpack.dumps(json_msg)
socket.send_multipart([str.encode(topic), msg2send])
count += 1
print('publishing #%d: %r\n' % (count, msg))
if times == 0:
time.sleep(period)
else:
time.sleep(1)
except Exception as e:
return e
return | zbus-cli | /zbus_cli-0.0.7.tar.gz/zbus_cli-0.0.7/zbus_topic/verb/pub.py | pub.py |
from zbus_cli.verb import VerbExtension
from argparse import ArgumentTypeError
import zmq
import time
import threading
import math
import msgpack
DEFAULT_WINDOW_SIZE = 10000
def positive_int(string):
try:
value = int(string)
except ValueError:
value = -1
if value <= 0:
raise ArgumentTypeError('value must be a positive integer')
return value
class DelayVerb(VerbExtension):
"""Display delay of topic from timestamp in header."""
def add_arguments(self, parser, cli_name):
arg = parser.add_argument('topic',
help='Topic name to calculate the delay for')
parser.add_argument(
'--window',
'-w',
dest='window_size',
type=positive_int,
default=DEFAULT_WINDOW_SIZE,
help='window size, in # of messages, for calculating rate, '
'string to (default: %d)' % DEFAULT_WINDOW_SIZE)
def main(self, *, args, addr):
topic = args.topic
window_size = args.window_size
topic_delay = TopicDelay(topic=topic, window_size=window_size)
topic_delay.loop(addr)
class TopicDelay(object):
def __init__(self, topic, window_size):
self.lock = threading.Lock()
self.last_msg_tn = 0
self.msg_t0 = -1.
self.msg_tn = 0
self.delays = []
self.topic = topic
self.window_size = window_size
def loop(self, addr):
context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect(addr["sub"])
socket.subscribe(self.topic)
while True:
timeout = 1 * 1e9
enter_t = time.time_ns()
while time.time_ns() - enter_t < timeout:
message = socket.recv_multipart()
topic = message[0].decode('utf8')
if topic == self.topic:
msg = msgpack.loads(message[1])
self.callback_delay(msg)
self.print_delay()
def callback_delay(self, msg):
"""
Calculate delay time.
:param msg: Message instance
"""
if 'header' not in msg.keys():
raise RuntimeError('msg does not have header')
# with self.lock:
curr = time.time_ns()
if self.msg_t0 < 0 or self.msg_t0 > curr:
self.msg_t0 = curr
self.msg_tn = curr
self.delays = []
else:
# store the duration in self.delays
duration = curr - self.time_from_msg(msg['header']['stamp'])
self.delays.append(duration)
self.msg_tn = curr
if len(self.delays) > self.window_size:
self.delays.pop(0)
def time_from_msg(self, stamp) -> float:
return stamp['sec'] * 1.0 * 1e9 + stamp['nanosec']
def get_delay(self):
"""
Calculate the average publising delay.
:returns: tuple of stat results
(rate, min_delta, max_delta, standard deviation, window number)
None when waiting for the first message or there is no new one
"""
if self.msg_tn == self.last_msg_tn:
return
# with self.lock:
if not self.delays:
return
n = len(self.delays)
mean = sum(self.delays) / n
std_dev = math.sqrt(sum((x - mean)**2 for x in self.delays) / n)
max_delta = max(self.delays)
min_delta = min(self.delays)
self.last_msg_tn = self.msg_tn
return mean, min_delta, max_delta, std_dev, n
def print_delay(self):
"""Print the average publishing delay to screen."""
if not self.delays:
return
ret = self.get_delay()
if ret is None:
print('no new messages')
return
delay, min_delta, max_delta, std_dev, window = ret
# convert nanoseconds to seconds when print
print(
'average delay: %.3f\n\tmin: %.3fs max: %.3fs std dev: %.5fs window: %s'
% (delay * 1e-9, min_delta * 1e-9, max_delta * 1e-9,
std_dev * 1e-9, window)) | zbus-cli | /zbus_cli-0.0.7.tar.gz/zbus_cli-0.0.7/zbus_topic/verb/delay.py | delay.py |
import logging, logging.config, os
import socket, uuid
import threading, time
import json
import inspect
try:
import Queue
except:
import queue as Queue
try:
log_file = 'log.conf' #优先搜索当前目录
if os.path.exists(log_file):
logging.config.fileConfig(log_file)
else: #默认配置
import os.path
log_dir = os.path.dirname(os.path.realpath(__file__))
log_file = os.path.join(log_dir, 'log.conf')
logging.config.fileConfig(log_file)
except:
logging.basicConfig(format='%(asctime)s - %(filename)s-%(lineno)s - %(levelname)s - %(message)s')
class Meta:
http_method = set(["GET", "POST", "HEAD", "PUT", "DELETE", "OPTIONS"])
http_status = {
"200": "OK",
"201": "Created",
"202": "Accepted",
"204": "No Content",
"206": "Partial Content",
"301": "Moved Permanently",
"304": "Not Modified",
"400": "Bad Request",
"401": "Unauthorized",
"403": "Forbidden",
"404": "Not Found",
"405": "Method Not Allowed",
"416": "Requested Range Not Satisfiable",
"500": "Internal Server Error",
}
def __init__(self, meta = None):
self.status = None
self.method = 'GET'
self.uri = '/'
self.path = None
self.params = None
if meta is None or meta == '':
return
blocks = meta.split(None, 2)
if meta.startswith('HTTP'):
self.status = blocks[1]
return
self.method = blocks[0]
if len(blocks) > 1:
self.uri = blocks[1]
self._decode_uri(self.uri)
def __str__(self):
if self.status is not None:
desc = Meta.http_status.get(self.status)
if desc is None: desc = "Unknown Status"
return "HTTP/1.1 %s %s"%(self.status, desc)
if self.method:
return "%s %s HTTP/1.1"%(self.method, self.uri)
return ""
def get_path(self):
return self.path
def get_method(self):
return self.method
def get_status(self):
return self.status
def get_param(self, key, default_value=None):
if self.params is None:
return default_value
value = self.params.get(key)
if value is None:
value = default_value
return value
def _decode_uri(self, uri_str):
uri_str = str(uri_str)
idx = uri_str.find('?')
if idx < 0 :
self.path = uri_str
else:
self.path = uri_str[0:idx]
if self.path[0] == '/':
self.path = self.path[1:]
if idx < 0: return
param_str = uri_str[idx+1:]
self.params = {}
kvs = param_str.split('&')
for kv in kvs:
[k,v]= kv.split('=', 1)
self.params[k] = v
def msg_encode(msg):
res = '%s\r\n'%msg.meta
body_len = 0
if msg.body is not None:
body_len = len(msg.body)
for k,v in msg.head.iteritems():
res += '%s: %s\r\n'%(k,v)
len_key = 'content-length'
if len_key not in msg.head:
res += '%s: %s\r\n'%(len_key, body_len)
res += '\r\n'
if msg.body is not None:
res += msg.body
return res
def find_header_end(buf, start=0):
i = start
end = len(buf)
while i+3<end:
if buf[i]=='\r' and buf[i+1]=='\n' and buf[i+2]=='\r' and buf[i+3]=='\n':
return i+3
i += 1
return -1
def decode_headers(buf):
msg = Message()
buf = str(buf)
lines = buf.splitlines()
msg.meta = Meta(lines[0])
for i in range(1,len(lines)):
line = lines[i]
if len(line) == 0: continue
try:
p = line.index(':')
key = str(line[0:p]).strip()
val = str(line[p+1:]).strip()
msg.head[key] = val
except Exception, e:
logging.error(e)
params = msg.meta.params
if params:
for k,v in params.iteritems():
if k not in msg.head:
msg.head[k] = v
return msg
def msg_decode(buf, start=0):
p = find_header_end(buf, start)
if p < 0:
return (None, start)
head = buf[start: p]
msg = decode_headers(head)
if msg is None:
return (None, start)
p += 1 #new start
body_len = msg.get_head('content-length')
if body_len is None:
return (msg, p)
body_len = int(body_len)
if len(buf)-p < body_len:
return (None, start)
msg.body = buf[p: p+body_len]
return (msg,p+body_len)
class MqMode:
MQ = 1<<0
PubSub = 1<<1
Memory = 1<<2
class Message:
REMOTE_ADDR= "remote-addr"
ENCODING = "encoding"
CMD = "cmd"
SUB_CMD = "sub_cmd"
BROKER = "broker"
TOPIC = "topic"
MQ = "mq"
ID = "id"
RAWID = "rawid"
ACK = "ack"
SENDER = "sender"
RECVER = "recver"
def __init__(self):
self.meta = Meta()
self.head = {}
self.body = None
def __str__(self):
return msg_encode(self)
def get_head(self, key, default_value=None):
value = self.head.get(key)
if value is None:
value = default_value
return value
def set_head(self, key, value):
self.head[key] = value
def remove_head(self, key):
if key in self.head:
del self.head[key]
def set_body(self, body):
self.body = body
self.head['content-length'] = '%d'%len(self.body)
def set_json_body(self, body):
self.head['content-type']= 'application/json'
self.set_body(body)
#################################################################
def get_cmd(self):
return self.get_head(self.CMD)
def set_cmd(self, value):
self.set_head(self.CMD, value)
def get_sub_cmd(self):
return self.get_head(self.SUB_CMD)
def set_sub_cmd(self, value):
self.set_head(self.SUB_CMD, value)
def get_id(self):
return self.get_head(self.ID)
def set_id(self, value):
self.set_head(self.ID, value)
def get_rawid(self):
return self.get_head(self.RAWID)
def set_rawid(self, value):
self.set_head(self.RAWID, value)
def get_mq(self):
return self.get_head(self.MQ)
def set_mq(self, value):
self.set_head(self.MQ, value)
def get_topic(self):
return self.get_head(self.TOPIC)
def set_topic(self, value):
self.set_head(self.TOPIC, value)
def get_encoding(self):
return self.get_head(self.ENCODING)
def set_encoding(self, value):
self.set_head(self.ENCODING, value)
def get_sender(self):
return self.get_head(self.SENDER)
def set_sender(self, value):
self.set_head(self.SENDER, value)
def get_recver(self):
return self.get_head(self.RECVER)
def set_recver(self, value):
self.set_head(self.RECVER, value)
def is_ack(self):
ack = self.get_head(self.ACK)
if ack is None: return True
return ack == '1'
def set_ack(self, value):
if value==True:
value = '1'
else:
value = '0'
self.set_head(self.ACK, value)
def get_status(self):
return self.meta.status
def set_status(self, value):
self.meta.path = None
self.meta.status = str(value)
def is_status200(self):
return '200' == self.meta.status
def is_status404(self):
return '404' == self.meta.status
def is_status500(self):
return '500' == self.meta.status
class Proto:
Produce = "produce" #生产消息
Consume = "consume" #消费消息
Route = "route" #请求等待应答消息
CreateMQ = "create_mq"
Heartbeat = "heartbeat"; #心跳消息
Admin = "admin" #管理类消息
#线程不安全
class MessageClient(object):
log = logging.getLogger(__name__)
def __init__(self, host='localhost', port=15555):
self.pid = os.getpid()
self.host = host
self.port = port;
self.read_buf = ''
self.sock = None
self.id = uuid.uuid4()
self.auto_reconnect = True
self.reconnect_interval = 3 #3 seconds
self.msg_id_match = ''
self.result_table = {}
self.msg_cb = None
def close(self):
if self.sock is not None:
self.sock.close()
self.sock = None
self.read_buf = ''
def connect_if_need(self):
if self.sock is None:
self.read_buf = ''
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect( (self.host, self.port) )
self.log.info('Connected to (%s:%s)'%(self.host, self.port))
def reconnect(self):
if self.sock is not None:
self.sock.close()
self.sock = None
self.read_buf = ''
while self.sock is None:
try:
self.read_buf = ''
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.log.debug('Trying reconnect to (%s:%s)'%(self.host, self.port))
self.sock.connect( (self.host, self.port) )
self.log.debug('Connected to (%s:%s)'%(self.host, self.port))
except socket.error, e:
self.sock = None
if self.auto_reconnect:
time.sleep(self.reconnect_interval)
else:
raise e
def mark_msg(self, msg):
if msg.get_id(): #msg got id, do nothing
return
self.msg_id_match = str(uuid.uuid4())
msg.set_id(self.msg_id_match)
def invoke(self, msg, timeout=10):
self.send(msg, timeout)
return self.recv(timeout)
def send(self, msg, timeout=10):
self.connect_if_need()
self.mark_msg(msg)
self.log.debug('Request: %s'%msg)
self.sock.send(str(msg))
def recv(self, timeout=10):
if self.msg_id_match in self.result_table:
return self.result_table[self.msg_id_match]
self.connect_if_need()
self.sock.settimeout(timeout)
while True:
buf = self.sock.recv(1024)
self.read_buf += buf
idx = 0
while True:
msg, idx = msg_decode(self.read_buf, idx)
if msg is None:
if idx != 0:
self.read_buf = self.read_buf[idx:]
break
self.read_buf = self.read_buf[idx:]
if self.msg_cb: #using msg callback
self.msg_cb(msg)
continue
if self.msg_id_match and msg.get_id() != self.msg_id_match:
self.result_table[msg.get_id()] = msg
continue
self.log.debug('Result: %s'%msg)
return msg
class ClientHint:
def __init__(self):
self.mq = None
self.broker = None
self.requestIp = None
class ClientPool:
log = logging.getLogger(__name__)
def __init__(self, host='localhost', port=15555, maxsize=50, timeout=10):
self.client_class = MessageClient
self.host = host
self.port = port
self.maxsize = maxsize
self.timeout = timeout
self.reset()
def make_client(self):
client = MessageClient(host=self.host, port=self.port)
self.clients.append(client)
self.log.debug('New client created %s', client)
return client
def _check_pid(self):
if self.pid != os.getpid():
with self._check_lock:
if self.pid == os.getpid():
return
self.log.debug('new process, pid changed')
self.destroy()
self.reset()
def reset(self):
self.pid = os.getpid()
self._check_lock = threading.Lock()
self.client_pool = Queue.LifoQueue(self.maxsize)
while True:
try:
self.client_pool.put_nowait(None)
except Queue.Full:
break
self.clients = []
def borrow_client(self, hint=None): #mq ignore
self._check_pid()
client = None
try:
client = self.client_pool.get(block=True, timeout=self.timeout)
except Queue.Empty:
raise Exception('No client available')
if client is None:
client = self.make_client()
return client
def return_client(self, client):
self._check_pid()
if client.pid != self.pid:
return
if not isinstance(client, (tuple, list)):
client = [client]
for c in client:
try:
self.client_pool.put_nowait(c)
except Queue.Full:
pass
def destroy(self):
for client in self.clients:
client.close()
class Broker(object):
def get_client(self, client_hint=None):
raise NotImplementedError("unimplemented")
def close_client(self, client):
raise NotImplementedError("unimplemented")
def inovke(self, msg):
raise NotImplementedError("unimplemented")
def destroy(self):
raise NotImplementedError("unimplemented")
class SingleBroker(Broker):
def __init__(self, host='localhost', port=15555, maxsize=128):
self.host = host
self.port = port
self.pool = ClientPool(host=host, port=port, maxsize=maxsize)
def get_client(self, client_hint=None):
return MessageClient(self.host, self.port)
def close_client(self, client):
if client:
client.close()
def invoke(self, msg, timeout=10):
client = self.pool.borrow_client()
try:
return client.invoke(msg, timeout)
finally:
self.pool.return_client(client)
def destroy(self):
self.pool.destroy()
class Producer:
def __init__(self, broker=None, mq = None):
self.broker = broker
self.mq = mq
def send(self, msg):
msg.set_cmd(Proto.Produce)
msg.set_mq(self.mq)
return self.broker.invoke(msg)
class Consumer:
log = logging.getLogger(__name__)
def __init__(self, broker, mq = None, mode=MqMode.MQ, topic=None):
self.broker = broker
self.mq = mq
self.mode = mode
self.topic = topic
self.client = None
def myClientHint(self):
return None
def createMQ(self):
msg = Message()
msg.set_cmd(Proto.CreateMQ)
msg.set_head("mq_name", self.mq)
msg.set_head("mq_mode", self.mode)
res = self.client.invoke(msg, 10)
return res.is_status200()
def recv(self, timeout=10):
if self.client is None:
hint = self.myClientHint()
self.client = self.broker.get_client(hint)
msg = Message()
msg.set_cmd(Proto.Consume)
msg.set_mq(self.mq)
if self.mode & MqMode.PubSub:
if self.topic:
msg.set_topic(self.topic)
try:
res = self.client.invoke(msg, timeout)
if res.is_status404():
if not self.createMQ():
raise Exception('MQ(%s) creation failed')
return self.recv(timeout)
res.set_id(res.get_rawid())
res.remove_head(Message.RAWID)
res.remove_head(Message)
return res
except socket.timeout: #等待消息超时
return None
except socket.error, e: #网络错误
self.log.debug(e)
hint = self.myClientHint()
self.broker.close_client(self.client)
self.client = self.broker.get_client(hint)
return self.recv(timeout)
def route(self, msg, timeout=10):
status = msg.get_status()
if not status:
msg.set_status("200");
msg.set_cmd(Proto.Route);
msg.set_ack(False)
self.client.send(msg)
class Caller:
log = logging.getLogger(__name__)
def __init__(self, broker = None, mq = None):
self.broker = broker
self.mq = mq
def invoke(self, msg, timeout=10):
msg.set_cmd(Proto.Produce)
msg.set_mq(self.mq)
msg.set_ack(False)
self.log.debug('Request: %s'%msg)
res = self.broker.invoke(msg, timeout)
self.log.debug('Result: %s'%res)
return res
class ServiceHandler(object):
def __call__(self, req):
return self.handle_request(req)
def handle_request(self, msg):
raise Exception('unimplemented')
class ServiceConfig:
def __init__(self):
self.service_andler = None;
self.broker = None;
self.service_name = None;
self.thread_count = 1;
self.consume_timeout = 10; #seconds
class WorkerThread(threading.Thread):
def __init__(self, config):
threading.Thread.__init__(self)
self.handler = config.service_andler
if not isinstance(self.handler, ServiceHandler):
raise Exception('handler not support')
self.mq = config.service_name
self.broker = config.broker
self.consume_timeout = config.consume_timeout
def run(self):
consumer = Consumer(broker=self.broker, mq=self.mq)
while True:
msg = consumer.recv(self.consume_timeout)
if msg is None: continue
sender = msg.get_sender()
msgid = msg.get_id()
res = self.handler.handle_request(msg)
if res is None: continue
res.set_id(msgid)
res.set_recver(sender)
consumer.route(res, self.consume_timeout)
class Service(threading.Thread):
def __init__(self, config):
threading.Thread.__init__(self)
self.config = config
self.thread_count = config.thread_count
def run(self):
workers = []
for i in range(self.thread_count):
workers.append(WorkerThread(self.config))
for w in workers:
w.start()
for w in workers:
w.join()
class MyServiceHandler(ServiceHandler):
def handle_request(self, msg):
print msg
res = Message()
res.set_status('200')
res.set_body('hello server@%s'%time.time())
return res
def Remote( _id = None ):
def func(fn):
fn.remote_id = _id or fn.__name__
return fn
return func
class Rpc(Caller):
log = logging.getLogger(__name__)
def __init__(self, broker=None, mq = None, module='', mehtod=None,
timeout=10, encoding='utf8'):
Caller.__init__(self, broker=broker, mq = mq)
self.timeout = timeout
self.method = mehtod
self.module = module
self.encoding = encoding
def __getattr__(self, name):
rpc = Rpc(broker=self.broker,
mq=self.mq,
module=self.module,
timeout=self.timeout,
encoding=self.encoding)
rpc.method = name;
return rpc
def _error_msg(self, msg_req):
return '=========RPC Context=========\nMQ(%s)-Module(%s)-Method(%s)\n=========Message Dump========\n%s'%(self.mq, self.module, self.method, msg_req)
def invoke(self, args):
req = {'module': self.module, 'method': self.method, 'params': args}
msg_req = Message()
msg_req.set_json_body(json.dumps(req, encoding=self.encoding))
try:
msg_res = Caller.invoke(self, msg_req, self.timeout)
except socket.timeout, e:
error_msg = 'Request Timeout\n%s'%(self._error_msg(msg_req))
raise Exception(error_msg)
except socket.error, e:
error_msg = '%s\n%s'%(e, self._error_msg(msg_req))
raise Exception(error_msg)
if msg_res is None:
error_msg = self._error_msg(msg_req)
raise Exception(error_msg)
if msg_res.is_status404():
msg_res_body = msg_res.body
error_msg = '%s\n%s'%(msg_res_body, self._error_msg(msg_req))
raise Exception(error_msg)
res = json.loads(msg_res.body, encoding=msg_res.get_encoding())
if not msg_res.is_status200():
error_text = 'unknown error'
if 'stackTrace' in res: error_text = res['stackTrace']
elif 'error' in res: error_text = res['error']
error_msg = '%s\n%s'%(error_text, self._error_msg(msg_req))
raise Exception(error_msg)
if 'result' in res:
return res['result']
error_text = 'bad json result format'
error_msg = '%s\n%s'%(error_text, self._error_msg(msg_req))
raise Exception(error_msg)
def __call__(self, *args):
return self.invoke(args)
class RpcServiceHandler(ServiceHandler):
def __init__(self):
self.methods = {}
def add_module(self, module, service):
methods = inspect.getmembers(service, predicate=inspect.ismethod)
for method in methods:
if hasattr(method[1], 'remote_id'):
remote_id = getattr(method[1], 'remote_id')
key = '%s:%s'%(module,remote_id)
if key in self.methods:
print '%s duplicated'%key
self.methods[key] = method[1]
def handle_request(self, msg):
try:
encoding = msg.get_encoding()
if encoding is None:
encoding = 'utf8'
return self.handle_json_request(msg.body, encoding)
except Exception, error:
msg = Message()
msg.set_status('500')
msg.set_json_body(json.dumps({'error': str(error), 'stack_trace': str(error)}, encoding=encoding))
return msg
def handle_json_request(self, json_str, encoding='utf-8'):
error = None
result = None
status = '400'
try:
req = json.loads(json_str, encoding=encoding)
except Exception, e:
error = Exception('json format error: %s'%str(e))
if error is None:
try:
module = req['module']
method = req['method']
params = req['params']
except:
error = Exception('parameter error: %s'%json_str)
if error is None:
key = '%s:%s'%(module,method)
if key not in self.methods:
error = Exception('%s method not found'%key)
else:
method = self.methods[key]
if error is None:
try:
result = method(*params)
except Exception, e:
error = e
#return result
try:
if error is not None:
data = json.dumps({'error': str(error), 'stack_trace': str(error)}, encoding=encoding)
else:
status = '200'
data = json.dumps({'result': result}, encoding=encoding)
except:
status = '500'
data = json.dumps({'error': error })
msg = Message()
msg.set_status(status)
msg.set_json_body(data)
return msg
######################KCXP##############################
######################TRADE##############################
__all__ = [
Proto,MqMode, Message,MessageClient,SingleBroker,
Producer, Consumer, Caller,
ServiceConfig, Service,
Remote, Rpc,
ServiceHandler, RpcServiceHandler
] | zbus | /zbus-0.0.3.zip/zbus-0.0.3/zbus.py | zbus.py |
from sqlalchemy import MetaData, text, and_
from sqlalchemy.sql.expression import select, delete
from contextlib import contextmanager
from sqlalchemy.ext.declarative import DeclarativeMeta
import json
import datetime
class AlchemyEncoder(json.JSONEncoder):
    """JSON encoder that handles datetime values and SQLAlchemy declarative models."""
    def default(self, obj):
        if isinstance(obj, datetime.datetime):
            return obj.isoformat()
        elif isinstance(obj, datetime.date):
            return obj.isoformat()
        elif isinstance(obj, datetime.timedelta):
            return (datetime.datetime.min + obj).time().isoformat()
        if isinstance(obj.__class__, DeclarativeMeta):
            # serialize every public attribute of the mapped object
            fields = {}
            for field in [x for x in dir(obj) if not x.startswith('_') and x != 'metadata']:
                fields[field] = obj.__getattribute__(field)
            return fields
        return json.JSONEncoder.default(self, obj)
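# A minimal sketch of using AlchemyEncoder with the standard json module.
# `row` stands for any mapped object loaded elsewhere; it is illustrative,
# not something this module defines:
#
#   payload = json.dumps(row, cls=AlchemyEncoder)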
class Dict(dict): # dynamic property support, such as d.name
    def __getattr__(self, name):
        if name in self: return self[name]
    def __setattr__(self, name, value):
        self[name] = value
    def __delattr__(self, name):
        self.pop(name, None)
    def __getitem__(self, key): # missing keys yield None instead of raising KeyError
        if key not in self: return None
        return dict.__getitem__(self, key)
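# Quick illustration of Dict's behavior (the values are made up):
#
#   d = Dict(name='demo')
#   d.port = 15555            # attribute assignment stores a key
#   assert d.name == 'demo'   # attribute access reads the key
#   assert d.missing is None  # absent keys yield None instead of KeyError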
config = Dict() # dialect hooks; the defaults below target MySQL

def escape(key):
    # quote an identifier, unless it is already qualified with a dot
    if config.escape:
        return config.escape(key)
    if '.' in key:
        return key
    return "`%s`" % key

def sql_page(sql, page, limit):
    # wrap a query with limit/offset pagination; page numbers are 1-based
    if config.sql_page:
        return config.sql_page(sql, page, limit)
    page = int(page)
    limit = int(limit)
    return '%s limit %d offset %d' % (sql, limit, (page-1)*limit)

def sql_count(sql):
    # wrap a query so it returns the row count of its result set
    if config.sql_count:
        return config.sql_count(sql)
    return f"select count(0) as total from ({sql}) as t"
class Dict(dict): # dynamic property support, such as d.name
def __getattr__(self, name):
if name in self: return self[name]
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
self.pop(name, None)
def __getitem__(self, key):
if key not in self: return None
return dict.__getitem__(self, key)
class Db:
def __init__(self, engine, reflect=True):
self.engine = engine
self.tables = {}
self.meta = MetaData()
if reflect:
self.reflect()
def reflect(self):
self.meta.reflect(bind=self.engine)
self.tables = self.meta.tables
@contextmanager
def session(self):
"""Provide a transactional scope around a series of operations."""
sa_conn = self.engine.connect()
tx = sa_conn.begin()
try:
connection = Connection(sa_conn, self.tables)
yield connection
tx.commit()
except:
tx.rollback()
raise
finally:
sa_conn.close()
@contextmanager
def connection(self):
"""Expose raw connection"""
sa_conn = self.engine.connect()
tx = sa_conn.begin()
try:
yield sa_conn
tx.commit()
except:
tx.rollback()
raise
finally:
sa_conn.close()
def query(self, sql, converter=None, session=None, **kvargs):
if session:
return session.query(sql, converter, **kvargs)
with self.session() as s:
return s.query(sql, converter, **kvargs)
def query_page(self, sql, converter=None, session=None, **kvargs):
if session:
return session.query_page(sql, converter, **kvargs)
with self.session() as s:
return s.query_page(sql, converter, **kvargs)
def query_one(self, sql, converter=None, session=None, **kvargs):
if session:
return session.query_one(sql, converter, **kvargs)
with self.session() as s:
return s.query_one(sql, converter, **kvargs)
def execute(self, sql, session=None, **kvargs):
if session:
return session.execute(sql, **kvargs)
with self.session() as s:
return s.execute(sql, **kvargs)
def add(self, table, json_data, session=None):
if session:
return session.add(table, json_data)
with self.session() as s:
return s.add(table, json_data)
def add_many(self, table, data, session=None):
if session:
return session.add_many(table, data)
with self.session() as s:
return s.add_many(table, data)
def update_many(self, table, data, session=None):
if session:
return session.update_many(table, data)
with self.session() as s:
return s.update_many(table, data)
def execute_many(self, sql, data, session=None):
if session:
return session.execute_many(sql, data)
with self.session() as s:
return s.execute_many(sql, data)
def merge(self, table, json_data, session=None):
if session:
return session.merge(table, json_data)
with self.session() as s:
return s.merge(table, json_data)
def save(self, table, json_data, session=None):
if session:
return session.save(table, json_data)
with self.session() as s:
return s.save(table, json_data)
def delete(self, table, key, session=None):
if session:
return session.delete(table, key)
with self.session() as s:
return s.delete(table, key)
def one(self, table, key, c=None, session=None):
if session:
return session.one(table, key, c)
with self.session() as s:
return s.one(table, key, c)
def list(self, table, p=0, n=100, c=None, key=None, key_name=None, session=None):
if session:
return session.list(table, p=p, n=n, c=c, key=key, key_name=key_name)
with self.session() as s:
return s.list(table, p=p, n=n, c=c, key=key, key_name=key_name)
class Connection:
def __init__(self, conn, tables):
self.connection = conn
self.tables = tables
def query(self, sql, converter=None, **kvargs):
return self._query(sql, converter, **kvargs)
def query_page(self, sql, converter=None, **kvargs):
page = kvargs.get('page') or 1
limit = kvargs.get('limit') or 20
do_count = kvargs.get('do_count') # 0--only data, 1/None--count + data, 2--only count
if do_count is None:
do_count = 1
total, data = None, None
if do_count >= 1:
sql_c = sql_count(sql)
res = self.query_one(sql_c, converter, **kvargs)
total = res.total
if do_count <= 1:
sql_p = sql_page(sql, page, limit)
sql_p = text(sql_p)
data = self._query(sql_p, converter, **kvargs)
return Dict({
'total': total,
'page': page,
'limit': limit,
'data': data
})
def _query(self, s, converter=None, **kvargs):
if isinstance(s, str):
s = text(s)
rs = self.connection.execute(s, **kvargs)
def c(row):
if not converter:
r = Dict(row)
sub_dict = {}
for name in r:
bb = name.split('.') # handle . for layer object
key = None
if len(bb) > 1:
obj_name, key = bb
obj = sub_dict.get(obj_name)
if not obj:
sub_dict[obj_name] = obj = {}
v = r[name]
if isinstance(v, bytes):
if len(v) == 1:
v = int(v[0])
if key:
obj[key] = v
else:
r[name] = v
r.update(sub_dict)
return r
return converter(r)
return [c(row) for row in rs]
def query_one(self, sql, converter=None, **kvargs):
res = self.query(sql, converter, **kvargs)
if len(res) > 0: return res[0]
def execute(self, sql, **kvargs):
if isinstance(sql, str):
sql = text(sql)
return self.connection.execute(sql, **kvargs)
def _check_table(self, table):
if table not in self.tables:
raise Exception('Table(%s) Not Found' % table)
return self.tables[table]
def _primary_key(self, table):
t = self._check_table(table)
if len(t.primary_key) != 1:
raise Exception('Table(%s) primary key not single' % table)
for c in t.primary_key:
return t, c
def _table_and_column(self, s):
bb = s.split('.')
if len(bb) != 2:
raise Exception('Invalid table and column string: %s' % s)
t = self._check_table(bb[0])
if bb[1] not in t.c:
raise Exception('Column(%s) not in Table(%s)' % (bb[1], bb[0]))
return t, bb[1]
def _batch_query(self, t, col_name, value_set):
value_set = list(value_set)
if len(value_set) == 1:
s = select([t]).where(t.c[col_name] == value_set[0])
else:
s = select([t]).where(t.c[col_name].in_(value_set))
data = self._query(s)
res = {}
for row in data:
k = row[col_name]
if k not in res:
res[k] = [row]
else:
res[k].append(row)
return res
def delete(self, table, key):
t, c_key = self._primary_key(table)
s = delete(t).where(t.c[c_key.name] == key)
self.connection.execute(s)
def one(self, table, key, c=None):
res = self.list(table, key=[key], c=c)
if res and len(res) >= 1:
return res[0]
def list(self, table, p=0, n=100, c=None, key=None, key_name=None):
"""
@param table: table mapping name(table raw name by default)
@param p: page index
@param n: size of page
@param c: column list
@param key: key list or single key
@param key_name: replace the primary key if set
"""
t = self._check_table(table)
c_list = [t]
if c:
if not isinstance(c, (list, tuple)):
c = [c]
c_list = [t.c[name] for name in c if name in t.c]
s = select(c_list)
if key:
if not key_name:
_, k = self._primary_key(table)
key_name = k.name
if not isinstance(key, (list, tuple)):
key = [key]
if len(key) == 1:
s = s.where(t.c[key_name].op('=')(key[0]))
else:
s = s.where(t.c[key_name].in_(key))
else:
if n:
page = int(p)
page_size = int(n)
s = s.limit(page_size)
s = s.offset(page * page_size)
return self._query(s)
def add(self, table, json_data):
self._check_table(table)
t = self.tables[table]
sql = t.insert()
data = Dict({key: json_data[key] for key in json_data if key in t.c})
res = self.connection.execute(sql, data)
inserted_keys = res.inserted_primary_key
i = 0
for c in t.primary_key:
if i >= len(inserted_keys):
break
data[c.name] = inserted_keys[i]
i += 1
return data
def add_many(self, table, data):
t = self._check_table(table)
return self.execute_many(t.insert(), data)
def update_many(self, table, data):
if len(data) == 0:
return
row = data[0]
t = self._check_table(table)
primary_keys = []
update_cols = []
for c in t.c:
if c.name not in row:
continue
col = f"{escape(c.name)}=:{c.name}"
if c.primary_key:
primary_keys.append(col)
else:
update_cols.append(col)
updates = ', '.join(update_cols)
where = ' and '.join(primary_keys)
sql = f"UPDATE {escape(t.name)} SET {updates} WHERE {where}"
return self.execute_many(sql, data)
def execute_many(self, sql, data):
if isinstance(sql, str):
sql = text(sql)
if not isinstance(data, (tuple, list)):
data = [data]
# data must be array of dict!!!
data = [dict(d) for d in data]
res = self.connection.execute(sql, data)
return res.rowcount
def update(self, table, json_data):
return self.merge(table, json_data)
def merge(self, table, json_data):
self._check_table(table)
t = self.tables[table]
values, where = {}, []
for key in json_data:
if key not in t.c:
continue
if key in t.primary_key:
cond = t.c[key] == json_data[key]
where.append(cond)
else:
values[key] = json_data[key]
if len(where) == 0:
raise Exception("Missing database primary key in merge action")
sql = t.update().where(and_(*where)).values(**values)
return self.connection.execute(sql).rowcount
def save(self, table, json_data):
self._check_table(table)
update = False
t = self.tables[table]
for key in json_data:
if key in t.primary_key:
update = True
sql = t.select().where(t.c[key] == json_data[key])
res = self.query_one(sql)
if not res:
update = False
break
if update:
return self.merge(table, json_data)
return self.add(table, json_data) | zbuspy | /zbuspy-1.2.0.zip/zbuspy-1.2.0/zbus/db.py | db.py |
import inspect
import simplejson as json
import logging.config
import os
import sys
from threading import Thread
import threading
import time
import uuid
import re
import traceback
import hmac
import hashlib
from websocket import WebSocketApp
from datetime import date, datetime
context = threading.local()
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (datetime, date)):
return obj.isoformat()
if isinstance(obj, (set)):
return list(obj)
return obj
class Protocol:
MEMORY = 'memory'
DISK = 'disk'
DB = 'db'
MASK_DELETE_ON_EXIT = 1 << 0
MASK_EXCLUSIVE = 1 << 1
CONTENT_TYPE = 'Content-Type'
try:
log_file = 'log.conf'
if os.path.exists(log_file):
logging.config.fileConfig(log_file)
else:
import os.path
log_dir = os.path.dirname(os.path.realpath(__file__))
log_file = os.path.join(log_dir, 'log.conf')
logging.config.fileConfig(log_file)
except:
logging.basicConfig(
format='%(asctime)s - %(filename)s-%(lineno)s - %(levelname)s - %(message)s')
# support both python2 and python3
if sys.version_info[0] < 3:
def _bytes(buf, encoding='utf8'):
return buf.encode(encoding)
def file_read(file_path, encoding='utf8'):
with open(file_path) as f:
file_content = f.read()
return file_content.decode(encoding)
else:
def _bytes(buf, encoding='utf8'):
return bytes(buf, encoding)
def file_read(file_path, encoding='utf8'):
with open(file_path, encoding=encoding) as f:
file_content = f.read()
return file_content
class Dict(dict): #dynamic property support, such as d.name
def __getattr__(self, name):
if name in self: return self[name]
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
self.pop(name, None)
def __getitem__(self, key):
if key not in self: return None
return dict.__getitem__(self, key)
class Message(Dict):
def __init__(self, status=None, body=None, data=None):
self.replace(data)
if status:
self.status = status
self.headers[Protocol.CONTENT_TYPE] = 'text/html; charset=utf8'
self.body = body
def replace(self, data):
if data == self: return
self.clear()
if data and isinstance(data, (Message, dict, Dict)):
for key in data:
self[key] = data[key]
name = 'headers'
if name not in self:
self[name] = Dict()
if self[name] is not Dict:
self[name] = Dict(self[name])
def content_type(self): #case insensitive
for k in self.headers:
if k.lower() == 'content-type':
return self.headers[k]
def get_cookie_table(self):
table = Dict()
cookie_str = self.headers.get('cookie')
if not cookie_str:
return table
for block in cookie_str.split(';'):
i = block.find('=')
key = (block[0:i]).strip()
val = (block[i+1:]).strip()
table[key] = val
return table
def get_cookie(self, key):
table = self.get_cookie_table()
return table.get(key)
def set_cookie(self, key, val):
self.headers['set-cookie'] = '%s=%s'%(key, val)
def redirect(location):
res = Message()
res.status = 302
res.headers['Location'] = location
return res
def to_json_sign(msg):
signFields = msg.headers['signFields']
j = Dict()
j.headers = Dict()
j.headers['apiKey'] = msg.headers['apiKey'] #apiKey required
j.headers['signFields'] = signFields
bb = signFields.split(',')
for key in bb:
key = key.strip()
if key == '': continue
if key.startswith('h.'):
hkey = key[2:]
if hkey == '*':
j.headers.update(msg.headers)
else:
j.headers[hkey] = msg.headers[hkey]
else:
if key in msg:
j[key] = msg[key]
return j
def sign_message(api_key, secret_key, sign_fields, msg, encoding='utf8'):
del msg.headers['signature']
msg.headers['apiKey'] = api_key
msg.headers['signFields'] = sign_fields
j = to_json_sign(msg)
data = json.dumps(j, separators=(',', ':'), sort_keys=True)
sign = hmac.new(_bytes(secret_key,encoding), msg=_bytes(data, encoding), digestmod=hashlib.sha256).hexdigest()
msg.headers['signature'] = sign
class CountDownLatch(Dict):
def __init__(self, count=1):
self.count = count
self.lock = threading.Condition()
self.is_set = False
def count_down(self):
if self.is_set:
return
self.lock.acquire()
self.count -= 1
if self.count <= 0:
self.lock.notifyAll()
self.is_set = True
self.lock.release()
def wait(self, timeout=3):
self.lock.acquire()
if self.count > 0:
self.lock.wait(timeout)
self.lock.release()
class WebsocketClient(object):
log = logging.getLogger(__name__)
def __init__(self, address):
self.websocket = None
self.callback_table = {}
self.address = address
#auth
self.api_key = None
self.secret_key = None
self.sign_fields = ''
self.auth_enabled = False
self.reconnect_delay = 3
self.auto_connect = True
self.connect_lock = threading.Lock()
self.connect_active = False
self.pending_messages = []
address = address.lower()
if not address.startswith("ws://") and not address.startswith("wss://"):
self.address = "ws://"+address
if address.startswith('http://'):
self.address = "ws://" + address[7:]
if address.startswith('https://'):
self.address = "wss://" + address[8:]
self.heartbeat_enabled = True
self.heartbeat_interval = 30 #seconds
self.heartbeat_msg = None
self.heartbeat_thread = None
self.before_send = None
self.after_recv = None
def onmessage(msg):
req = json.loads(msg)
req = Message(data=req)
if self.after_recv:
self.after_recv(req)
if req.headers['id'] in self.callback_table:
cb = self.callback_table[req.headers['id']]
del self.callback_table[req.headers['id']]
if cb.ondata:
cb.ondata(req)
self.onmessage = onmessage
def _on_message(_, msg):
if self.onmessage:
self.onmessage(msg)
self._on_message = _on_message
def onclose(client):
self.log.warn('Trying to reconnect in %d seconds'%self.reconnect_delay)
time.sleep(self.reconnect_delay)
client.websocket = None
client.connect()
self.onclose = onclose
def _on_close(_):
self.connect_active = False
if self.onclose:
self.onclose(self)
self._on_close = _on_close
self.onopen = None
def _on_open(_):
self.log.debug("Connected to %s"%self.address)
self.connect_active = True
for msg in self.pending_messages:
self.websocket.send(msg)
self.pending_messages = []
if self.onopen: #run in new thread, prevent blocking
t = Thread(target=self.onopen, args=(self,))
t.setDaemon(True)
t.start()
self._on_open = _on_open
def onerror(error):
self.log.error(error)
self.onerror = onerror
def _on_error(_, error):
if self.onerror:
self.onerror(error)
self._on_error = _on_error
def enable_auth(self, api_key=None, secret_key=None, auth_enabled=True):
self.auth_enabled = auth_enabled
self.api_key = api_key
self.secret_key = secret_key
def heartbeat(self):
if not self.heartbeat_enabled: return
if self.heartbeat_thread: return
if not self.heartbeat_msg: return
def do_heartbeat():
while True:
time.sleep(self.heartbeat_interval)
if self.websocket:
try:
self.send(self.heartbeat_msg)
except:
pass
self.heartbeat_thread = Thread(target=do_heartbeat)
self.heartbeat_thread.setDaemon(True)
self.heartbeat_thread.start()
def connect(self):
with self.connect_lock:
if self.websocket: return #connecting
if not self.auto_connect:
self.onclose = None
self.websocket = WebSocketApp(self.address,
on_open=self._on_open,
on_message=self._on_message,
on_close=self._on_close,
on_error=self._on_error)
self.heartbeat()
def run():
self.websocket.run_forever()
t = Thread(target=run)
t.setDaemon(False)
t.start()
def invoke(self, req, ondata=None, onerror=None, before_send=None, timeout=10):
req = Message(data=req)
req.headers['id'] = str(uuid.uuid4())
sync = None
if ondata is None:
sync = CountDownLatch(1)
def callback(res):
sync.result = res
sync.count_down()
ondata = callback
cb = self.callback_table[req.headers['id']] = Dict()
cb.ondata = ondata
cb.onerror = onerror
self.send(req, before_send=before_send)
if sync:
sync.wait(timeout)
return sync.result
def send(self, data, before_send=None):
handler = before_send or self.before_send
if handler:
handler(data)
if self.auth_enabled:
if self.api_key is None:
raise 'missing api_key for auth'
if self.secret_key is None:
raise 'missing secret_key for auth'
sign_message(self.api_key, self.secret_key, self.sign_fields, data)
msg = json.dumps(data, default=json_serial)
if not self.connect_active:
self.pending_messages.append(msg)
self.connect()
return
self.websocket.send(msg)
def close(self):
self.onclose = None
if self.websocket:
self.websocket.close()
self.websocket = None
self.connect_active = False
class MqClient(WebsocketClient):
log = logging.getLogger(__name__)
def __init__(self, address):
WebsocketClient.__init__(self, address)
self.handler_table = {} #mq=>{channel=>handler}
self.heartbeat_msg = Message()
self.heartbeat_msg.headers['cmd'] = 'ping'
def onmessage(msg):
req = json.loads(msg)
req = Message(data=req)
if self.after_recv:
self.after_recv(req)
if req.headers['id'] in self.callback_table:
cb = self.callback_table[req.headers['id']]
del self.callback_table[req.headers['id']]
if cb.ondata:
cb.ondata(req)
return
mq, channel = req.headers['mq'], req.headers['channel']
if mq not in self.handler_table:
self.log.warn("Missing handler for mq=%s, msg=%s"%(mq, msg))
return
handlers = self.handler_table[mq]
if channel not in handlers:
self.log.warn("Missing handler for mq=%s, channel=%s"%(mq,channel))
return
mq_handler = handlers[channel]
mq_handler.handler(req)
#update window if limit reached
window = req.headers['window']
if window is not None and int(window) <= mq_handler.window/2:
sub = Message()
sub.headers['cmd'] = 'sub'
sub.headers['mq'] = mq
sub.headers['channel'] = channel
sub.headers['window'] = mq_handler.window
sub.headers['ack'] = 'false'
self.send(sub, mq_handler.before_send)
self.onmessage = onmessage
def add_mq_handler(self, mq=None, channel=None, handler=None, window=1, before_send=None):
if mq not in self.handler_table:
self.handler_table[mq] = {}
mq_handler = Dict()
mq_handler.handler = handler
mq_handler.window = window
mq_handler.before_send = before_send
self.handler_table[mq][channel] = mq_handler
#===================================RPC======================================
'''
path -- url path
method -- http method: GET|POST etc.
'''
def route(path=None, method=None):
def func(fn):
if path:
fn.__path = path
if method:
fn.__method = method
return fn
return func
def exclude():
def func(fn):
fn.__exclude = True
return fn
return func
def add_filter(filter_fn=None):
def func(fn):
if filter_fn:
fn.__filter = filter_fn
fn.__class_filter = (fn, filter_fn)
return fn
return func
#-------------------------------------------------------------------------
def join_path(*args):
if len(args) == 1 and isinstance(args[0], list):
args = args[0]
p = '/'.join(args)
p = '/'+p
p = re.sub(r'[/]+','/', p)
if len(p) > 1 and p.endswith('/'):
p = p[0:-1]
return p
class RpcClient(WebsocketClient):
def __init__(self, address, url_prefix='', timeout=10):
WebsocketClient.__init__(self, address)
self.timeout = timeout #10s for sync invoke
self.heartbeat_msg = Message()
self.heartbeat_msg.headers['cmd'] = 'ping'
self.url_prefix = url_prefix
def invoke(self, method='', params=[], url_prefix='', ondata=None, onerror=None, timeout=10):
req = Message()
req.url = join_path(url_prefix, method)
req.body = params
sync = None
if ondata is None:
sync = CountDownLatch(1)
def callback(res):
sync.result = res
sync.count_down()
ondata = callback
def onmessage(msg):
if msg.status == 200:
ondata(msg.body)
else:
e = Exception(msg.body)
if onerror:
onerror(e)
else:
if sync:
sync.error = e
sync.count_down()
WebsocketClient.invoke(self, req, ondata=onmessage, onerror=onerror)
if sync:
sync.wait(timeout)
if sync.error:
raise sync.error
return sync.result
def __getattr__(self, name):
return self._invoker(name)
def _invoker(self, module):
url_prefix = join_path(self.url_prefix, module)
return RpcInvoker(client=self, url_prefix=url_prefix, timeout=self.timeout)
class RpcInvoker:
def __init__(self, client=None, url_prefix='', method='', timeout=10):
self.client = client
self.url_prefix = url_prefix
self.method = method
self.timeout = timeout
def __getattr__(self, name):
return RpcInvoker(client=self.client, url_prefix=self.url_prefix, method=name, timeout=self.timeout)
def __call__(self, *args, **kv_args):
return self.client.invoke(method=self.method, params=args, url_prefix=self.url_prefix, timeout=self.timeout,**kv_args)
class RpcInfo:
RpcInfoTemplate = '''
<html><head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8">
<title>%s Python</title>
%s
</head>
<body>
<script>
var rpc;
function init(){
rpc = new RpcClient(null,'%s');
}
</script>
<script async src="https://unpkg.com/zbus/zbus.min.js" onload="init()">
</script>
<div>
<div class="url">
<span>URL=%s[module]/[method]/[param1]/[param2]/...</span>
</div>
<table class="table">
<thead>
<tr class="table-info">
<th class="urlPath">URL Path</th>
<th class="returnType">Return Type</th>
<th class="methodParams">Method and Params</th>
</tr>
<thead>
<tbody>
%s
</tbody>
</table> </div> </body></html>
'''
RpcStyleTemplate = '''
<style type="text/css">
body {
font-family: -apple-system,system-ui,BlinkMacSystemFont,'Segoe UI',Roboto,'Helvetica Neue',Arial,sans-serif;
font-size: 1rem;
font-weight: 400;
line-height: 1.5;
color: #292b2c;
background-color: #fff;
margin: 0px;
padding: 0px;
}
table { background-color: transparent; display: table; border-collapse: separate; border-color: grey; }
.table { width: 100%; max-width: 100%; margin-bottom: 1rem; }
.table th { height: 30px; }
.table td, .table th { border-bottom: 1px solid #eceeef; text-align: left; padding-left: 16px;}
th.urlPath { width: 10%; }
th.returnType { width: 10%; }
th.methodParams { width: 80%; }
td.returnType { text-align: right; }
thead { display: table-header-group; vertical-align: middle; border-color: inherit;}
tbody { display: table-row-group; vertical-align: middle; border-color: inherit;}
tr { display: table-row; vertical-align: inherit; border-color: inherit; }
.table-info, .table-info>td, .table-info>th { background-color: #dff0d8; }
.url { margin: 4px 0; padding-left: 16px;}
</style>
'''
RpcMethodTemplate = '''
<tr>
<td class="urlPath"> <a href="%s"/>%s</a> </td>
<td class="returnType"></td>
<td class="methodParams">
<code><strong><a href="%s"/>%s</a></strong>(%s)</code>
</td>
</tr>
'''
def __init__(self, rpc_processor, url_prefix='/'):
self.rpc_processor = rpc_processor
self.url_prefix = url_prefix #golbal url_prefix
@route('/')
def index(self):
res = Message()
res.status = 200
res.headers[Protocol.CONTENT_TYPE] = 'text/html; charset=utf-8'
rpc = self.rpc_processor
info = ''
for urlpath in rpc.urlpath2method:
m = rpc.urlpath2method[urlpath].info
if not m.doc_enabled: continue
link = join_path(self.url_prefix, urlpath)
args = ', '.join(m.params)
info += RpcInfo.RpcMethodTemplate%(link, link, link, m.method , args)
res.body = RpcInfo.RpcInfoTemplate%(self.url_prefix, RpcInfo.RpcStyleTemplate, self.url_prefix, self.url_prefix, info)
return res
class Application:
log = logging.getLogger(__name__)
def __init__(self):
self.doc_url_prefix = '/doc'
self.doc_enabled = True
self.urlpath2method = Dict()
self.global_filters = []
self.error_pages = Dict() # status=>page generator
@staticmethod
def _get_filter(obj, filter_name='__class_filter'):
t = inspect.getmembers(obj)
for kv in t:
if kv[0] == filter_name:
f = kv[1]
return f[1]
def mount(self, prefix, service, doc_enabled=True):
if inspect.isclass(service):
service = service()
class_filter = RpcProcessor._get_filter(service, '__class_filter')
methods = inspect.getmembers(service, predicate=inspect.ismethod)
for method in methods:
method_name = str(method[0])
if method_name.startswith('_'):
continue
url_path = join_path(prefix, method_name)
http_methods = []
filter_array = []
if class_filter:
filter_array.append(class_filter)
exclude = False
if hasattr(method[1], '__path'):
url_path = join_path(prefix, getattr(method[1], '__path'))
if hasattr(method[1], '__method'):
http_method = getattr(method[1], '__method')
if isinstance(http_method, (tuple, list)):
http_methods = [k.upper() for k in http_method]
else:
http_methods = [http_method.upper()]
if hasattr(method[1], '__filter'):
req_filter = getattr(method[1], '__filter')
filter_array.append(req_filter)
if hasattr(method[1], '__exclude'):
exclude = getattr(method[1], '__exclude')
if url_path in self.urlpath2method:
self.log.warn('{URL=%s, method=%s} duplicated' % (url_path, method_name))
if exclude:
continue
params = inspect.getargspec(method[1])
info = Dict()
info.url_path = url_path
info.method = method_name
info.http_methods = http_methods
info.params = params.args[1:]
info.defaults = params.defaults or {}
info.doc_enabled = doc_enabled
method_instance = Dict()
method_instance.method = method[1]
method_instance.params = params.args[1:]
method_instance.info = info
method_instance.filter_array = filter_array
self.urlpath2method[url_path] = method_instance # (method[1], params.args, info)
def rpc_method_list(self):
res = []
for m in self.urlpath2method.values():
info = m.info
res.append({
'urlPath': info.url_path,
'method': info.method,
'httpMethods': info.http_methods,
'params': [{'name': p} for p in info.params],
'defaults': info.defaults,
'returnType': None
})
return res
def mount_doc(self, url_prefix='/'):
if self.doc_enabled:
if self.doc_url_prefix in self.urlpath2method: return
self.mount(self.doc_url_prefix, RpcInfo(self, url_prefix), doc_enabled=False)
def _reply(self, res, status, msg=None, request=None):
res.status = status
res.headers[Protocol.CONTENT_TYPE] = 'text/html; charset=utf8'
fn = self.error_pages[status]
if fn:
try:
res.body = fn(request=request, msg=msg)
except Exception:
error = traceback.format_exc()
self.log.error(error)
res.body = '%s'%(msg)
else:
if msg:
res.body = '%s'%(msg)
else:
res.body = 'Status=%s: Missing error page?'%status
def set_error_page(self, status, page_generator):
fn = page_generator
if isinstance(fn, str):
def error_page(**kvargs):
return page_generator
fn = error_page
self.error_pages[status] = fn
def _parse_params(self, s):
bb = s.split('?')
path = bb[0]
qs = None
if len(bb) > 1:
qs = ''.join(bb[1:])
bb = path.split('/')
res = [b for b in bb if b != '']
query_map = {}
if qs:
data = {}
bb = qs.split('&')
for kv in bb:
if kv == '': continue
kk = kv.split('=')
key = kk[0]
if len(kk) > 0:
val = ''.join(kk[1:])
if key in data:
val0 = data[key]
if not isinstance(val0, (tuple, list)):
val0 = [val0]
val0.append(val)
val = val0
data[key] = val
if len(data) > 0:
query_map = data
return (res, query_map)
def parse_request(self, req, res):
url = req.url
query_map = {}
if req.body:
content_type = req.content_type() #req.headers[Protocol.CONTENT_TYPE]
if content_type and (content_type.startswith('multipart/form-data')\
or content_type.startswith('application/x-www-form-urlencoded')):
if isinstance(req.body, str):
req.body = json.loads(req.body)
if 'files' in req.body and len(req.body['files']) == 0:
if 'attributes' in req.body:
query_map = req.body['attributes'] #form attributes data as query params
req.body = []
else:
req.body = [req.body]
elif content_type and content_type.startswith('application/json'):
req.body = json.loads(req.body, encoding='utf8')
if not isinstance(req.body, list):
if isinstance(req.body, dict):
req.body = Dict(req.body)
req.body = [req.body]
elif isinstance(req.body, list):
pass
else:
res.body = [res.body]
params = req.body or []
length = 0
target_method = None
target_path = None
for path in self.urlpath2method:
if url == path or url.startswith('%s/'%path) or url.startswith('%s?'%path):
if length < len(path):
length = len(path)
target_path, target_method = (path, self.urlpath2method[path])
if target_method is None:
self._reply(res, 404, msg='Url=%s Not Found'%(url), request=req)
return (None, None, None)
method_info = target_method.info
http_methods = method_info.http_methods
if http_methods:
if req.method is None:
method_str = ','.join(http_methods)
self._reply(res, 405, msg='Method(%s) required'%(method_str), request=req)
return (None, None, None)
if req.method.upper() not in http_methods:
self._reply(res, 405, msg='%s Not Allowed'%(req.method), request=req)
return (None, None, None)
if params == [] and len(query_map)==0:
params, query_map = self._parse_params(url[len(target_path):])
return (target_method, params, query_map)
def normalize_params(self, req, res, target_method, params, query_map, kvargs):
url = req.url
method_info = target_method.info
total_params = len(target_method.params)
kvargs_len = len(method_info.defaults)
args_len = total_params - kvargs_len
if len(params) == 0 and total_params == 1 and len(query_map) > 0:
params.append(Dict(query_map))
return True
if len(params) > total_params: #last parameter as path left
left = total_params -1
if left < 0: left = 0
params0 = params[0:left]
last = join_path(params[left:])
params0.append(last)
params[:] = []
for p in params0:
params.append(p)
return True
if len(query_map) == 0 and len(params) < args_len:
params_str = ','.join(method_info.params)
self._reply(res, 400, msg='URL=%s, Method=%s, Params=(%s), Bad Request'%(url, method_info.method, params_str), request=req)
return False
i = len(params)
while i<args_len: #take args from kvargs
arg_name = method_info.params[i]
if arg_name not in query_map:
params_str = ','.join(method_info.params)
self._reply(res, 400, msg='URL=%s, Method=%s, Params=(%s), Missing value for key=%s'%(url, method_info.method, params_str, arg_name), request=req)
return
else:
params.append(query_map[arg_name])
i += 1
i = len(params)
while i<total_params:
arg_name = method_info.params[i]
if arg_name not in query_map:
kvargs[arg_name] = method_info.defaults[i-args_len]
else:
kvargs[arg_name] = query_map[arg_name]
i += 1
return True
'''
query_map: query string or form body parsed dictionary
'''
def process(self, req, res):
context.request = req
context.response = res
for f in self.global_filters:
do_next = f(req, res)
if not do_next:
return
url = req.url
if not url:
self._reply(res, 400, msg='Missing url in request', request=req)
return
target_method, params, query_map = self.parse_request(req, res)
if not target_method: return
method = target_method.method
method_info = target_method.info
try:
kvargs = {}
ok = self.normalize_params(req, res, target_method, params, query_map, kvargs)
if not ok: return
filter_array = target_method.filter_array
if len(filter_array) > 0:
req.method_info = Dict()
req.method_info.method = method_info.method
req.method_info.params = params
for f in filter_array:
do_next = f(req, res)
if not do_next:
return
result = method(*params, **kvargs)
if isinstance(result, Message):
if result != res: #may be same
res.replace(result)
else:
res.headers[Protocol.CONTENT_TYPE] = 'application/json; charset=utf8'
res.body = result
if not res.status:
res.status = 200
except Exception:
error = traceback.format_exc()
self.log.error(error)
self._reply(res, 500, msg='<pre>%s</pre>'%error, request=req)
return
def __call__(self, *args):
return self.process(*args)
def add_filter(self, filter_fn):
self.global_filters.append(filter_fn)
def run(self, url='localhost:15555',
use_thread=True,
mq_mask=Protocol.MASK_DELETE_ON_EXIT,
mq_type = None,
channel = None,
heartbeat_interval = 30,
auth_enabled = False,
api_key = None,
secret_key = None
):
idx = url.find('://')
if idx < 0:
idx = 0
else:
idx = idx + 3
idx = url.find('/', idx)
server_address = url
url_prefix = '/'
if idx > 0:
server_address = url[0:idx]
url_prefix = url[idx:]
server = RpcServer(self)
server.mq_server_address = server_address
server.mq = url_prefix
server.use_thread = use_thread
server.mq_mask = mq_mask
server.mq_type = mq_type
server.channel = channel
server.heartbeat_interval = heartbeat_interval
server.auth_enabled = auth_enabled
server.api_key = api_key
server.secret_key = secret_key
server.start()
return server
class RpcProcessor(Application):
def __init__(self):
Application.__init__(self)
class RpcServer:
log = logging.getLogger(__name__)
def __init__(self, processor):
self.mq_server_address = None
self.mq = None
self.mq_type = None
self.mq_mask = None
self.channel = None
self.heartbeat_interval = 30
self.use_thread = False
self.auth_enabled = False
self.api_key = None
self.secret_key = None
self.processor = processor
self._mqclient = None
def enable_auth(self, api_key=None, secret_key=None, auth_enabled=True):
self.auth_enabled = auth_enabled
self.api_key = api_key
self.secret_key = secret_key
def start(self):
if self.mq_server_address is None:
raise Exception("missing mq_server_address")
if self.mq is None:
raise Exception("missing mq")
if self.channel is None:
self.channel = self.mq
self.processor.mount_doc(join_path(self.mq))
client = self._mqclient = MqClient(self.mq_server_address)
client.auth_enabled = self.auth_enabled
client.api_key = self.api_key
client.secret_key = self.secret_key
def create_mq(client):
def sub_cb(res):
if res.status != 200:
self.log.error(res)
else:
self.log.info(res)
def create_cb(res):
if res.status != 200:
self.log.error(res)
else:
self.log.info(res)
msg = Message()
msg.headers['cmd'] = 'sub'
msg.headers['mq'] = self.mq
msg.headers['channel'] = self.channel
client.invoke(msg, ondata=sub_cb)
msg = Message()
msg.headers['cmd'] = 'create'
msg.headers['mq'] = self.mq
msg.headers['mqType'] = self.mq_type
msg.headers['mqMask'] = self.mq_mask
msg.headers['channel'] = self.channel
msg.body = self.processor.rpc_method_list()
client.invoke(msg, ondata=create_cb)
url_prefix = join_path(self.mq)
def _rpc_handler(client, processor, req):
if req.status == 404:
create_mq(client)
return
if req.url and req.url.startswith(url_prefix):
req.url = req.url[len(url_prefix):]
req.url = join_path('/', req.url)
res = Message()
msgid = req.headers['id']
target = req.headers['source']
try:
processor(req, res)
except Exception:
error = traceback.format_exc()
self.log.error(error)
res.status = 500
res.headers[Protocol.CONTENT_TYPE] = 'text/plain; charset=utf8'
res.body = str(error)
if not res.status:
res.status = 200
res.headers['cmd'] = 'route'
res.headers['id'] = msgid
res.headers['target'] = target
client.send(res)
def rpc_handler(req):
if self.use_thread:
t = Thread(target=_rpc_handler, args=(client, self.processor, req))
t.start()
else:
_rpc_handler(client, self.processor, req)
client.add_mq_handler(mq=self.mq, channel=self.channel, handler=rpc_handler)
client.onopen = create_mq
client.connect()
def close(self):
if self._mqclient:
self._mqclient.close()
self._mqclient = None
class StaticResource(object):
log = logging.getLogger(__name__)
def __init__(self, base_dir=None, cache_enabled=False, mime_types_fn=None):
self.base_dir = base_dir
self.cache_enabled = cache_enabled
self.file_table = {}
self._mime_type_fn = mime_types_fn
self._mime_types = {
"js": "application/javascript",
"json": "application/json",
"css": "text/css",
"htm": "text/html",
"html": "text/html",
"svg": "image/svg+xml",
"gif": "image/gif",
"jpeg": "image/jpeg",
"jpg": "image/jpg",
"ico": "image/x-icon",
"png": "image/png",
"pdf": "application/pdf",
"zip": "application/zip",
"ttf": "application/x-font-ttf",
"eot": "font/opentype"
}
def mime_type(ext):
if self._mime_type_fn:
res = self._mime_type_fn(ext)
if not res: return res
if ext in self._mime_types:
return self._mime_types[ext]
return None
self.mime_type_fn = mime_type
@route('/')
def file(self, file_name):
file_path = os.sep.join(file_name.split('/'))
if file_path[0] == os.sep:
file_path = file_path[1:]
if self.base_dir:
file_path = os.path.join(self.base_dir, file_path)
if self.cache_enabled and file_path in self.file_table:
file_content = self.file_table[file_path]
else:
try:
file_content = file_read(file_path, encoding='utf8')
self.file_table[file_path] = file_content
except Exception:
error = traceback.format_exc()
self.log.error(error)
return Message(404, 'File=%s Not Found'%file_name)
res = Message()
res.status = 200
content_type = None
idx = file_name.rfind('.')
if idx != -1:
ext = file_name[idx+1:]
content_type = self.mime_type_fn(ext)
if not content_type:
content_type = 'text/plain'
res.headers[Protocol.CONTENT_TYPE] = '%s; charset=utf8'%content_type
res.body = file_content
return res
class Template(object):
def __init__(self, base_dir='./', cache_enabled=False, ctx=None):
self.base_dir = base_dir
self.cache_enabled = cache_enabled
import jinja2
self.ctx = ctx
self.template_env = jinja2.Environment(loader=jinja2.FileSystemLoader(self.base_dir))
def render(self, tpl_file, **kvargs):
tpl = self.template_env.get_template(tpl_file)
if self.ctx and 'ctx' not in kvargs:
kvargs['ctx'] = self.ctx
s = tpl.render(**kvargs)
res = Message()
res.status = 200
res.headers[Protocol.CONTENT_TYPE] = 'text/html; charset=utf8'
res.body = s
return res
def __call__(self, tpl_file, **kvargs):
return self.render(tpl_file, **kvargs)
class Controller:
def __init__(self, db=None, template=None):
from sqlalchemy.orm import sessionmaker
self.sessionmaker = sessionmaker
from sqlalchemy.sql import text
self.text = text
self.template = template
self.db = db
if self.db:
self.Session = sessionmaker(bind=self.db)
if self.template:
self.url_prefix = self.template.ctx['url_prefix'] or ''
@exclude()
def redirect(self, location):
return redirect(join_path(self.url_prefix, location))
@exclude()
def render(self, tpl_file, **kvargs):
return self.template.render(tpl_file, **kvargs)
@exclude()
def query(self, sql, converter=None, **kvargs):
with self.db.connect() as con:
sql = self.text(sql)
rs = con.execute(sql, **kvargs)
def c(row):
if not converter: return Dict(row)
return converter(row)
return [c(row) for row in rs]
@exclude()
def execute_sql(self, sql, **kvargs):
with self.db.connect() as con:
sql = self.text(sql)
return con.execute(sql, **kvargs)
@exclude()
def query_one(self, sql, converter=None, **kvargs):
res = self.query(sql, converter, **kvargs)
if len(res) > 0: return res[0]
@exclude()
def save(self, m):
sess = self.Session()
sess.merge(m)
sess.commit()
sess.close()
return {c.name: getattr(m, c.name) for c in m.__table__.columns} #json issue
def rpc_service(rpc_processor, server_address='localhost:15555', url_prefix='/', use_thread=True):
server = RpcServer(rpc_processor)
server.mq_server_address = server_address
server.mq = url_prefix
server.use_thread = use_thread
server.mq_mask = Protocol.MASK_DELETE_ON_EXIT#delete on exit
return server
def start_service(rpc_processor, server_address='localhost:15555', url_prefix='/', use_thread=True):
server = rpc_service(rpc_processor, server_address=server_address, url_prefix=url_prefix, use_thread=use_thread)
server.start() | zbuspy | /zbuspy-1.2.0.zip/zbuspy-1.2.0/zbus/zbus.py | zbus.py |
.. image:: https://pypip.in/download/zbx-dashboard/badge.svg
:target: https://pypi.python.org/pypi/zbx-dashboard/
:alt: Downloads
.. image:: https://pypip.in/version/zbx-dashboard/badge.svg
:target: https://pypi.python.org/pypi/zbx-dashboard/
:alt: Latest Version
.. image:: https://travis-ci.org/banzayats/zbx-dashboard.svg?branch=master
:target: https://travis-ci.org/banzayats/zbx-dashboard
:alt: Travis CI
===============================================
zbx_dashboard
===============================================
`zbx_dashboard` is a simple Django applicattion that provides an alternative to the Zabbix screens.
It allows users who are not registered in Zabbix being able to view the graphs and (in the future) more data from Zabbix.
Prerequisites
===============================================
- Django 1.5.*, 1.6.*, 1.7.*
- Python 2.6.8+, 2.7.*
Main features
===============================================
- Group graphs in separate dashboards
- Rearrange graphs on the dashboard
- Each dashboard and the graph can be provided a brief description
- Dashboards may belong to different groups of users
Installation
===============================================
1. Install latest stable version from PyPI:
.. code-block:: none
$ pip install zbx-dashboard
Or latest stable version from GitHub:
.. code-block:: none
$ pip install -e git+https://github.com/banzayats/zbx-dashboard@stable#egg=zbx-dashboard
2. Edit your projects' Django settings:
.. code-block:: python
INSTALLED_APPS = (
'admin_tools',
'admin_tools.theming',
'admin_tools.menu',
'admin_tools.dashboard',
'tinymce',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'zbx_dashboard',
)
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
LOGIN_REDIRECT_URL = '/boards'
# Set up your Zabbix server credentials
ZABBIX_URL = 'https://zabbix.org/zabbix/'
ZABBIX_USER = 'guest'
ZABBIX_PASS = ''
# TinyMCE
TINYMCE_DEFAULT_CONFIG = {
'mode': 'exact',
'theme': "advanced",
'relative_urls': False,
'width': 400,
'height': 200,
'plugins': 'inlinepopups,preview,media,contextmenu,paste,fullscreen,noneditable,visualchars,nonbreaking,xhtmlxtras',
'theme_advanced_buttons1': 'fullscreen,|,bold,italic,underline,strikethrough,|,sub,sup,|,bullist,numlist,|,outdent,indent,|,formatselect,removeformat,|,preview,code',
'theme_simple_toolbar_location': 'top',
'theme_advanced_toolbar_align': 'left',
}
LOCALE_PATHS = (
os.path.join(BASE_DIR, 'locale'),
)
3. Add to urls.py:
.. code-block:: python
from django.contrib.auth.views import login, logout
urlpatterns = patterns('',
# ...
url(r'^admin_tools/', include('admin_tools.urls')),
url(r'^boards/', include('zbx_dashboard.urls', namespace="boards")),
url(r'^accounts/login/$', login, name='login'),
url(r'^accounts/logout/$', logout, name='logout'),
url(r'^tinymce/', include('tinymce.urls')),
)
4. Run:
.. code-block:: none
$ python manage.py syncdb
This creates a few tables in your database that are necessary for operation.
5. Make ``static`` directory in your projects' root directory and run:
.. code-block:: none
$ python manage.py collectstatic
6. Test the application. Run the development server:
.. code-block:: none
$ python manage.py runserver 0.0.0.0:5000
Demo
===============================================
Demo site: http://boyard.pp.ua
login: admin, password: admin
| zbx-dashboard | /zbx-dashboard-0.1.2.tar.gz/zbx-dashboard-0.1.2/README.rst | README.rst |
from zbx_dashboard.models import Board
from zbx_dashboard.forms import SelectForm, GraphFormSet, BoardForm
from zbx_dashboard.utils import zbx_get_graphs, zbx_get_screen_name
from django.views.generic import ListView, DetailView, CreateView, UpdateView
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.http import HttpResponseRedirect
class BoardListView(ListView):
"""
List of all available dashboards
"""
model = Board
template_name = 'zbx_dashboard/list.html'
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(BoardListView, self).dispatch(request, *args, **kwargs)
# Filter dashboards by current user group id
def get_queryset(self):
try:
user_id = self.request.user.groups.all()[0].id
except IndexError:
user_id = 0
return Board.objects.filter(
groups__id=user_id
)
class BoardDetailView(DetailView):
"""
Dashboard view
"""
model = Board
template_name = 'zbx_dashboard/detail.html'
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(BoardDetailView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(BoardDetailView, self).get_context_data(**kwargs)
context['form'] = SelectForm(initial=self.request.GET)
context['period'] = self.request.GET.get('select', 86400)
return context
# Prevent user changing URL <pk> to see other dashboards
def get_queryset(self):
try:
user_id = self.request.user.groups.all()[0].id
except IndexError:
user_id = 0
return Board.objects.filter(
groups__id=user_id
)
class BoardCreateView(CreateView):
model = Board
template_name = 'zbx_dashboard/add.html'
form_class = BoardForm
def __init__(self):
self.object = None
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(BoardCreateView, self).dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
"""
Handles GET requests and instantiates blank versions of the form
and its inline formsets.
"""
screen_id = self.request.GET.get('screen_id')
initials = zbx_get_graphs(screen_id) # get inital items from zabbix
form_class = self.get_form_class()
form = self.get_form(form_class)
form.initial = {'title': zbx_get_screen_name(screen_id)}
graph_form = GraphFormSet()
graph_form.extra = len(initials) + 1
for subform, data in zip(graph_form.forms, initials):
subform.initial = data
return self.render_to_response(
self.get_context_data(form=form, graph_form=graph_form)
)
def post(self, request, *args, **kwargs):
"""
Handles POST requests, instantiating a form instance and its inline
formsets with the passed POST variables and then checking them for
validity.
"""
form_class = self.get_form_class()
form = self.get_form(form_class)
graph_form = GraphFormSet(self.request.POST)
if form.is_valid() and graph_form.is_valid():
return self.form_valid(form, graph_form)
else:
return self.form_invalid(form, graph_form)
def form_valid(self, form, graph_form):
"""
Called if all forms are valid. Creates a Board instance along with
associated Graphs and then redirects to a success page.
"""
self.object = form.save()
graph_form.instance = self.object
graph_form.save()
return HttpResponseRedirect(self.get_success_url())
def form_invalid(self, form, graph_form):
"""
Called if a form is invalid. Re-renders the context data with the
data-filled forms and errors.
"""
return self.render_to_response(
self.get_context_data(form=form, graph_form=graph_form)
)
class BoardUpdateView(UpdateView):
model = Board
template_name = 'zbx_dashboard/update.html'
form_class = BoardForm
def __init__(self):
self.object = None
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(BoardUpdateView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(BoardUpdateView, self).get_context_data(**kwargs)
if self.request.POST:
context['graph_form'] = GraphFormSet(self.request.POST,
instance=self.object)
else:
context['graph_form'] = GraphFormSet(instance=self.object)
return context
def form_valid(self, form):
self.object = form.save(commit=False)
context = self.get_context_data()
graph_form = context['graph_form']
if graph_form.is_valid():
self.object = form.save()
graph_form.instance = self.object
graph_form.save()
return HttpResponseRedirect(self.get_success_url())
else:
return self.render_to_response(self.get_context_data(form=form))
# Prevent user changing URL <pk> to see other dashboard
def get_queryset(self):
try:
user_id = self.request.user.groups.all()[0].id
except IndexError:
user_id = 0
return Board.objects.filter(
groups__id=user_id
) | zbx-dashboard | /zbx-dashboard-0.1.2.tar.gz/zbx-dashboard-0.1.2/zbx_dashboard/views.py | views.py |
from django.db import models
from django.contrib.auth.models import Group
from tinymce import models as tinymce_models
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
import base64
from StringIO import StringIO
import sys
sys.path.append('/usr/lib64/python2.6/site-packages/')
import pycurl
class Board(models.Model):
title = models.CharField(
verbose_name=_(u'Title'),
max_length=255
)
description = models.TextField(
verbose_name=_('Short description'),
max_length=1000,
blank=True,
null=True
)
groups = models.ManyToManyField(
Group,
blank=True,
verbose_name=_('Linked groups'),
)
def __unicode__(self):
return self.title
def get_absolute_url(self):
return "/boards/%i/" % self.id
def get_update_url(self):
return "/boards/update/%i/" % self.id
def get_groups(self):
return ", ".join([p.name for p in self.groups.all()])
get_groups.short_description = _('Linked groups')
class Meta:
verbose_name = _('Board')
verbose_name_plural = _('Boards')
class Graph(models.Model):
widget = models.ForeignKey(Board)
title = models.CharField(
verbose_name=_(u'Title'),
max_length=255
)
graph_id = models.CharField(
verbose_name=_(u'Graph ID'),
max_length=8
)
description = tinymce_models.HTMLField(
verbose_name=_(u'Graph description'),
max_length=1000,
blank=True,
null=True,
)
def __unicode__(self):
return self.title
def get_b64_img(self, period=86400):
buff = StringIO()
curl = pycurl.Curl()
curl.setopt(pycurl.COOKIEFILE, "")
curl.setopt(pycurl.SSL_VERIFYPEER, 0)
curl.setopt(pycurl.SSL_VERIFYHOST, 0)
login_url = settings.ZABBIX_URL + 'index.php?login=1'
curl.setopt(pycurl.URL, login_url)
curl.setopt(pycurl.POST, 1)
curl.setopt(
pycurl.HTTPPOST,
[
('name', settings.ZABBIX_USER),
('password', settings.ZABBIX_PASS),
('enter', 'Sign in'),
('autologin', '1'),
('request', '')
]
)
curl.perform()
curl.setopt(pycurl.POST, 0)
img_url = settings.ZABBIX_URL + 'chart2.php?graphid=' + \
str(self.graph_id) + '&width=400&height=200&period=' + str(period)
curl.setopt(pycurl.URL, img_url)
curl.setopt(pycurl.WRITEFUNCTION, buff.write)
curl.perform()
img = buff.getvalue()
buff.close()
curl.close()
return "data:image/jpg;base64,%s" % base64.b64encode(img)
class Meta:
verbose_name = _('Graph')
verbose_name_plural = _('Graphs') | zbx-dashboard | /zbx-dashboard-0.1.2.tar.gz/zbx-dashboard-0.1.2/zbx_dashboard/models.py | models.py |
if("undefined"==typeof jQuery)throw new Error("Bootstrap's JavaScript requires jQuery");+function(a){"use strict";function b(){var a=document.createElement("bootstrap"),b={WebkitTransition:"webkitTransitionEnd",MozTransition:"transitionend",OTransition:"oTransitionEnd otransitionend",transition:"transitionend"};for(var c in b)if(void 0!==a.style[c])return{end:b[c]};return!1}a.fn.emulateTransitionEnd=function(b){var c=!1,d=this;a(this).one("bsTransitionEnd",function(){c=!0});var e=function(){c||a(d).trigger(a.support.transition.end)};return setTimeout(e,b),this},a(function(){a.support.transition=b(),a.support.transition&&(a.event.special.bsTransitionEnd={bindType:a.support.transition.end,delegateType:a.support.transition.end,handle:function(b){return a(b.target).is(this)?b.handleObj.handler.apply(this,arguments):void 0}})})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var c=a(this),e=c.data("bs.alert");e||c.data("bs.alert",e=new d(this)),"string"==typeof b&&e[b].call(c)})}var c='[data-dismiss="alert"]',d=function(b){a(b).on("click",c,this.close)};d.VERSION="3.2.0",d.prototype.close=function(b){function c(){f.detach().trigger("closed.bs.alert").remove()}var d=a(this),e=d.attr("data-target");e||(e=d.attr("href"),e=e&&e.replace(/.*(?=#[^\s]*$)/,""));var f=a(e);b&&b.preventDefault(),f.length||(f=d.hasClass("alert")?d:d.parent()),f.trigger(b=a.Event("close.bs.alert")),b.isDefaultPrevented()||(f.removeClass("in"),a.support.transition&&f.hasClass("fade")?f.one("bsTransitionEnd",c).emulateTransitionEnd(150):c())};var e=a.fn.alert;a.fn.alert=b,a.fn.alert.Constructor=d,a.fn.alert.noConflict=function(){return a.fn.alert=e,this},a(document).on("click.bs.alert.data-api",c,d.prototype.close)}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.button"),f="object"==typeof b&&b;e||d.data("bs.button",e=new c(this,f)),"toggle"==b?e.toggle():b&&e.setState(b)})}var c=function(b,d){this.$element=a(b),this.options=a.extend({},c.DEFAULTS,d),this.isLoading=!1};c.VERSION="3.2.0",c.DEFAULTS={loadingText:"loading..."},c.prototype.setState=function(b){var c="disabled",d=this.$element,e=d.is("input")?"val":"html",f=d.data();b+="Text",null==f.resetText&&d.data("resetText",d[e]()),d[e](null==f[b]?this.options[b]:f[b]),setTimeout(a.proxy(function(){"loadingText"==b?(this.isLoading=!0,d.addClass(c).attr(c,c)):this.isLoading&&(this.isLoading=!1,d.removeClass(c).removeAttr(c))},this),0)},c.prototype.toggle=function(){var a=!0,b=this.$element.closest('[data-toggle="buttons"]');if(b.length){var c=this.$element.find("input");"radio"==c.prop("type")&&(c.prop("checked")&&this.$element.hasClass("active")?a=!1:b.find(".active").removeClass("active")),a&&c.prop("checked",!this.$element.hasClass("active")).trigger("change")}a&&this.$element.toggleClass("active")};var d=a.fn.button;a.fn.button=b,a.fn.button.Constructor=c,a.fn.button.noConflict=function(){return a.fn.button=d,this},a(document).on("click.bs.button.data-api",'[data-toggle^="button"]',function(c){var d=a(c.target);d.hasClass("btn")||(d=d.closest(".btn")),b.call(d,"toggle"),c.preventDefault()})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.carousel"),f=a.extend({},c.DEFAULTS,d.data(),"object"==typeof b&&b),g="string"==typeof b?b:f.slide;e||d.data("bs.carousel",e=new c(this,f)),"number"==typeof b?e.to(b):g?e[g]():f.interval&&e.pause().cycle()})}var 
c=function(b,c){this.$element=a(b).on("keydown.bs.carousel",a.proxy(this.keydown,this)),this.$indicators=this.$element.find(".carousel-indicators"),this.options=c,this.paused=this.sliding=this.interval=this.$active=this.$items=null,"hover"==this.options.pause&&this.$element.on("mouseenter.bs.carousel",a.proxy(this.pause,this)).on("mouseleave.bs.carousel",a.proxy(this.cycle,this))};c.VERSION="3.2.0",c.DEFAULTS={interval:5e3,pause:"hover",wrap:!0},c.prototype.keydown=function(a){switch(a.which){case 37:this.prev();break;case 39:this.next();break;default:return}a.preventDefault()},c.prototype.cycle=function(b){return b||(this.paused=!1),this.interval&&clearInterval(this.interval),this.options.interval&&!this.paused&&(this.interval=setInterval(a.proxy(this.next,this),this.options.interval)),this},c.prototype.getItemIndex=function(a){return this.$items=a.parent().children(".item"),this.$items.index(a||this.$active)},c.prototype.to=function(b){var c=this,d=this.getItemIndex(this.$active=this.$element.find(".item.active"));return b>this.$items.length-1||0>b?void 0:this.sliding?this.$element.one("slid.bs.carousel",function(){c.to(b)}):d==b?this.pause().cycle():this.slide(b>d?"next":"prev",a(this.$items[b]))},c.prototype.pause=function(b){return b||(this.paused=!0),this.$element.find(".next, .prev").length&&a.support.transition&&(this.$element.trigger(a.support.transition.end),this.cycle(!0)),this.interval=clearInterval(this.interval),this},c.prototype.next=function(){return this.sliding?void 0:this.slide("next")},c.prototype.prev=function(){return this.sliding?void 0:this.slide("prev")},c.prototype.slide=function(b,c){var d=this.$element.find(".item.active"),e=c||d[b](),f=this.interval,g="next"==b?"left":"right",h="next"==b?"first":"last",i=this;if(!e.length){if(!this.options.wrap)return;e=this.$element.find(".item")[h]()}if(e.hasClass("active"))return this.sliding=!1;var j=e[0],k=a.Event("slide.bs.carousel",{relatedTarget:j,direction:g});if(this.$element.trigger(k),!k.isDefaultPrevented()){if(this.sliding=!0,f&&this.pause(),this.$indicators.length){this.$indicators.find(".active").removeClass("active");var l=a(this.$indicators.children()[this.getItemIndex(e)]);l&&l.addClass("active")}var m=a.Event("slid.bs.carousel",{relatedTarget:j,direction:g});return a.support.transition&&this.$element.hasClass("slide")?(e.addClass(b),e[0].offsetWidth,d.addClass(g),e.addClass(g),d.one("bsTransitionEnd",function(){e.removeClass([b,g].join(" ")).addClass("active"),d.removeClass(["active",g].join(" ")),i.sliding=!1,setTimeout(function(){i.$element.trigger(m)},0)}).emulateTransitionEnd(1e3*d.css("transition-duration").slice(0,-1))):(d.removeClass("active"),e.addClass("active"),this.sliding=!1,this.$element.trigger(m)),f&&this.cycle(),this}};var d=a.fn.carousel;a.fn.carousel=b,a.fn.carousel.Constructor=c,a.fn.carousel.noConflict=function(){return a.fn.carousel=d,this},a(document).on("click.bs.carousel.data-api","[data-slide], [data-slide-to]",function(c){var d,e=a(this),f=a(e.attr("data-target")||(d=e.attr("href"))&&d.replace(/.*(?=#[^\s]+$)/,""));if(f.hasClass("carousel")){var g=a.extend({},f.data(),e.data()),h=e.attr("data-slide-to");h&&(g.interval=!1),b.call(f,g),h&&f.data("bs.carousel").to(h),c.preventDefault()}}),a(window).on("load",function(){a('[data-ride="carousel"]').each(function(){var c=a(this);b.call(c,c.data())})})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.collapse"),f=a.extend({},c.DEFAULTS,d.data(),"object"==typeof 
b&&b);!e&&f.toggle&&"show"==b&&(b=!b),e||d.data("bs.collapse",e=new c(this,f)),"string"==typeof b&&e[b]()})}var c=function(b,d){this.$element=a(b),this.options=a.extend({},c.DEFAULTS,d),this.transitioning=null,this.options.parent&&(this.$parent=a(this.options.parent)),this.options.toggle&&this.toggle()};c.VERSION="3.2.0",c.DEFAULTS={toggle:!0},c.prototype.dimension=function(){var a=this.$element.hasClass("width");return a?"width":"height"},c.prototype.show=function(){if(!this.transitioning&&!this.$element.hasClass("in")){var c=a.Event("show.bs.collapse");if(this.$element.trigger(c),!c.isDefaultPrevented()){var d=this.$parent&&this.$parent.find("> .panel > .in");if(d&&d.length){var e=d.data("bs.collapse");if(e&&e.transitioning)return;b.call(d,"hide"),e||d.data("bs.collapse",null)}var f=this.dimension();this.$element.removeClass("collapse").addClass("collapsing")[f](0),this.transitioning=1;var g=function(){this.$element.removeClass("collapsing").addClass("collapse in")[f](""),this.transitioning=0,this.$element.trigger("shown.bs.collapse")};if(!a.support.transition)return g.call(this);var h=a.camelCase(["scroll",f].join("-"));this.$element.one("bsTransitionEnd",a.proxy(g,this)).emulateTransitionEnd(350)[f](this.$element[0][h])}}},c.prototype.hide=function(){if(!this.transitioning&&this.$element.hasClass("in")){var b=a.Event("hide.bs.collapse");if(this.$element.trigger(b),!b.isDefaultPrevented()){var c=this.dimension();this.$element[c](this.$element[c]())[0].offsetHeight,this.$element.addClass("collapsing").removeClass("collapse").removeClass("in"),this.transitioning=1;var d=function(){this.transitioning=0,this.$element.trigger("hidden.bs.collapse").removeClass("collapsing").addClass("collapse")};return a.support.transition?void this.$element[c](0).one("bsTransitionEnd",a.proxy(d,this)).emulateTransitionEnd(350):d.call(this)}}},c.prototype.toggle=function(){this[this.$element.hasClass("in")?"hide":"show"]()};var d=a.fn.collapse;a.fn.collapse=b,a.fn.collapse.Constructor=c,a.fn.collapse.noConflict=function(){return a.fn.collapse=d,this},a(document).on("click.bs.collapse.data-api",'[data-toggle="collapse"]',function(c){var d,e=a(this),f=e.attr("data-target")||c.preventDefault()||(d=e.attr("href"))&&d.replace(/.*(?=#[^\s]+$)/,""),g=a(f),h=g.data("bs.collapse"),i=h?"toggle":e.data(),j=e.attr("data-parent"),k=j&&a(j);h&&h.transitioning||(k&&k.find('[data-toggle="collapse"][data-parent="'+j+'"]').not(e).addClass("collapsed"),e[g.hasClass("in")?"addClass":"removeClass"]("collapsed")),b.call(g,i)})}(jQuery),+function(a){"use strict";function b(b){b&&3===b.which||(a(e).remove(),a(f).each(function(){var d=c(a(this)),e={relatedTarget:this};d.hasClass("open")&&(d.trigger(b=a.Event("hide.bs.dropdown",e)),b.isDefaultPrevented()||d.removeClass("open").trigger("hidden.bs.dropdown",e))}))}function c(b){var c=b.attr("data-target");c||(c=b.attr("href"),c=c&&/#[A-Za-z]/.test(c)&&c.replace(/.*(?=#[^\s]*$)/,""));var d=c&&a(c);return d&&d.length?d:b.parent()}function d(b){return this.each(function(){var c=a(this),d=c.data("bs.dropdown");d||c.data("bs.dropdown",d=new g(this)),"string"==typeof b&&d[b].call(c)})}var e=".dropdown-backdrop",f='[data-toggle="dropdown"]',g=function(b){a(b).on("click.bs.dropdown",this.toggle)};g.VERSION="3.2.0",g.prototype.toggle=function(d){var e=a(this);if(!e.is(".disabled, :disabled")){var f=c(e),g=f.hasClass("open");if(b(),!g){"ontouchstart"in document.documentElement&&!f.closest(".navbar-nav").length&&a('<div class="dropdown-backdrop"/>').insertAfter(a(this)).on("click",b);var 
h={relatedTarget:this};if(f.trigger(d=a.Event("show.bs.dropdown",h)),d.isDefaultPrevented())return;e.trigger("focus"),f.toggleClass("open").trigger("shown.bs.dropdown",h)}return!1}},g.prototype.keydown=function(b){if(/(38|40|27)/.test(b.keyCode)){var d=a(this);if(b.preventDefault(),b.stopPropagation(),!d.is(".disabled, :disabled")){var e=c(d),g=e.hasClass("open");if(!g||g&&27==b.keyCode)return 27==b.which&&e.find(f).trigger("focus"),d.trigger("click");var h=" li:not(.divider):visible a",i=e.find('[role="menu"]'+h+', [role="listbox"]'+h);if(i.length){var j=i.index(i.filter(":focus"));38==b.keyCode&&j>0&&j--,40==b.keyCode&&j<i.length-1&&j++,~j||(j=0),i.eq(j).trigger("focus")}}}};var h=a.fn.dropdown;a.fn.dropdown=d,a.fn.dropdown.Constructor=g,a.fn.dropdown.noConflict=function(){return a.fn.dropdown=h,this},a(document).on("click.bs.dropdown.data-api",b).on("click.bs.dropdown.data-api",".dropdown form",function(a){a.stopPropagation()}).on("click.bs.dropdown.data-api",f,g.prototype.toggle).on("keydown.bs.dropdown.data-api",f+', [role="menu"], [role="listbox"]',g.prototype.keydown)}(jQuery),+function(a){"use strict";function b(b,d){return this.each(function(){var e=a(this),f=e.data("bs.modal"),g=a.extend({},c.DEFAULTS,e.data(),"object"==typeof b&&b);f||e.data("bs.modal",f=new c(this,g)),"string"==typeof b?f[b](d):g.show&&f.show(d)})}var c=function(b,c){this.options=c,this.$body=a(document.body),this.$element=a(b),this.$backdrop=this.isShown=null,this.scrollbarWidth=0,this.options.remote&&this.$element.find(".modal-content").load(this.options.remote,a.proxy(function(){this.$element.trigger("loaded.bs.modal")},this))};c.VERSION="3.2.0",c.DEFAULTS={backdrop:!0,keyboard:!0,show:!0},c.prototype.toggle=function(a){return this.isShown?this.hide():this.show(a)},c.prototype.show=function(b){var c=this,d=a.Event("show.bs.modal",{relatedTarget:b});this.$element.trigger(d),this.isShown||d.isDefaultPrevented()||(this.isShown=!0,this.checkScrollbar(),this.$body.addClass("modal-open"),this.setScrollbar(),this.escape(),this.$element.on("click.dismiss.bs.modal",'[data-dismiss="modal"]',a.proxy(this.hide,this)),this.backdrop(function(){var d=a.support.transition&&c.$element.hasClass("fade");c.$element.parent().length||c.$element.appendTo(c.$body),c.$element.show().scrollTop(0),d&&c.$element[0].offsetWidth,c.$element.addClass("in").attr("aria-hidden",!1),c.enforceFocus();var 
e=a.Event("shown.bs.modal",{relatedTarget:b});d?c.$element.find(".modal-dialog").one("bsTransitionEnd",function(){c.$element.trigger("focus").trigger(e)}).emulateTransitionEnd(300):c.$element.trigger("focus").trigger(e)}))},c.prototype.hide=function(b){b&&b.preventDefault(),b=a.Event("hide.bs.modal"),this.$element.trigger(b),this.isShown&&!b.isDefaultPrevented()&&(this.isShown=!1,this.$body.removeClass("modal-open"),this.resetScrollbar(),this.escape(),a(document).off("focusin.bs.modal"),this.$element.removeClass("in").attr("aria-hidden",!0).off("click.dismiss.bs.modal"),a.support.transition&&this.$element.hasClass("fade")?this.$element.one("bsTransitionEnd",a.proxy(this.hideModal,this)).emulateTransitionEnd(300):this.hideModal())},c.prototype.enforceFocus=function(){a(document).off("focusin.bs.modal").on("focusin.bs.modal",a.proxy(function(a){this.$element[0]===a.target||this.$element.has(a.target).length||this.$element.trigger("focus")},this))},c.prototype.escape=function(){this.isShown&&this.options.keyboard?this.$element.on("keyup.dismiss.bs.modal",a.proxy(function(a){27==a.which&&this.hide()},this)):this.isShown||this.$element.off("keyup.dismiss.bs.modal")},c.prototype.hideModal=function(){var a=this;this.$element.hide(),this.backdrop(function(){a.$element.trigger("hidden.bs.modal")})},c.prototype.removeBackdrop=function(){this.$backdrop&&this.$backdrop.remove(),this.$backdrop=null},c.prototype.backdrop=function(b){var c=this,d=this.$element.hasClass("fade")?"fade":"";if(this.isShown&&this.options.backdrop){var e=a.support.transition&&d;if(this.$backdrop=a('<div class="modal-backdrop '+d+'" />').appendTo(this.$body),this.$element.on("click.dismiss.bs.modal",a.proxy(function(a){a.target===a.currentTarget&&("static"==this.options.backdrop?this.$element[0].focus.call(this.$element[0]):this.hide.call(this))},this)),e&&this.$backdrop[0].offsetWidth,this.$backdrop.addClass("in"),!b)return;e?this.$backdrop.one("bsTransitionEnd",b).emulateTransitionEnd(150):b()}else if(!this.isShown&&this.$backdrop){this.$backdrop.removeClass("in");var f=function(){c.removeBackdrop(),b&&b()};a.support.transition&&this.$element.hasClass("fade")?this.$backdrop.one("bsTransitionEnd",f).emulateTransitionEnd(150):f()}else b&&b()},c.prototype.checkScrollbar=function(){document.body.clientWidth>=window.innerWidth||(this.scrollbarWidth=this.scrollbarWidth||this.measureScrollbar())},c.prototype.setScrollbar=function(){var a=parseInt(this.$body.css("padding-right")||0,10);this.scrollbarWidth&&this.$body.css("padding-right",a+this.scrollbarWidth)},c.prototype.resetScrollbar=function(){this.$body.css("padding-right","")},c.prototype.measureScrollbar=function(){var a=document.createElement("div");a.className="modal-scrollbar-measure",this.$body.append(a);var b=a.offsetWidth-a.clientWidth;return this.$body[0].removeChild(a),b};var d=a.fn.modal;a.fn.modal=b,a.fn.modal.Constructor=c,a.fn.modal.noConflict=function(){return a.fn.modal=d,this},a(document).on("click.bs.modal.data-api",'[data-toggle="modal"]',function(c){var d=a(this),e=d.attr("href"),f=a(d.attr("data-target")||e&&e.replace(/.*(?=#[^\s]+$)/,"")),g=f.data("bs.modal")?"toggle":a.extend({remote:!/#/.test(e)&&e},f.data(),d.data());d.is("a")&&c.preventDefault(),f.one("show.bs.modal",function(a){a.isDefaultPrevented()||f.one("hidden.bs.modal",function(){d.is(":visible")&&d.trigger("focus")})}),b.call(f,g,this)})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.tooltip"),f="object"==typeof 
b&&b;(e||"destroy"!=b)&&(e||d.data("bs.tooltip",e=new c(this,f)),"string"==typeof b&&e[b]())})}var c=function(a,b){this.type=this.options=this.enabled=this.timeout=this.hoverState=this.$element=null,this.init("tooltip",a,b)};c.VERSION="3.2.0",c.DEFAULTS={animation:!0,placement:"top",selector:!1,template:'<div class="tooltip" role="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>',trigger:"hover focus",title:"",delay:0,html:!1,container:!1,viewport:{selector:"body",padding:0}},c.prototype.init=function(b,c,d){this.enabled=!0,this.type=b,this.$element=a(c),this.options=this.getOptions(d),this.$viewport=this.options.viewport&&a(this.options.viewport.selector||this.options.viewport);for(var e=this.options.trigger.split(" "),f=e.length;f--;){var g=e[f];if("click"==g)this.$element.on("click."+this.type,this.options.selector,a.proxy(this.toggle,this));else if("manual"!=g){var h="hover"==g?"mouseenter":"focusin",i="hover"==g?"mouseleave":"focusout";this.$element.on(h+"."+this.type,this.options.selector,a.proxy(this.enter,this)),this.$element.on(i+"."+this.type,this.options.selector,a.proxy(this.leave,this))}}this.options.selector?this._options=a.extend({},this.options,{trigger:"manual",selector:""}):this.fixTitle()},c.prototype.getDefaults=function(){return c.DEFAULTS},c.prototype.getOptions=function(b){return b=a.extend({},this.getDefaults(),this.$element.data(),b),b.delay&&"number"==typeof b.delay&&(b.delay={show:b.delay,hide:b.delay}),b},c.prototype.getDelegateOptions=function(){var b={},c=this.getDefaults();return this._options&&a.each(this._options,function(a,d){c[a]!=d&&(b[a]=d)}),b},c.prototype.enter=function(b){var c=b instanceof this.constructor?b:a(b.currentTarget).data("bs."+this.type);return c||(c=new this.constructor(b.currentTarget,this.getDelegateOptions()),a(b.currentTarget).data("bs."+this.type,c)),clearTimeout(c.timeout),c.hoverState="in",c.options.delay&&c.options.delay.show?void(c.timeout=setTimeout(function(){"in"==c.hoverState&&c.show()},c.options.delay.show)):c.show()},c.prototype.leave=function(b){var c=b instanceof this.constructor?b:a(b.currentTarget).data("bs."+this.type);return c||(c=new this.constructor(b.currentTarget,this.getDelegateOptions()),a(b.currentTarget).data("bs."+this.type,c)),clearTimeout(c.timeout),c.hoverState="out",c.options.delay&&c.options.delay.hide?void(c.timeout=setTimeout(function(){"out"==c.hoverState&&c.hide()},c.options.delay.hide)):c.hide()},c.prototype.show=function(){var b=a.Event("show.bs."+this.type);if(this.hasContent()&&this.enabled){this.$element.trigger(b);var c=a.contains(document.documentElement,this.$element[0]);if(b.isDefaultPrevented()||!c)return;var d=this,e=this.tip(),f=this.getUID(this.type);this.setContent(),e.attr("id",f),this.$element.attr("aria-describedby",f),this.options.animation&&e.addClass("fade");var g="function"==typeof this.options.placement?this.options.placement.call(this,e[0],this.$element[0]):this.options.placement,h=/\s?auto?\s?/i,i=h.test(g);i&&(g=g.replace(h,"")||"top"),e.detach().css({top:0,left:0,display:"block"}).addClass(g).data("bs."+this.type,this),this.options.container?e.appendTo(this.options.container):e.insertAfter(this.$element);var j=this.getPosition(),k=e[0].offsetWidth,l=e[0].offsetHeight;if(i){var m=g,n=this.$element.parent(),o=this.getPosition(n);g="bottom"==g&&j.top+j.height+l-o.scroll>o.height?"top":"top"==g&&j.top-o.scroll-l<0?"bottom":"right"==g&&j.right+k>o.width?"left":"left"==g&&j.left-k<o.left?"right":g,e.removeClass(m).addClass(g)}var 
p=this.getCalculatedOffset(g,j,k,l);this.applyPlacement(p,g);var q=function(){d.$element.trigger("shown.bs."+d.type),d.hoverState=null};a.support.transition&&this.$tip.hasClass("fade")?e.one("bsTransitionEnd",q).emulateTransitionEnd(150):q()}},c.prototype.applyPlacement=function(b,c){var d=this.tip(),e=d[0].offsetWidth,f=d[0].offsetHeight,g=parseInt(d.css("margin-top"),10),h=parseInt(d.css("margin-left"),10);isNaN(g)&&(g=0),isNaN(h)&&(h=0),b.top=b.top+g,b.left=b.left+h,a.offset.setOffset(d[0],a.extend({using:function(a){d.css({top:Math.round(a.top),left:Math.round(a.left)})}},b),0),d.addClass("in");var i=d[0].offsetWidth,j=d[0].offsetHeight;"top"==c&&j!=f&&(b.top=b.top+f-j);var k=this.getViewportAdjustedDelta(c,b,i,j);k.left?b.left+=k.left:b.top+=k.top;var l=k.left?2*k.left-e+i:2*k.top-f+j,m=k.left?"left":"top",n=k.left?"offsetWidth":"offsetHeight";d.offset(b),this.replaceArrow(l,d[0][n],m)},c.prototype.replaceArrow=function(a,b,c){this.arrow().css(c,a?50*(1-a/b)+"%":"")},c.prototype.setContent=function(){var a=this.tip(),b=this.getTitle();a.find(".tooltip-inner")[this.options.html?"html":"text"](b),a.removeClass("fade in top bottom left right")},c.prototype.hide=function(){function b(){"in"!=c.hoverState&&d.detach(),c.$element.trigger("hidden.bs."+c.type)}var c=this,d=this.tip(),e=a.Event("hide.bs."+this.type);return this.$element.removeAttr("aria-describedby"),this.$element.trigger(e),e.isDefaultPrevented()?void 0:(d.removeClass("in"),a.support.transition&&this.$tip.hasClass("fade")?d.one("bsTransitionEnd",b).emulateTransitionEnd(150):b(),this.hoverState=null,this)},c.prototype.fixTitle=function(){var a=this.$element;(a.attr("title")||"string"!=typeof a.attr("data-original-title"))&&a.attr("data-original-title",a.attr("title")||"").attr("title","")},c.prototype.hasContent=function(){return this.getTitle()},c.prototype.getPosition=function(b){b=b||this.$element;var c=b[0],d="BODY"==c.tagName;return a.extend({},"function"==typeof c.getBoundingClientRect?c.getBoundingClientRect():null,{scroll:d?document.documentElement.scrollTop||document.body.scrollTop:b.scrollTop(),width:d?a(window).width():b.outerWidth(),height:d?a(window).height():b.outerHeight()},d?{top:0,left:0}:b.offset())},c.prototype.getCalculatedOffset=function(a,b,c,d){return"bottom"==a?{top:b.top+b.height,left:b.left+b.width/2-c/2}:"top"==a?{top:b.top-d,left:b.left+b.width/2-c/2}:"left"==a?{top:b.top+b.height/2-d/2,left:b.left-c}:{top:b.top+b.height/2-d/2,left:b.left+b.width}},c.prototype.getViewportAdjustedDelta=function(a,b,c,d){var e={top:0,left:0};if(!this.$viewport)return e;var f=this.options.viewport&&this.options.viewport.padding||0,g=this.getPosition(this.$viewport);if(/right|left/.test(a)){var h=b.top-f-g.scroll,i=b.top+f-g.scroll+d;h<g.top?e.top=g.top-h:i>g.top+g.height&&(e.top=g.top+g.height-i)}else{var j=b.left-f,k=b.left+f+c;j<g.left?e.left=g.left-j:k>g.width&&(e.left=g.left+g.width-k)}return e},c.prototype.getTitle=function(){var a,b=this.$element,c=this.options;return a=b.attr("data-original-title")||("function"==typeof c.title?c.title.call(b[0]):c.title)},c.prototype.getUID=function(a){do a+=~~(1e6*Math.random());while(document.getElementById(a));return a},c.prototype.tip=function(){return this.$tip=this.$tip||a(this.options.template)},c.prototype.arrow=function(){return 
this.$arrow=this.$arrow||this.tip().find(".tooltip-arrow")},c.prototype.validate=function(){this.$element[0].parentNode||(this.hide(),this.$element=null,this.options=null)},c.prototype.enable=function(){this.enabled=!0},c.prototype.disable=function(){this.enabled=!1},c.prototype.toggleEnabled=function(){this.enabled=!this.enabled},c.prototype.toggle=function(b){var c=this;b&&(c=a(b.currentTarget).data("bs."+this.type),c||(c=new this.constructor(b.currentTarget,this.getDelegateOptions()),a(b.currentTarget).data("bs."+this.type,c))),c.tip().hasClass("in")?c.leave(c):c.enter(c)},c.prototype.destroy=function(){clearTimeout(this.timeout),this.hide().$element.off("."+this.type).removeData("bs."+this.type)};var d=a.fn.tooltip;a.fn.tooltip=b,a.fn.tooltip.Constructor=c,a.fn.tooltip.noConflict=function(){return a.fn.tooltip=d,this}}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.popover"),f="object"==typeof b&&b;(e||"destroy"!=b)&&(e||d.data("bs.popover",e=new c(this,f)),"string"==typeof b&&e[b]())})}var c=function(a,b){this.init("popover",a,b)};if(!a.fn.tooltip)throw new Error("Popover requires tooltip.js");c.VERSION="3.2.0",c.DEFAULTS=a.extend({},a.fn.tooltip.Constructor.DEFAULTS,{placement:"right",trigger:"click",content:"",template:'<div class="popover" role="tooltip"><div class="arrow"></div><h3 class="popover-title"></h3><div class="popover-content"></div></div>'}),c.prototype=a.extend({},a.fn.tooltip.Constructor.prototype),c.prototype.constructor=c,c.prototype.getDefaults=function(){return c.DEFAULTS},c.prototype.setContent=function(){var a=this.tip(),b=this.getTitle(),c=this.getContent();a.find(".popover-title")[this.options.html?"html":"text"](b),a.find(".popover-content").empty()[this.options.html?"string"==typeof c?"html":"append":"text"](c),a.removeClass("fade top bottom left right in"),a.find(".popover-title").html()||a.find(".popover-title").hide()},c.prototype.hasContent=function(){return this.getTitle()||this.getContent()},c.prototype.getContent=function(){var a=this.$element,b=this.options;return a.attr("data-content")||("function"==typeof b.content?b.content.call(a[0]):b.content)},c.prototype.arrow=function(){return this.$arrow=this.$arrow||this.tip().find(".arrow")},c.prototype.tip=function(){return this.$tip||(this.$tip=a(this.options.template)),this.$tip};var d=a.fn.popover;a.fn.popover=b,a.fn.popover.Constructor=c,a.fn.popover.noConflict=function(){return a.fn.popover=d,this}}(jQuery),+function(a){"use strict";function b(c,d){var e=a.proxy(this.process,this);this.$body=a("body"),this.$scrollElement=a(a(c).is("body")?window:c),this.options=a.extend({},b.DEFAULTS,d),this.selector=(this.options.target||"")+" .nav li > a",this.offsets=[],this.targets=[],this.activeTarget=null,this.scrollHeight=0,this.$scrollElement.on("scroll.bs.scrollspy",e),this.refresh(),this.process()}function c(c){return this.each(function(){var d=a(this),e=d.data("bs.scrollspy"),f="object"==typeof c&&c;e||d.data("bs.scrollspy",e=new b(this,f)),"string"==typeof c&&e[c]()})}b.VERSION="3.2.0",b.DEFAULTS={offset:10},b.prototype.getScrollHeight=function(){return this.$scrollElement[0].scrollHeight||Math.max(this.$body[0].scrollHeight,document.documentElement.scrollHeight)},b.prototype.refresh=function(){var b="offset",c=0;a.isWindow(this.$scrollElement[0])||(b="position",c=this.$scrollElement.scrollTop()),this.offsets=[],this.targets=[],this.scrollHeight=this.getScrollHeight();var d=this;this.$body.find(this.selector).map(function(){var 
d=a(this),e=d.data("target")||d.attr("href"),f=/^#./.test(e)&&a(e);return f&&f.length&&f.is(":visible")&&[[f[b]().top+c,e]]||null}).sort(function(a,b){return a[0]-b[0]}).each(function(){d.offsets.push(this[0]),d.targets.push(this[1])})},b.prototype.process=function(){var a,b=this.$scrollElement.scrollTop()+this.options.offset,c=this.getScrollHeight(),d=this.options.offset+c-this.$scrollElement.height(),e=this.offsets,f=this.targets,g=this.activeTarget;if(this.scrollHeight!=c&&this.refresh(),b>=d)return g!=(a=f[f.length-1])&&this.activate(a);if(g&&b<=e[0])return g!=(a=f[0])&&this.activate(a);for(a=e.length;a--;)g!=f[a]&&b>=e[a]&&(!e[a+1]||b<=e[a+1])&&this.activate(f[a])},b.prototype.activate=function(b){this.activeTarget=b,a(this.selector).parentsUntil(this.options.target,".active").removeClass("active");var c=this.selector+'[data-target="'+b+'"],'+this.selector+'[href="'+b+'"]',d=a(c).parents("li").addClass("active");d.parent(".dropdown-menu").length&&(d=d.closest("li.dropdown").addClass("active")),d.trigger("activate.bs.scrollspy")};var d=a.fn.scrollspy;a.fn.scrollspy=c,a.fn.scrollspy.Constructor=b,a.fn.scrollspy.noConflict=function(){return a.fn.scrollspy=d,this},a(window).on("load.bs.scrollspy.data-api",function(){a('[data-spy="scroll"]').each(function(){var b=a(this);c.call(b,b.data())})})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.tab");e||d.data("bs.tab",e=new c(this)),"string"==typeof b&&e[b]()})}var c=function(b){this.element=a(b)};c.VERSION="3.2.0",c.prototype.show=function(){var b=this.element,c=b.closest("ul:not(.dropdown-menu)"),d=b.data("target");if(d||(d=b.attr("href"),d=d&&d.replace(/.*(?=#[^\s]*$)/,"")),!b.parent("li").hasClass("active")){var e=c.find(".active:last a")[0],f=a.Event("show.bs.tab",{relatedTarget:e});if(b.trigger(f),!f.isDefaultPrevented()){var g=a(d);this.activate(b.closest("li"),c),this.activate(g,g.parent(),function(){b.trigger({type:"shown.bs.tab",relatedTarget:e})})}}},c.prototype.activate=function(b,c,d){function e(){f.removeClass("active").find("> .dropdown-menu > .active").removeClass("active"),b.addClass("active"),g?(b[0].offsetWidth,b.addClass("in")):b.removeClass("fade"),b.parent(".dropdown-menu")&&b.closest("li.dropdown").addClass("active"),d&&d()}var f=c.find("> .active"),g=d&&a.support.transition&&f.hasClass("fade");g?f.one("bsTransitionEnd",e).emulateTransitionEnd(150):e(),f.removeClass("in")};var d=a.fn.tab;a.fn.tab=b,a.fn.tab.Constructor=c,a.fn.tab.noConflict=function(){return a.fn.tab=d,this},a(document).on("click.bs.tab.data-api",'[data-toggle="tab"], [data-toggle="pill"]',function(c){c.preventDefault(),b.call(a(this),"show")})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.affix"),f="object"==typeof b&&b;e||d.data("bs.affix",e=new c(this,f)),"string"==typeof b&&e[b]()})}var c=function(b,d){this.options=a.extend({},c.DEFAULTS,d),this.$target=a(this.options.target).on("scroll.bs.affix.data-api",a.proxy(this.checkPosition,this)).on("click.bs.affix.data-api",a.proxy(this.checkPositionWithEventLoop,this)),this.$element=a(b),this.affixed=this.unpin=this.pinnedOffset=null,this.checkPosition()};c.VERSION="3.2.0",c.RESET="affix affix-top affix-bottom",c.DEFAULTS={offset:0,target:window},c.prototype.getPinnedOffset=function(){if(this.pinnedOffset)return this.pinnedOffset;this.$element.removeClass(c.RESET).addClass("affix");var a=this.$target.scrollTop(),b=this.$element.offset();return 
this.pinnedOffset=b.top-a},c.prototype.checkPositionWithEventLoop=function(){setTimeout(a.proxy(this.checkPosition,this),1)},c.prototype.checkPosition=function(){if(this.$element.is(":visible")){var b=a(document).height(),d=this.$target.scrollTop(),e=this.$element.offset(),f=this.options.offset,g=f.top,h=f.bottom;"object"!=typeof f&&(h=g=f),"function"==typeof g&&(g=f.top(this.$element)),"function"==typeof h&&(h=f.bottom(this.$element));var i=null!=this.unpin&&d+this.unpin<=e.top?!1:null!=h&&e.top+this.$element.height()>=b-h?"bottom":null!=g&&g>=d?"top":!1;if(this.affixed!==i){null!=this.unpin&&this.$element.css("top","");var j="affix"+(i?"-"+i:""),k=a.Event(j+".bs.affix");this.$element.trigger(k),k.isDefaultPrevented()||(this.affixed=i,this.unpin="bottom"==i?this.getPinnedOffset():null,this.$element.removeClass(c.RESET).addClass(j).trigger(a.Event(j.replace("affix","affixed"))),"bottom"==i&&this.$element.offset({top:b-this.$element.height()-h}))}}};var d=a.fn.affix;a.fn.affix=b,a.fn.affix.Constructor=c,a.fn.affix.noConflict=function(){return a.fn.affix=d,this},a(window).on("load",function(){a('[data-spy="affix"]').each(function(){var c=a(this),d=c.data();d.offset=d.offset||{},d.offsetBottom&&(d.offset.bottom=d.offsetBottom),d.offsetTop&&(d.offset.top=d.offsetTop),b.call(c,d)})})}(jQuery); | zbx-dashboard | /zbx-dashboard-0.1.2.tar.gz/zbx-dashboard-0.1.2/zbx_dashboard/static/zbx_dashboard/js/bootstrap.min.js | bootstrap.min.js |
(function (factory) {
if (typeof define === 'function' && define.amd) {
// AMD
define(['jquery'], factory);
} else if (typeof exports === 'object') {
// CommonJS
factory(require('jquery'));
} else {
// Browser globals
factory(jQuery);
}
}(function ($) {
var pluses = /\+/g;
function encode(s) {
return config.raw ? s : encodeURIComponent(s);
}
function decode(s) {
return config.raw ? s : decodeURIComponent(s);
}
function stringifyCookieValue(value) {
return encode(config.json ? JSON.stringify(value) : String(value));
}
function parseCookieValue(s) {
if (s.indexOf('"') === 0) {
// This is a quoted cookie according to RFC 2068; unescape it...
s = s.slice(1, -1).replace(/\\"/g, '"').replace(/\\\\/g, '\\');
}
try {
// Replace server-side written pluses with spaces.
// If we can't decode the cookie, ignore it, it's unusable.
// If we can't parse the cookie, ignore it, it's unusable.
s = decodeURIComponent(s.replace(pluses, ' '));
return config.json ? JSON.parse(s) : s;
} catch(e) {}
}
function read(s, converter) {
var value = config.raw ? s : parseCookieValue(s);
return $.isFunction(converter) ? converter(value) : value;
}
var config = $.cookie = function (key, value, options) {
// Write
if (value !== undefined && !$.isFunction(value)) {
options = $.extend({}, config.defaults, options);
if (typeof options.expires === 'number') {
var days = options.expires, t = options.expires = new Date();
t.setTime(+t + days * 864e+5);
}
return (document.cookie = [
encode(key), '=', stringifyCookieValue(value),
options.expires ? '; expires=' + options.expires.toUTCString() : '', // use expires attribute, max-age is not supported by IE
options.path ? '; path=' + options.path : '',
options.domain ? '; domain=' + options.domain : '',
options.secure ? '; secure' : ''
].join(''));
}
// Read
var result = key ? undefined : {};
// Assign an empty array when there are no cookies at all, so the for
// loop below is skipped and calling $.cookie() with no arguments
// returns an empty object instead of an odd result.
var cookies = document.cookie ? document.cookie.split('; ') : [];
for (var i = 0, l = cookies.length; i < l; i++) {
var parts = cookies[i].split('=');
var name = decode(parts.shift());
var cookie = parts.join('=');
if (key && key === name) {
// If second argument (value) is a function it's a converter...
result = read(cookie, value);
break;
}
// Prevent storing a cookie that we couldn't decode.
if (!key && (cookie = read(cookie)) !== undefined) {
result[name] = cookie;
}
}
return result;
};
config.defaults = {};
$.removeCookie = function (key, options) {
if ($.cookie(key) === undefined) {
return false;
}
// Must not alter options, thus extending a fresh object...
$.cookie(key, '', $.extend({}, options, { expires: -1 }));
return !$.cookie(key);
};
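// ------------------------------------------------------------------
// Usage sketch (illustrative only; the cookie names and values are
// hypothetical, and kept in comments so nothing runs on load):
//
// $.cookie('name', 'value', { expires: 7, path: '/' }); // write, 7 days
// $.cookie('name'); // read one cookie
// $.cookie(); // read all cookies as an object
// $.removeCookie('name', { path: '/' }); // delete
//
// $.cookie.json = true; // JSON.stringify on write, JSON.parse on read
// ------------------------------------------------------------------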
})); | zbx-dashboard | /zbx-dashboard-0.1.2.tar.gz/zbx-dashboard-0.1.2/zbx_dashboard/static/zbx_dashboard/js/jquery.cookie.js | jquery.cookie.js |
if (typeof jQuery === 'undefined') { throw new Error('Bootstrap\'s JavaScript requires jQuery') }
/* ========================================================================
* Bootstrap: transition.js v3.2.0
* http://getbootstrap.com/javascript/#transitions
* ========================================================================
* Copyright 2011-2014 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
* ======================================================================== */
+function ($) {
'use strict';
// CSS TRANSITION SUPPORT (Shoutout: http://www.modernizr.com/)
// ============================================================
function transitionEnd() {
var el = document.createElement('bootstrap')
var transEndEventNames = {
WebkitTransition : 'webkitTransitionEnd',
MozTransition : 'transitionend',
OTransition : 'oTransitionEnd otransitionend',
transition : 'transitionend'
}
for (var name in transEndEventNames) {
if (el.style[name] !== undefined) {
return { end: transEndEventNames[name] }
}
}
return false // explicit for ie8 ( ._.)
}
// http://blog.alexmaccaw.com/css-transitions
$.fn.emulateTransitionEnd = function (duration) {
var called = false
var $el = this
$(this).one('bsTransitionEnd', function () { called = true })
var callback = function () { if (!called) $($el).trigger($.support.transition.end) }
setTimeout(callback, duration)
return this
}
$(function () {
$.support.transition = transitionEnd()
if (!$.support.transition) return
$.event.special.bsTransitionEnd = {
bindType: $.support.transition.end,
delegateType: $.support.transition.end,
handle: function (e) {
if ($(e.target).is(this)) return e.handleObj.handler.apply(this, arguments)
}
}
})
}(jQuery);
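/* Usage sketch (illustrative): the transition helpers above are consumed by
 * the plugins below, typically in this pattern:
 *
 * $.support.transition && $el.hasClass('fade') ?
 * $el.one('bsTransitionEnd', done).emulateTransitionEnd(150) :
 * done()
 */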
/* ========================================================================
* Bootstrap: alert.js v3.2.0
* http://getbootstrap.com/javascript/#alerts
* ========================================================================
* Copyright 2011-2014 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
* ======================================================================== */
+function ($) {
'use strict';
// ALERT CLASS DEFINITION
// ======================
var dismiss = '[data-dismiss="alert"]'
var Alert = function (el) {
$(el).on('click', dismiss, this.close)
}
Alert.VERSION = '3.2.0'
Alert.prototype.close = function (e) {
var $this = $(this)
var selector = $this.attr('data-target')
if (!selector) {
selector = $this.attr('href')
selector = selector && selector.replace(/.*(?=#[^\s]*$)/, '') // strip for ie7
}
var $parent = $(selector)
if (e) e.preventDefault()
if (!$parent.length) {
$parent = $this.hasClass('alert') ? $this : $this.parent()
}
$parent.trigger(e = $.Event('close.bs.alert'))
if (e.isDefaultPrevented()) return
$parent.removeClass('in')
function removeElement() {
// detach from parent, fire event then clean up data
$parent.detach().trigger('closed.bs.alert').remove()
}
$.support.transition && $parent.hasClass('fade') ?
$parent
.one('bsTransitionEnd', removeElement)
.emulateTransitionEnd(150) :
removeElement()
}
// ALERT PLUGIN DEFINITION
// =======================
function Plugin(option) {
return this.each(function () {
var $this = $(this)
var data = $this.data('bs.alert')
if (!data) $this.data('bs.alert', (data = new Alert(this)))
if (typeof option == 'string') data[option].call($this)
})
}
var old = $.fn.alert
$.fn.alert = Plugin
$.fn.alert.Constructor = Alert
// ALERT NO CONFLICT
// =================
$.fn.alert.noConflict = function () {
$.fn.alert = old
return this
}
// ALERT DATA-API
// ==============
$(document).on('click.bs.alert.data-api', dismiss, Alert.prototype.close)
}(jQuery);
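/* Usage sketch (illustrative; the #myAlert id is hypothetical):
 *
 * $('#myAlert').alert('close') // dismiss programmatically
 * $('#myAlert').on('closed.bs.alert', fn) // fires after the element is removed
 *
 * Declaratively, any element matching [data-dismiss="alert"] inside the
 * alert closes it via the data-api registered above.
 */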
/* ========================================================================
* Bootstrap: button.js v3.2.0
* http://getbootstrap.com/javascript/#buttons
* ========================================================================
* Copyright 2011-2014 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
* ======================================================================== */
+function ($) {
'use strict';
// BUTTON PUBLIC CLASS DEFINITION
// ==============================
var Button = function (element, options) {
this.$element = $(element)
this.options = $.extend({}, Button.DEFAULTS, options)
this.isLoading = false
}
Button.VERSION = '3.2.0'
Button.DEFAULTS = {
loadingText: 'loading...'
}
Button.prototype.setState = function (state) {
var d = 'disabled'
var $el = this.$element
var val = $el.is('input') ? 'val' : 'html'
var data = $el.data()
state = state + 'Text'
if (data.resetText == null) $el.data('resetText', $el[val]())
$el[val](data[state] == null ? this.options[state] : data[state])
// push to event loop to allow forms to submit
setTimeout($.proxy(function () {
if (state == 'loadingText') {
this.isLoading = true
$el.addClass(d).attr(d, d)
} else if (this.isLoading) {
this.isLoading = false
$el.removeClass(d).removeAttr(d)
}
}, this), 0)
}
Button.prototype.toggle = function () {
var changed = true
var $parent = this.$element.closest('[data-toggle="buttons"]')
if ($parent.length) {
var $input = this.$element.find('input')
if ($input.prop('type') == 'radio') {
if ($input.prop('checked') && this.$element.hasClass('active')) changed = false
else $parent.find('.active').removeClass('active')
}
if (changed) $input.prop('checked', !this.$element.hasClass('active')).trigger('change')
}
if (changed) this.$element.toggleClass('active')
}
// BUTTON PLUGIN DEFINITION
// ========================
function Plugin(option) {
return this.each(function () {
var $this = $(this)
var data = $this.data('bs.button')
var options = typeof option == 'object' && option
if (!data) $this.data('bs.button', (data = new Button(this, options)))
if (option == 'toggle') data.toggle()
else if (option) data.setState(option)
})
}
var old = $.fn.button
$.fn.button = Plugin
$.fn.button.Constructor = Button
// BUTTON NO CONFLICT
// ==================
$.fn.button.noConflict = function () {
$.fn.button = old
return this
}
// BUTTON DATA-API
// ===============
$(document).on('click.bs.button.data-api', '[data-toggle^="button"]', function (e) {
var $btn = $(e.target)
if (!$btn.hasClass('btn')) $btn = $btn.closest('.btn')
Plugin.call($btn, 'toggle')
e.preventDefault()
})
}(jQuery);
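/* Usage sketch (illustrative; the element ids are hypothetical):
 *
 * $('#loadBtn').button('loading') // swap to data-loading-text / DEFAULTS.loadingText and disable
 * $('#loadBtn').button('reset') // restore the cached resetText and re-enable
 * $('#checkBtn').button('toggle') // flip the .active state
 */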
/* ========================================================================
* Bootstrap: carousel.js v3.2.0
* http://getbootstrap.com/javascript/#carousel
* ========================================================================
* Copyright 2011-2014 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
* ======================================================================== */
+function ($) {
'use strict';
// CAROUSEL CLASS DEFINITION
// =========================
var Carousel = function (element, options) {
this.$element = $(element).on('keydown.bs.carousel', $.proxy(this.keydown, this))
this.$indicators = this.$element.find('.carousel-indicators')
this.options = options
this.paused =
this.sliding =
this.interval =
this.$active =
this.$items = null
this.options.pause == 'hover' && this.$element
.on('mouseenter.bs.carousel', $.proxy(this.pause, this))
.on('mouseleave.bs.carousel', $.proxy(this.cycle, this))
}
Carousel.VERSION = '3.2.0'
Carousel.DEFAULTS = {
interval: 5000,
pause: 'hover',
wrap: true
}
Carousel.prototype.keydown = function (e) {
switch (e.which) {
case 37: this.prev(); break
case 39: this.next(); break
default: return
}
e.preventDefault()
}
Carousel.prototype.cycle = function (e) {
e || (this.paused = false)
this.interval && clearInterval(this.interval)
this.options.interval
&& !this.paused
&& (this.interval = setInterval($.proxy(this.next, this), this.options.interval))
return this
}
Carousel.prototype.getItemIndex = function (item) {
this.$items = item.parent().children('.item')
return this.$items.index(item || this.$active)
}
Carousel.prototype.to = function (pos) {
var that = this
var activeIndex = this.getItemIndex(this.$active = this.$element.find('.item.active'))
if (pos > (this.$items.length - 1) || pos < 0) return
if (this.sliding) return this.$element.one('slid.bs.carousel', function () { that.to(pos) }) // yes, "slid"
if (activeIndex == pos) return this.pause().cycle()
return this.slide(pos > activeIndex ? 'next' : 'prev', $(this.$items[pos]))
}
Carousel.prototype.pause = function (e) {
e || (this.paused = true)
if (this.$element.find('.next, .prev').length && $.support.transition) {
this.$element.trigger($.support.transition.end)
this.cycle(true)
}
this.interval = clearInterval(this.interval)
return this
}
Carousel.prototype.next = function () {
if (this.sliding) return
return this.slide('next')
}
Carousel.prototype.prev = function () {
if (this.sliding) return
return this.slide('prev')
}
Carousel.prototype.slide = function (type, next) {
var $active = this.$element.find('.item.active')
var $next = next || $active[type]()
var isCycling = this.interval
var direction = type == 'next' ? 'left' : 'right'
var fallback = type == 'next' ? 'first' : 'last'
var that = this
if (!$next.length) {
if (!this.options.wrap) return
$next = this.$element.find('.item')[fallback]()
}
if ($next.hasClass('active')) return (this.sliding = false)
var relatedTarget = $next[0]
var slideEvent = $.Event('slide.bs.carousel', {
relatedTarget: relatedTarget,
direction: direction
})
this.$element.trigger(slideEvent)
if (slideEvent.isDefaultPrevented()) return
this.sliding = true
isCycling && this.pause()
if (this.$indicators.length) {
this.$indicators.find('.active').removeClass('active')
var $nextIndicator = $(this.$indicators.children()[this.getItemIndex($next)])
$nextIndicator && $nextIndicator.addClass('active')
}
var slidEvent = $.Event('slid.bs.carousel', { relatedTarget: relatedTarget, direction: direction }) // yes, "slid"
if ($.support.transition && this.$element.hasClass('slide')) {
$next.addClass(type)
$next[0].offsetWidth // force reflow
$active.addClass(direction)
$next.addClass(direction)
$active
.one('bsTransitionEnd', function () {
$next.removeClass([type, direction].join(' ')).addClass('active')
$active.removeClass(['active', direction].join(' '))
that.sliding = false
setTimeout(function () {
that.$element.trigger(slidEvent)
}, 0)
})
.emulateTransitionEnd($active.css('transition-duration').slice(0, -1) * 1000)
} else {
$active.removeClass('active')
$next.addClass('active')
this.sliding = false
this.$element.trigger(slidEvent)
}
isCycling && this.cycle()
return this
}
// CAROUSEL PLUGIN DEFINITION
// ==========================
function Plugin(option) {
return this.each(function () {
var $this = $(this)
var data = $this.data('bs.carousel')
var options = $.extend({}, Carousel.DEFAULTS, $this.data(), typeof option == 'object' && option)
var action = typeof option == 'string' ? option : options.slide
if (!data) $this.data('bs.carousel', (data = new Carousel(this, options)))
if (typeof option == 'number') data.to(option)
else if (action) data[action]()
else if (options.interval) data.pause().cycle()
})
}
var old = $.fn.carousel
$.fn.carousel = Plugin
$.fn.carousel.Constructor = Carousel
// CAROUSEL NO CONFLICT
// ====================
$.fn.carousel.noConflict = function () {
$.fn.carousel = old
return this
}
// CAROUSEL DATA-API
// =================
$(document).on('click.bs.carousel.data-api', '[data-slide], [data-slide-to]', function (e) {
var href
var $this = $(this)
var $target = $($this.attr('data-target') || (href = $this.attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '')) // strip for ie7
if (!$target.hasClass('carousel')) return
var options = $.extend({}, $target.data(), $this.data())
var slideIndex = $this.attr('data-slide-to')
if (slideIndex) options.interval = false
Plugin.call($target, options)
if (slideIndex) {
$target.data('bs.carousel').to(slideIndex)
}
e.preventDefault()
})
$(window).on('load', function () {
$('[data-ride="carousel"]').each(function () {
var $carousel = $(this)
Plugin.call($carousel, $carousel.data())
})
})
}(jQuery);
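/* Usage sketch (illustrative; #myCarousel is hypothetical):
 *
 * $('#myCarousel').carousel({ interval: 2000, wrap: false }) // init, cf. Carousel.DEFAULTS
 * $('#myCarousel').carousel(2) // go to slide index 2
 * $('#myCarousel').carousel('next') // or 'prev', 'pause', 'cycle'
 * $('#myCarousel').on('slid.bs.carousel', fn) // after each slide transition
 */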
/* ========================================================================
* Bootstrap: collapse.js v3.2.0
* http://getbootstrap.com/javascript/#collapse
* ========================================================================
* Copyright 2011-2014 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
* ======================================================================== */
+function ($) {
'use strict';
// COLLAPSE PUBLIC CLASS DEFINITION
// ================================
var Collapse = function (element, options) {
this.$element = $(element)
this.options = $.extend({}, Collapse.DEFAULTS, options)
this.transitioning = null
if (this.options.parent) this.$parent = $(this.options.parent)
if (this.options.toggle) this.toggle()
}
Collapse.VERSION = '3.2.0'
Collapse.DEFAULTS = {
toggle: true
}
Collapse.prototype.dimension = function () {
var hasWidth = this.$element.hasClass('width')
return hasWidth ? 'width' : 'height'
}
Collapse.prototype.show = function () {
if (this.transitioning || this.$element.hasClass('in')) return
var startEvent = $.Event('show.bs.collapse')
this.$element.trigger(startEvent)
if (startEvent.isDefaultPrevented()) return
var actives = this.$parent && this.$parent.find('> .panel > .in')
if (actives && actives.length) {
var hasData = actives.data('bs.collapse')
if (hasData && hasData.transitioning) return
Plugin.call(actives, 'hide')
hasData || actives.data('bs.collapse', null)
}
var dimension = this.dimension()
this.$element
.removeClass('collapse')
.addClass('collapsing')[dimension](0)
this.transitioning = 1
var complete = function () {
this.$element
.removeClass('collapsing')
.addClass('collapse in')[dimension]('')
this.transitioning = 0
this.$element
.trigger('shown.bs.collapse')
}
if (!$.support.transition) return complete.call(this)
var scrollSize = $.camelCase(['scroll', dimension].join('-'))
this.$element
.one('bsTransitionEnd', $.proxy(complete, this))
.emulateTransitionEnd(350)[dimension](this.$element[0][scrollSize])
}
Collapse.prototype.hide = function () {
if (this.transitioning || !this.$element.hasClass('in')) return
var startEvent = $.Event('hide.bs.collapse')
this.$element.trigger(startEvent)
if (startEvent.isDefaultPrevented()) return
var dimension = this.dimension()
this.$element[dimension](this.$element[dimension]())[0].offsetHeight
this.$element
.addClass('collapsing')
.removeClass('collapse')
.removeClass('in')
this.transitioning = 1
var complete = function () {
this.transitioning = 0
this.$element
.trigger('hidden.bs.collapse')
.removeClass('collapsing')
.addClass('collapse')
}
if (!$.support.transition) return complete.call(this)
this.$element
[dimension](0)
.one('bsTransitionEnd', $.proxy(complete, this))
.emulateTransitionEnd(350)
}
Collapse.prototype.toggle = function () {
this[this.$element.hasClass('in') ? 'hide' : 'show']()
}
// COLLAPSE PLUGIN DEFINITION
// ==========================
function Plugin(option) {
return this.each(function () {
var $this = $(this)
var data = $this.data('bs.collapse')
var options = $.extend({}, Collapse.DEFAULTS, $this.data(), typeof option == 'object' && option)
if (!data && options.toggle && option == 'show') option = !option
if (!data) $this.data('bs.collapse', (data = new Collapse(this, options)))
if (typeof option == 'string') data[option]()
})
}
var old = $.fn.collapse
$.fn.collapse = Plugin
$.fn.collapse.Constructor = Collapse
// COLLAPSE NO CONFLICT
// ====================
$.fn.collapse.noConflict = function () {
$.fn.collapse = old
return this
}
// COLLAPSE DATA-API
// =================
$(document).on('click.bs.collapse.data-api', '[data-toggle="collapse"]', function (e) {
var href
var $this = $(this)
var target = $this.attr('data-target')
|| e.preventDefault()
|| (href = $this.attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '') // strip for ie7
var $target = $(target)
var data = $target.data('bs.collapse')
var option = data ? 'toggle' : $this.data()
var parent = $this.attr('data-parent')
var $parent = parent && $(parent)
if (!data || !data.transitioning) {
if ($parent) $parent.find('[data-toggle="collapse"][data-parent="' + parent + '"]').not($this).addClass('collapsed')
$this[$target.hasClass('in') ? 'addClass' : 'removeClass']('collapsed')
}
Plugin.call($target, option)
})
}(jQuery);
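/* Usage sketch (illustrative; #demo is hypothetical):
 *
 * $('#demo').collapse() // init; toggles immediately since DEFAULTS.toggle is true
 * $('#demo').collapse('show') // or 'hide', 'toggle'
 * $('#demo').on('shown.bs.collapse', fn)
 */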
/* ========================================================================
* Bootstrap: dropdown.js v3.2.0
* http://getbootstrap.com/javascript/#dropdowns
* ========================================================================
* Copyright 2011-2014 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
* ======================================================================== */
+function ($) {
'use strict';
// DROPDOWN CLASS DEFINITION
// =========================
var backdrop = '.dropdown-backdrop'
var toggle = '[data-toggle="dropdown"]'
var Dropdown = function (element) {
$(element).on('click.bs.dropdown', this.toggle)
}
Dropdown.VERSION = '3.2.0'
Dropdown.prototype.toggle = function (e) {
var $this = $(this)
if ($this.is('.disabled, :disabled')) return
var $parent = getParent($this)
var isActive = $parent.hasClass('open')
clearMenus()
if (!isActive) {
if ('ontouchstart' in document.documentElement && !$parent.closest('.navbar-nav').length) {
// if mobile we use a backdrop because click events don't delegate
$('<div class="dropdown-backdrop"/>').insertAfter($(this)).on('click', clearMenus)
}
var relatedTarget = { relatedTarget: this }
$parent.trigger(e = $.Event('show.bs.dropdown', relatedTarget))
if (e.isDefaultPrevented()) return
$this.trigger('focus')
$parent
.toggleClass('open')
.trigger('shown.bs.dropdown', relatedTarget)
}
return false
}
Dropdown.prototype.keydown = function (e) {
if (!/(38|40|27)/.test(e.keyCode)) return
var $this = $(this)
e.preventDefault()
e.stopPropagation()
if ($this.is('.disabled, :disabled')) return
var $parent = getParent($this)
var isActive = $parent.hasClass('open')
if (!isActive || (isActive && e.keyCode == 27)) {
if (e.which == 27) $parent.find(toggle).trigger('focus')
return $this.trigger('click')
}
var desc = ' li:not(.divider):visible a'
var $items = $parent.find('[role="menu"]' + desc + ', [role="listbox"]' + desc)
if (!$items.length) return
var index = $items.index($items.filter(':focus'))
if (e.keyCode == 38 && index > 0) index-- // up
if (e.keyCode == 40 && index < $items.length - 1) index++ // down
if (!~index) index = 0
$items.eq(index).trigger('focus')
}
function clearMenus(e) {
if (e && e.which === 3) return
$(backdrop).remove()
$(toggle).each(function () {
var $parent = getParent($(this))
var relatedTarget = { relatedTarget: this }
if (!$parent.hasClass('open')) return
$parent.trigger(e = $.Event('hide.bs.dropdown', relatedTarget))
if (e.isDefaultPrevented()) return
$parent.removeClass('open').trigger('hidden.bs.dropdown', relatedTarget)
})
}
function getParent($this) {
var selector = $this.attr('data-target')
if (!selector) {
selector = $this.attr('href')
selector = selector && /#[A-Za-z]/.test(selector) && selector.replace(/.*(?=#[^\s]*$)/, '') // strip for ie7
}
var $parent = selector && $(selector)
return $parent && $parent.length ? $parent : $this.parent()
}
// DROPDOWN PLUGIN DEFINITION
// ==========================
function Plugin(option) {
return this.each(function () {
var $this = $(this)
var data = $this.data('bs.dropdown')
if (!data) $this.data('bs.dropdown', (data = new Dropdown(this)))
if (typeof option == 'string') data[option].call($this)
})
}
var old = $.fn.dropdown
$.fn.dropdown = Plugin
$.fn.dropdown.Constructor = Dropdown
// DROPDOWN NO CONFLICT
// ====================
$.fn.dropdown.noConflict = function () {
$.fn.dropdown = old
return this
}
// APPLY TO STANDARD DROPDOWN ELEMENTS
// ===================================
$(document)
.on('click.bs.dropdown.data-api', clearMenus)
.on('click.bs.dropdown.data-api', '.dropdown form', function (e) { e.stopPropagation() })
.on('click.bs.dropdown.data-api', toggle, Dropdown.prototype.toggle)
.on('keydown.bs.dropdown.data-api', toggle + ', [role="menu"], [role="listbox"]', Dropdown.prototype.keydown)
}(jQuery);
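/* Usage sketch (illustrative): dropdowns are normally driven by the data-api
 * above (a trigger marked data-toggle="dropdown"); programmatically:
 *
 * $('.dropdown-toggle').dropdown('toggle')
 * $(document).on('shown.bs.dropdown', fn) // also 'hidden.bs.dropdown'
 */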
/* ========================================================================
* Bootstrap: modal.js v3.2.0
* http://getbootstrap.com/javascript/#modals
* ========================================================================
* Copyright 2011-2014 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
* ======================================================================== */
+function ($) {
'use strict';
// MODAL CLASS DEFINITION
// ======================
var Modal = function (element, options) {
this.options = options
this.$body = $(document.body)
this.$element = $(element)
this.$backdrop =
this.isShown = null
this.scrollbarWidth = 0
if (this.options.remote) {
this.$element
.find('.modal-content')
.load(this.options.remote, $.proxy(function () {
this.$element.trigger('loaded.bs.modal')
}, this))
}
}
Modal.VERSION = '3.2.0'
Modal.DEFAULTS = {
backdrop: true,
keyboard: true,
show: true
}
Modal.prototype.toggle = function (_relatedTarget) {
return this.isShown ? this.hide() : this.show(_relatedTarget)
}
Modal.prototype.show = function (_relatedTarget) {
var that = this
var e = $.Event('show.bs.modal', { relatedTarget: _relatedTarget })
this.$element.trigger(e)
if (this.isShown || e.isDefaultPrevented()) return
this.isShown = true
this.checkScrollbar()
this.$body.addClass('modal-open')
this.setScrollbar()
this.escape()
this.$element.on('click.dismiss.bs.modal', '[data-dismiss="modal"]', $.proxy(this.hide, this))
this.backdrop(function () {
var transition = $.support.transition && that.$element.hasClass('fade')
if (!that.$element.parent().length) {
that.$element.appendTo(that.$body) // don't move the modal's DOM position
}
that.$element
.show()
.scrollTop(0)
if (transition) {
that.$element[0].offsetWidth // force reflow
}
that.$element
.addClass('in')
.attr('aria-hidden', false)
that.enforceFocus()
var e = $.Event('shown.bs.modal', { relatedTarget: _relatedTarget })
transition ?
that.$element.find('.modal-dialog') // wait for modal to slide in
.one('bsTransitionEnd', function () {
that.$element.trigger('focus').trigger(e)
})
.emulateTransitionEnd(300) :
that.$element.trigger('focus').trigger(e)
})
}
Modal.prototype.hide = function (e) {
if (e) e.preventDefault()
e = $.Event('hide.bs.modal')
this.$element.trigger(e)
if (!this.isShown || e.isDefaultPrevented()) return
this.isShown = false
this.$body.removeClass('modal-open')
this.resetScrollbar()
this.escape()
$(document).off('focusin.bs.modal')
this.$element
.removeClass('in')
.attr('aria-hidden', true)
.off('click.dismiss.bs.modal')
$.support.transition && this.$element.hasClass('fade') ?
this.$element
.one('bsTransitionEnd', $.proxy(this.hideModal, this))
.emulateTransitionEnd(300) :
this.hideModal()
}
Modal.prototype.enforceFocus = function () {
$(document)
.off('focusin.bs.modal') // guard against infinite focus loop
.on('focusin.bs.modal', $.proxy(function (e) {
if (this.$element[0] !== e.target && !this.$element.has(e.target).length) {
this.$element.trigger('focus')
}
}, this))
}
Modal.prototype.escape = function () {
if (this.isShown && this.options.keyboard) {
this.$element.on('keyup.dismiss.bs.modal', $.proxy(function (e) {
e.which == 27 && this.hide()
}, this))
} else if (!this.isShown) {
this.$element.off('keyup.dismiss.bs.modal')
}
}
Modal.prototype.hideModal = function () {
var that = this
this.$element.hide()
this.backdrop(function () {
that.$element.trigger('hidden.bs.modal')
})
}
Modal.prototype.removeBackdrop = function () {
this.$backdrop && this.$backdrop.remove()
this.$backdrop = null
}
Modal.prototype.backdrop = function (callback) {
var that = this
var animate = this.$element.hasClass('fade') ? 'fade' : ''
if (this.isShown && this.options.backdrop) {
var doAnimate = $.support.transition && animate
this.$backdrop = $('<div class="modal-backdrop ' + animate + '" />')
.appendTo(this.$body)
this.$element.on('click.dismiss.bs.modal', $.proxy(function (e) {
if (e.target !== e.currentTarget) return
this.options.backdrop == 'static'
? this.$element[0].focus.call(this.$element[0])
: this.hide.call(this)
}, this))
if (doAnimate) this.$backdrop[0].offsetWidth // force reflow
this.$backdrop.addClass('in')
if (!callback) return
doAnimate ?
this.$backdrop
.one('bsTransitionEnd', callback)
.emulateTransitionEnd(150) :
callback()
} else if (!this.isShown && this.$backdrop) {
this.$backdrop.removeClass('in')
var callbackRemove = function () {
that.removeBackdrop()
callback && callback()
}
$.support.transition && this.$element.hasClass('fade') ?
this.$backdrop
.one('bsTransitionEnd', callbackRemove)
.emulateTransitionEnd(150) :
callbackRemove()
} else if (callback) {
callback()
}
}
Modal.prototype.checkScrollbar = function () {
if (document.body.clientWidth >= window.innerWidth) return
this.scrollbarWidth = this.scrollbarWidth || this.measureScrollbar()
}
Modal.prototype.setScrollbar = function () {
var bodyPad = parseInt((this.$body.css('padding-right') || 0), 10)
if (this.scrollbarWidth) this.$body.css('padding-right', bodyPad + this.scrollbarWidth)
}
Modal.prototype.resetScrollbar = function () {
this.$body.css('padding-right', '')
}
Modal.prototype.measureScrollbar = function () { // thx walsh
var scrollDiv = document.createElement('div')
scrollDiv.className = 'modal-scrollbar-measure'
this.$body.append(scrollDiv)
var scrollbarWidth = scrollDiv.offsetWidth - scrollDiv.clientWidth
this.$body[0].removeChild(scrollDiv)
return scrollbarWidth
}
// MODAL PLUGIN DEFINITION
// =======================
function Plugin(option, _relatedTarget) {
return this.each(function () {
var $this = $(this)
var data = $this.data('bs.modal')
var options = $.extend({}, Modal.DEFAULTS, $this.data(), typeof option == 'object' && option)
if (!data) $this.data('bs.modal', (data = new Modal(this, options)))
if (typeof option == 'string') data[option](_relatedTarget)
else if (options.show) data.show(_relatedTarget)
})
}
var old = $.fn.modal
$.fn.modal = Plugin
$.fn.modal.Constructor = Modal
// MODAL NO CONFLICT
// =================
$.fn.modal.noConflict = function () {
$.fn.modal = old
return this
}
// MODAL DATA-API
// ==============
$(document).on('click.bs.modal.data-api', '[data-toggle="modal"]', function (e) {
var $this = $(this)
var href = $this.attr('href')
var $target = $($this.attr('data-target') || (href && href.replace(/.*(?=#[^\s]+$)/, ''))) // strip for ie7
var option = $target.data('bs.modal') ? 'toggle' : $.extend({ remote: !/#/.test(href) && href }, $target.data(), $this.data())
if ($this.is('a')) e.preventDefault()
$target.one('show.bs.modal', function (showEvent) {
if (showEvent.isDefaultPrevented()) return // only register focus restorer if modal will actually get shown
$target.one('hidden.bs.modal', function () {
$this.is(':visible') && $this.trigger('focus')
})
})
Plugin.call($target, option, this)
})
}(jQuery);
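/* Usage sketch (illustrative; #myModal is hypothetical):
 *
 * $('#myModal').modal({ keyboard: false }) // init, cf. Modal.DEFAULTS
 * $('#myModal').modal('show') // or 'hide', 'toggle'
 * $('#myModal').on('hidden.bs.modal', fn) // after the fade-out completes
 */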
/* ========================================================================
* Bootstrap: tooltip.js v3.2.0
* http://getbootstrap.com/javascript/#tooltip
* Inspired by the original jQuery.tipsy by Jason Frame
* ========================================================================
* Copyright 2011-2014 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
* ======================================================================== */
+function ($) {
'use strict';
// TOOLTIP PUBLIC CLASS DEFINITION
// ===============================
var Tooltip = function (element, options) {
this.type =
this.options =
this.enabled =
this.timeout =
this.hoverState =
this.$element = null
this.init('tooltip', element, options)
}
Tooltip.VERSION = '3.2.0'
Tooltip.DEFAULTS = {
animation: true,
placement: 'top',
selector: false,
template: '<div class="tooltip" role="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>',
trigger: 'hover focus',
title: '',
delay: 0,
html: false,
container: false,
viewport: {
selector: 'body',
padding: 0
}
}
Tooltip.prototype.init = function (type, element, options) {
this.enabled = true
this.type = type
this.$element = $(element)
this.options = this.getOptions(options)
this.$viewport = this.options.viewport && $(this.options.viewport.selector || this.options.viewport)
var triggers = this.options.trigger.split(' ')
for (var i = triggers.length; i--;) {
var trigger = triggers[i]
if (trigger == 'click') {
this.$element.on('click.' + this.type, this.options.selector, $.proxy(this.toggle, this))
} else if (trigger != 'manual') {
var eventIn = trigger == 'hover' ? 'mouseenter' : 'focusin'
var eventOut = trigger == 'hover' ? 'mouseleave' : 'focusout'
this.$element.on(eventIn + '.' + this.type, this.options.selector, $.proxy(this.enter, this))
this.$element.on(eventOut + '.' + this.type, this.options.selector, $.proxy(this.leave, this))
}
}
this.options.selector ?
(this._options = $.extend({}, this.options, { trigger: 'manual', selector: '' })) :
this.fixTitle()
}
Tooltip.prototype.getDefaults = function () {
return Tooltip.DEFAULTS
}
Tooltip.prototype.getOptions = function (options) {
options = $.extend({}, this.getDefaults(), this.$element.data(), options)
if (options.delay && typeof options.delay == 'number') {
options.delay = {
show: options.delay,
hide: options.delay
}
}
return options
}
Tooltip.prototype.getDelegateOptions = function () {
var options = {}
var defaults = this.getDefaults()
this._options && $.each(this._options, function (key, value) {
if (defaults[key] != value) options[key] = value
})
return options
}
Tooltip.prototype.enter = function (obj) {
var self = obj instanceof this.constructor ?
obj : $(obj.currentTarget).data('bs.' + this.type)
if (!self) {
self = new this.constructor(obj.currentTarget, this.getDelegateOptions())
$(obj.currentTarget).data('bs.' + this.type, self)
}
clearTimeout(self.timeout)
self.hoverState = 'in'
if (!self.options.delay || !self.options.delay.show) return self.show()
self.timeout = setTimeout(function () {
if (self.hoverState == 'in') self.show()
}, self.options.delay.show)
}
Tooltip.prototype.leave = function (obj) {
var self = obj instanceof this.constructor ?
obj : $(obj.currentTarget).data('bs.' + this.type)
if (!self) {
self = new this.constructor(obj.currentTarget, this.getDelegateOptions())
$(obj.currentTarget).data('bs.' + this.type, self)
}
clearTimeout(self.timeout)
self.hoverState = 'out'
if (!self.options.delay || !self.options.delay.hide) return self.hide()
self.timeout = setTimeout(function () {
if (self.hoverState == 'out') self.hide()
}, self.options.delay.hide)
}
Tooltip.prototype.show = function () {
var e = $.Event('show.bs.' + this.type)
if (this.hasContent() && this.enabled) {
this.$element.trigger(e)
var inDom = $.contains(document.documentElement, this.$element[0])
if (e.isDefaultPrevented() || !inDom) return
var that = this
var $tip = this.tip()
var tipId = this.getUID(this.type)
this.setContent()
$tip.attr('id', tipId)
this.$element.attr('aria-describedby', tipId)
if (this.options.animation) $tip.addClass('fade')
var placement = typeof this.options.placement == 'function' ?
this.options.placement.call(this, $tip[0], this.$element[0]) :
this.options.placement
var autoToken = /\s?auto?\s?/i
var autoPlace = autoToken.test(placement)
if (autoPlace) placement = placement.replace(autoToken, '') || 'top'
$tip
.detach()
.css({ top: 0, left: 0, display: 'block' })
.addClass(placement)
.data('bs.' + this.type, this)
this.options.container ? $tip.appendTo(this.options.container) : $tip.insertAfter(this.$element)
var pos = this.getPosition()
var actualWidth = $tip[0].offsetWidth
var actualHeight = $tip[0].offsetHeight
if (autoPlace) {
var orgPlacement = placement
var $parent = this.$element.parent()
var parentDim = this.getPosition($parent)
placement = placement == 'bottom' && pos.top + pos.height + actualHeight - parentDim.scroll > parentDim.height ? 'top' :
placement == 'top' && pos.top - parentDim.scroll - actualHeight < 0 ? 'bottom' :
placement == 'right' && pos.right + actualWidth > parentDim.width ? 'left' :
placement == 'left' && pos.left - actualWidth < parentDim.left ? 'right' :
placement
$tip
.removeClass(orgPlacement)
.addClass(placement)
}
var calculatedOffset = this.getCalculatedOffset(placement, pos, actualWidth, actualHeight)
this.applyPlacement(calculatedOffset, placement)
var complete = function () {
that.$element.trigger('shown.bs.' + that.type)
that.hoverState = null
}
$.support.transition && this.$tip.hasClass('fade') ?
$tip
.one('bsTransitionEnd', complete)
.emulateTransitionEnd(150) :
complete()
}
}
Tooltip.prototype.applyPlacement = function (offset, placement) {
var $tip = this.tip()
var width = $tip[0].offsetWidth
var height = $tip[0].offsetHeight
// manually read margins because getBoundingClientRect includes difference
var marginTop = parseInt($tip.css('margin-top'), 10)
var marginLeft = parseInt($tip.css('margin-left'), 10)
// we must check for NaN for ie 8/9
if (isNaN(marginTop)) marginTop = 0
if (isNaN(marginLeft)) marginLeft = 0
offset.top = offset.top + marginTop
offset.left = offset.left + marginLeft
// $.fn.offset doesn't round pixel values
// so we use setOffset directly with our own function B-0
$.offset.setOffset($tip[0], $.extend({
using: function (props) {
$tip.css({
top: Math.round(props.top),
left: Math.round(props.left)
})
}
}, offset), 0)
$tip.addClass('in')
// check to see if placing tip in new offset caused the tip to resize itself
var actualWidth = $tip[0].offsetWidth
var actualHeight = $tip[0].offsetHeight
if (placement == 'top' && actualHeight != height) {
offset.top = offset.top + height - actualHeight
}
var delta = this.getViewportAdjustedDelta(placement, offset, actualWidth, actualHeight)
if (delta.left) offset.left += delta.left
else offset.top += delta.top
var arrowDelta = delta.left ? delta.left * 2 - width + actualWidth : delta.top * 2 - height + actualHeight
var arrowPosition = delta.left ? 'left' : 'top'
var arrowOffsetPosition = delta.left ? 'offsetWidth' : 'offsetHeight'
$tip.offset(offset)
this.replaceArrow(arrowDelta, $tip[0][arrowOffsetPosition], arrowPosition)
}
Tooltip.prototype.replaceArrow = function (delta, dimension, position) {
this.arrow().css(position, delta ? (50 * (1 - delta / dimension) + '%') : '')
}
Tooltip.prototype.setContent = function () {
var $tip = this.tip()
var title = this.getTitle()
$tip.find('.tooltip-inner')[this.options.html ? 'html' : 'text'](title)
$tip.removeClass('fade in top bottom left right')
}
Tooltip.prototype.hide = function () {
var that = this
var $tip = this.tip()
var e = $.Event('hide.bs.' + this.type)
this.$element.removeAttr('aria-describedby')
function complete() {
if (that.hoverState != 'in') $tip.detach()
that.$element.trigger('hidden.bs.' + that.type)
}
this.$element.trigger(e)
if (e.isDefaultPrevented()) return
$tip.removeClass('in')
$.support.transition && this.$tip.hasClass('fade') ?
$tip
.one('bsTransitionEnd', complete)
.emulateTransitionEnd(150) :
complete()
this.hoverState = null
return this
}
Tooltip.prototype.fixTitle = function () {
var $e = this.$element
if ($e.attr('title') || typeof ($e.attr('data-original-title')) != 'string') {
$e.attr('data-original-title', $e.attr('title') || '').attr('title', '')
}
}
Tooltip.prototype.hasContent = function () {
return this.getTitle()
}
Tooltip.prototype.getPosition = function ($element) {
$element = $element || this.$element
var el = $element[0]
var isBody = el.tagName == 'BODY'
return $.extend({}, (typeof el.getBoundingClientRect == 'function') ? el.getBoundingClientRect() : null, {
scroll: isBody ? document.documentElement.scrollTop || document.body.scrollTop : $element.scrollTop(),
width: isBody ? $(window).width() : $element.outerWidth(),
height: isBody ? $(window).height() : $element.outerHeight()
}, isBody ? { top: 0, left: 0 } : $element.offset())
}
Tooltip.prototype.getCalculatedOffset = function (placement, pos, actualWidth, actualHeight) {
return placement == 'bottom' ? { top: pos.top + pos.height, left: pos.left + pos.width / 2 - actualWidth / 2 } :
placement == 'top' ? { top: pos.top - actualHeight, left: pos.left + pos.width / 2 - actualWidth / 2 } :
placement == 'left' ? { top: pos.top + pos.height / 2 - actualHeight / 2, left: pos.left - actualWidth } :
/* placement == 'right' */ { top: pos.top + pos.height / 2 - actualHeight / 2, left: pos.left + pos.width }
}
Tooltip.prototype.getViewportAdjustedDelta = function (placement, pos, actualWidth, actualHeight) {
var delta = { top: 0, left: 0 }
if (!this.$viewport) return delta
var viewportPadding = this.options.viewport && this.options.viewport.padding || 0
var viewportDimensions = this.getPosition(this.$viewport)
if (/right|left/.test(placement)) {
var topEdgeOffset = pos.top - viewportPadding - viewportDimensions.scroll
var bottomEdgeOffset = pos.top + viewportPadding - viewportDimensions.scroll + actualHeight
if (topEdgeOffset < viewportDimensions.top) { // top overflow
delta.top = viewportDimensions.top - topEdgeOffset
} else if (bottomEdgeOffset > viewportDimensions.top + viewportDimensions.height) { // bottom overflow
delta.top = viewportDimensions.top + viewportDimensions.height - bottomEdgeOffset
}
} else {
var leftEdgeOffset = pos.left - viewportPadding
var rightEdgeOffset = pos.left + viewportPadding + actualWidth
if (leftEdgeOffset < viewportDimensions.left) { // left overflow
delta.left = viewportDimensions.left - leftEdgeOffset
} else if (rightEdgeOffset > viewportDimensions.width) { // right overflow
delta.left = viewportDimensions.left + viewportDimensions.width - rightEdgeOffset
}
}
return delta
}
Tooltip.prototype.getTitle = function () {
var title
var $e = this.$element
var o = this.options
title = $e.attr('data-original-title')
|| (typeof o.title == 'function' ? o.title.call($e[0]) : o.title)
return title
}
Tooltip.prototype.getUID = function (prefix) {
do prefix += ~~(Math.random() * 1000000)
while (document.getElementById(prefix))
return prefix
}
Tooltip.prototype.tip = function () {
return (this.$tip = this.$tip || $(this.options.template))
}
Tooltip.prototype.arrow = function () {
return (this.$arrow = this.$arrow || this.tip().find('.tooltip-arrow'))
}
Tooltip.prototype.validate = function () {
if (!this.$element[0].parentNode) {
this.hide()
this.$element = null
this.options = null
}
}
Tooltip.prototype.enable = function () {
this.enabled = true
}
Tooltip.prototype.disable = function () {
this.enabled = false
}
Tooltip.prototype.toggleEnabled = function () {
this.enabled = !this.enabled
}
Tooltip.prototype.toggle = function (e) {
var self = this
if (e) {
self = $(e.currentTarget).data('bs.' + this.type)
if (!self) {
self = new this.constructor(e.currentTarget, this.getDelegateOptions())
$(e.currentTarget).data('bs.' + this.type, self)
}
}
self.tip().hasClass('in') ? self.leave(self) : self.enter(self)
}
Tooltip.prototype.destroy = function () {
clearTimeout(this.timeout)
this.hide().$element.off('.' + this.type).removeData('bs.' + this.type)
}
// TOOLTIP PLUGIN DEFINITION
// =========================
function Plugin(option) {
return this.each(function () {
var $this = $(this)
var data = $this.data('bs.tooltip')
var options = typeof option == 'object' && option
if (!data && option == 'destroy') return
if (!data) $this.data('bs.tooltip', (data = new Tooltip(this, options)))
if (typeof option == 'string') data[option]()
})
}
var old = $.fn.tooltip
$.fn.tooltip = Plugin
$.fn.tooltip.Constructor = Tooltip
// TOOLTIP NO CONFLICT
// ===================
$.fn.tooltip.noConflict = function () {
$.fn.tooltip = old
return this
}
}(jQuery);
/* ========================================================================
* Bootstrap: popover.js v3.2.0
* http://getbootstrap.com/javascript/#popovers
* ========================================================================
* Copyright 2011-2014 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
* ======================================================================== */
+function ($) {
'use strict';
// POPOVER PUBLIC CLASS DEFINITION
// ===============================
var Popover = function (element, options) {
this.init('popover', element, options)
}
if (!$.fn.tooltip) throw new Error('Popover requires tooltip.js')
Popover.VERSION = '3.2.0'
Popover.DEFAULTS = $.extend({}, $.fn.tooltip.Constructor.DEFAULTS, {
placement: 'right',
trigger: 'click',
content: '',
template: '<div class="popover" role="tooltip"><div class="arrow"></div><h3 class="popover-title"></h3><div class="popover-content"></div></div>'
})
// NOTE: POPOVER EXTENDS tooltip.js
// ================================
Popover.prototype = $.extend({}, $.fn.tooltip.Constructor.prototype)
Popover.prototype.constructor = Popover
Popover.prototype.getDefaults = function () {
return Popover.DEFAULTS
}
Popover.prototype.setContent = function () {
var $tip = this.tip()
var title = this.getTitle()
var content = this.getContent()
$tip.find('.popover-title')[this.options.html ? 'html' : 'text'](title)
$tip.find('.popover-content').empty()[ // we use append for html objects to maintain js events
this.options.html ? (typeof content == 'string' ? 'html' : 'append') : 'text'
](content)
$tip.removeClass('fade top bottom left right in')
// IE8 doesn't accept hiding via the `:empty` pseudo selector, we have to do
// this manually by checking the contents.
if (!$tip.find('.popover-title').html()) $tip.find('.popover-title').hide()
}
Popover.prototype.hasContent = function () {
return this.getTitle() || this.getContent()
}
Popover.prototype.getContent = function () {
var $e = this.$element
var o = this.options
return $e.attr('data-content')
|| (typeof o.content == 'function' ?
o.content.call($e[0]) :
o.content)
}
Popover.prototype.arrow = function () {
return (this.$arrow = this.$arrow || this.tip().find('.arrow'))
}
Popover.prototype.tip = function () {
if (!this.$tip) this.$tip = $(this.options.template)
return this.$tip
}
// POPOVER PLUGIN DEFINITION
// =========================
function Plugin(option) {
return this.each(function () {
var $this = $(this)
var data = $this.data('bs.popover')
var options = typeof option == 'object' && option
if (!data && option == 'destroy') return
if (!data) $this.data('bs.popover', (data = new Popover(this, options)))
if (typeof option == 'string') data[option]()
})
}
var old = $.fn.popover
$.fn.popover = Plugin
$.fn.popover.Constructor = Popover
// POPOVER NO CONFLICT
// ===================
$.fn.popover.noConflict = function () {
$.fn.popover = old
return this
}
}(jQuery);
/* ========================================================================
* Bootstrap: scrollspy.js v3.2.0
* http://getbootstrap.com/javascript/#scrollspy
* ========================================================================
* Copyright 2011-2014 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
* ======================================================================== */
+function ($) {
'use strict';
// SCROLLSPY CLASS DEFINITION
// ==========================
function ScrollSpy(element, options) {
var process = $.proxy(this.process, this)
this.$body = $('body')
this.$scrollElement = $(element).is('body') ? $(window) : $(element)
this.options = $.extend({}, ScrollSpy.DEFAULTS, options)
this.selector = (this.options.target || '') + ' .nav li > a'
this.offsets = []
this.targets = []
this.activeTarget = null
this.scrollHeight = 0
this.$scrollElement.on('scroll.bs.scrollspy', process)
this.refresh()
this.process()
}
ScrollSpy.VERSION = '3.2.0'
ScrollSpy.DEFAULTS = {
offset: 10
}
ScrollSpy.prototype.getScrollHeight = function () {
return this.$scrollElement[0].scrollHeight || Math.max(this.$body[0].scrollHeight, document.documentElement.scrollHeight)
}
ScrollSpy.prototype.refresh = function () {
var offsetMethod = 'offset'
var offsetBase = 0
if (!$.isWindow(this.$scrollElement[0])) {
offsetMethod = 'position'
offsetBase = this.$scrollElement.scrollTop()
}
this.offsets = []
this.targets = []
this.scrollHeight = this.getScrollHeight()
var self = this
this.$body
.find(this.selector)
.map(function () {
var $el = $(this)
var href = $el.data('target') || $el.attr('href')
var $href = /^#./.test(href) && $(href)
return ($href
&& $href.length
&& $href.is(':visible')
&& [[$href[offsetMethod]().top + offsetBase, href]]) || null
})
.sort(function (a, b) { return a[0] - b[0] })
.each(function () {
self.offsets.push(this[0])
self.targets.push(this[1])
})
}
ScrollSpy.prototype.process = function () {
var scrollTop = this.$scrollElement.scrollTop() + this.options.offset
var scrollHeight = this.getScrollHeight()
var maxScroll = this.options.offset + scrollHeight - this.$scrollElement.height()
var offsets = this.offsets
var targets = this.targets
var activeTarget = this.activeTarget
var i
if (this.scrollHeight != scrollHeight) {
this.refresh()
}
if (scrollTop >= maxScroll) {
return activeTarget != (i = targets[targets.length - 1]) && this.activate(i)
}
if (activeTarget && scrollTop <= offsets[0]) {
return activeTarget != (i = targets[0]) && this.activate(i)
}
for (i = offsets.length; i--;) {
activeTarget != targets[i]
&& scrollTop >= offsets[i]
&& (!offsets[i + 1] || scrollTop <= offsets[i + 1])
&& this.activate(targets[i])
}
}
ScrollSpy.prototype.activate = function (target) {
this.activeTarget = target
$(this.selector)
.parentsUntil(this.options.target, '.active')
.removeClass('active')
var selector = this.selector +
'[data-target="' + target + '"],' +
this.selector + '[href="' + target + '"]'
var active = $(selector)
.parents('li')
.addClass('active')
if (active.parent('.dropdown-menu').length) {
active = active
.closest('li.dropdown')
.addClass('active')
}
active.trigger('activate.bs.scrollspy')
}
// SCROLLSPY PLUGIN DEFINITION
// ===========================
function Plugin(option) {
return this.each(function () {
var $this = $(this)
var data = $this.data('bs.scrollspy')
var options = typeof option == 'object' && option
if (!data) $this.data('bs.scrollspy', (data = new ScrollSpy(this, options)))
if (typeof option == 'string') data[option]()
})
}
var old = $.fn.scrollspy
$.fn.scrollspy = Plugin
$.fn.scrollspy.Constructor = ScrollSpy
// SCROLLSPY NO CONFLICT
// =====================
$.fn.scrollspy.noConflict = function () {
$.fn.scrollspy = old
return this
}
// SCROLLSPY DATA-API
// ==================
$(window).on('load.bs.scrollspy.data-api', function () {
$('[data-spy="scroll"]').each(function () {
var $spy = $(this)
Plugin.call($spy, $spy.data())
})
})
}(jQuery);
/* ========================================================================
* Bootstrap: tab.js v3.2.0
* http://getbootstrap.com/javascript/#tabs
* ========================================================================
* Copyright 2011-2014 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
* ======================================================================== */
+function ($) {
'use strict';
// TAB CLASS DEFINITION
// ====================
var Tab = function (element) {
this.element = $(element)
}
Tab.VERSION = '3.2.0'
Tab.prototype.show = function () {
var $this = this.element
var $ul = $this.closest('ul:not(.dropdown-menu)')
var selector = $this.data('target')
if (!selector) {
selector = $this.attr('href')
selector = selector && selector.replace(/.*(?=#[^\s]*$)/, '') // strip for ie7
}
if ($this.parent('li').hasClass('active')) return
var previous = $ul.find('.active:last a')[0]
var e = $.Event('show.bs.tab', {
relatedTarget: previous
})
$this.trigger(e)
if (e.isDefaultPrevented()) return
var $target = $(selector)
this.activate($this.closest('li'), $ul)
this.activate($target, $target.parent(), function () {
$this.trigger({
type: 'shown.bs.tab',
relatedTarget: previous
})
})
}
Tab.prototype.activate = function (element, container, callback) {
var $active = container.find('> .active')
var transition = callback
&& $.support.transition
&& $active.hasClass('fade')
function next() {
$active
.removeClass('active')
.find('> .dropdown-menu > .active')
.removeClass('active')
element.addClass('active')
if (transition) {
element[0].offsetWidth // reflow for transition
element.addClass('in')
} else {
element.removeClass('fade')
}
if (element.parent('.dropdown-menu')) {
element.closest('li.dropdown').addClass('active')
}
callback && callback()
}
transition ?
$active
.one('bsTransitionEnd', next)
.emulateTransitionEnd(150) :
next()
$active.removeClass('in')
}
// TAB PLUGIN DEFINITION
// =====================
function Plugin(option) {
return this.each(function () {
var $this = $(this)
var data = $this.data('bs.tab')
if (!data) $this.data('bs.tab', (data = new Tab(this)))
if (typeof option == 'string') data[option]()
})
}
var old = $.fn.tab
$.fn.tab = Plugin
$.fn.tab.Constructor = Tab
// TAB NO CONFLICT
// ===============
$.fn.tab.noConflict = function () {
$.fn.tab = old
return this
}
// TAB DATA-API
// ============
$(document).on('click.bs.tab.data-api', '[data-toggle="tab"], [data-toggle="pill"]', function (e) {
e.preventDefault()
Plugin.call($(this), 'show')
})
}(jQuery);
/* ========================================================================
* Bootstrap: affix.js v3.2.0
* http://getbootstrap.com/javascript/#affix
* ========================================================================
* Copyright 2011-2014 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
* ======================================================================== */
+function ($) {
'use strict';
// AFFIX CLASS DEFINITION
// ======================
var Affix = function (element, options) {
this.options = $.extend({}, Affix.DEFAULTS, options)
this.$target = $(this.options.target)
.on('scroll.bs.affix.data-api', $.proxy(this.checkPosition, this))
.on('click.bs.affix.data-api', $.proxy(this.checkPositionWithEventLoop, this))
this.$element = $(element)
this.affixed =
this.unpin =
this.pinnedOffset = null
this.checkPosition()
}
Affix.VERSION = '3.2.0'
Affix.RESET = 'affix affix-top affix-bottom'
Affix.DEFAULTS = {
offset: 0,
target: window
}
Affix.prototype.getPinnedOffset = function () {
if (this.pinnedOffset) return this.pinnedOffset
this.$element.removeClass(Affix.RESET).addClass('affix')
var scrollTop = this.$target.scrollTop()
var position = this.$element.offset()
return (this.pinnedOffset = position.top - scrollTop)
}
Affix.prototype.checkPositionWithEventLoop = function () {
setTimeout($.proxy(this.checkPosition, this), 1)
}
Affix.prototype.checkPosition = function () {
if (!this.$element.is(':visible')) return
var scrollHeight = $(document).height()
var scrollTop = this.$target.scrollTop()
var position = this.$element.offset()
var offset = this.options.offset
var offsetTop = offset.top
var offsetBottom = offset.bottom
if (typeof offset != 'object') offsetBottom = offsetTop = offset
if (typeof offsetTop == 'function') offsetTop = offset.top(this.$element)
if (typeof offsetBottom == 'function') offsetBottom = offset.bottom(this.$element)
var affix = this.unpin != null && (scrollTop + this.unpin <= position.top) ? false :
offsetBottom != null && (position.top + this.$element.height() >= scrollHeight - offsetBottom) ? 'bottom' :
offsetTop != null && (scrollTop <= offsetTop) ? 'top' : false
if (this.affixed === affix) return
if (this.unpin != null) this.$element.css('top', '')
var affixType = 'affix' + (affix ? '-' + affix : '')
var e = $.Event(affixType + '.bs.affix')
this.$element.trigger(e)
if (e.isDefaultPrevented()) return
this.affixed = affix
this.unpin = affix == 'bottom' ? this.getPinnedOffset() : null
this.$element
.removeClass(Affix.RESET)
.addClass(affixType)
.trigger($.Event(affixType.replace('affix', 'affixed')))
if (affix == 'bottom') {
this.$element.offset({
top: scrollHeight - this.$element.height() - offsetBottom
})
}
}
// AFFIX PLUGIN DEFINITION
// =======================
function Plugin(option) {
return this.each(function () {
var $this = $(this)
var data = $this.data('bs.affix')
var options = typeof option == 'object' && option
if (!data) $this.data('bs.affix', (data = new Affix(this, options)))
if (typeof option == 'string') data[option]()
})
}
var old = $.fn.affix
$.fn.affix = Plugin
$.fn.affix.Constructor = Affix
// AFFIX NO CONFLICT
// =================
$.fn.affix.noConflict = function () {
$.fn.affix = old
return this
}
// AFFIX DATA-API
// ==============
$(window).on('load', function () {
$('[data-spy="affix"]').each(function () {
var $spy = $(this)
var data = $spy.data()
data.offset = data.offset || {}
if (data.offsetBottom) data.offset.bottom = data.offsetBottom
if (data.offsetTop) data.offset.top = data.offsetTop
Plugin.call($spy, data)
})
})
}(jQuery); | zbx-dashboard | /zbx-dashboard-0.1.2.tar.gz/zbx-dashboard-0.1.2/zbx_dashboard/static/zbx_dashboard/js/bootstrap.js | bootstrap.js |
;(function($) {
$.fn.formset = function(opts)
{
var options = $.extend({}, $.fn.formset.defaults, opts),
flatExtraClasses = options.extraClasses.join(' '),
totalForms = $('#id_' + options.prefix + '-TOTAL_FORMS'),
maxForms = $('#id_' + options.prefix + '-MAX_NUM_FORMS'),
childElementSelector = 'input,select,textarea,label,div',
$$ = $(this),
applyExtraClasses = function(row, ndx) {
if (options.extraClasses) {
row.removeClass(flatExtraClasses);
row.addClass(options.extraClasses[ndx % options.extraClasses.length]);
}
},
updateElementIndex = function(elem, prefix, ndx) {
var idRegex = new RegExp(prefix + '-(\\d+|__prefix__)-'),
replacement = prefix + '-' + ndx + '-';
if (elem.attr("for")) elem.attr("for", elem.attr("for").replace(idRegex, replacement));
if (elem.attr('id')) elem.attr('id', elem.attr('id').replace(idRegex, replacement));
if (elem.attr('name')) elem.attr('name', elem.attr('name').replace(idRegex, replacement));
},
hasChildElements = function(row) {
return row.find(childElementSelector).length > 0;
},
showAddButton = function() {
return maxForms.length == 0 || // For Django versions pre 1.2
(maxForms.val() == '' || (maxForms.val() - totalForms.val() > 0));
},
insertDeleteLink = function(row) {
var delCssSelector = options.deleteCssClass.trim().replace(/\s+/g, '.'),
addCssSelector = options.addCssClass.trim().replace(/\s+/g, '.');
if (row.is('TR')) {
// If the forms are laid out in table rows, insert
// the remove button into the last table cell:
row.children(':last').append('<a class="' + options.deleteCssClass +'" href="javascript:void(0)">' + options.deleteText + '</a>');
} else if (row.is('UL') || row.is('OL')) {
// If they're laid out as an ordered/unordered list,
// insert an <li> after the last list item:
row.append('<li><a class="' + options.deleteCssClass + '" href="javascript:void(0)">' + options.deleteText +'</a></li>');
} else {
// Otherwise, just insert the remove button as the
// last child element of the form's container:
row.append('<a class="' + options.deleteCssClass + '" href="javascript:void(0)">' + options.deleteText +'</a>');
}
row.find('a.' + delCssSelector).click(function() {
var row = $(this).parents('.' + options.formCssClass),
del = row.find('input:hidden[id $= "-DELETE"]'),
buttonRow = row.siblings("a." + addCssSelector + ', .' + options.formCssClass + '-add'),
forms;
if (del.length) {
// We're dealing with an inline formset.
// Rather than remove this form from the DOM, we'll mark it as deleted
// and hide it, then let Django handle the deleting:
del.val('on');
row.hide();
forms = $('.' + options.formCssClass).not(':hidden');
} else {
row.remove();
// Update the TOTAL_FORMS count:
forms = $('.' + options.formCssClass).not('.formset-custom-template');
totalForms.val(forms.length);
}
for (var i=0, formCount=forms.length; i<formCount; i++) {
// Apply `extraClasses` to form rows so they're nicely alternating:
applyExtraClasses(forms.eq(i), i);
if (!del.length) {
// Also update names and IDs for all child controls (if this isn't
// a delete-able inline formset) so they remain in sequence:
forms.eq(i).find(childElementSelector).each(function() {
updateElementIndex($(this), options.prefix, i);
});
}
}
// Check if we need to show the add button:
if (buttonRow.is(':hidden') && showAddButton()) buttonRow.show();
// If a post-delete callback was provided, call it with the deleted form:
if (options.removed) options.removed(row);
return false;
});
};
$$.each(function(i) {
var row = $(this),
del = row.find('input:checkbox[id $= "-DELETE"]');
if (del.length) {
// If you specify "can_delete = True" when creating an inline formset,
// Django adds a checkbox to each form in the formset.
// Replace the default checkbox with a hidden field:
if (del.is(':checked')) {
// If an inline formset containing deleted forms fails validation, make sure
// we keep the forms hidden (thanks for the bug report and suggested fix Mike)
del.before('<input type="hidden" name="' + del.attr('name') +'" id="' + del.attr('id') +'" value="on" />');
row.hide();
} else {
del.before('<input type="hidden" name="' + del.attr('name') +'" id="' + del.attr('id') +'" />');
}
// Hide any labels associated with the DELETE checkbox:
$('label[for="' + del.attr('id') + '"]').hide();
del.remove();
}
if (hasChildElements(row)) {
row.addClass(options.formCssClass);
if (row.is(':visible')) {
insertDeleteLink(row);
applyExtraClasses(row, i);
}
}
});
if ($$.length) {
var hideAddButton = !showAddButton(),
addButton, template;
if (options.formTemplate) {
// If a form template was specified, we'll clone it to generate new form instances:
template = (options.formTemplate instanceof $) ? options.formTemplate : $(options.formTemplate);
template.removeAttr('id').addClass(options.formCssClass + ' formset-custom-template');
template.find(childElementSelector).each(function() {
updateElementIndex($(this), options.prefix, '__prefix__');
});
insertDeleteLink(template);
} else {
// Otherwise, use the last form in the formset; this works much better if you've got
// extra (>= 1) forms (thnaks to justhamade for pointing this out):
template = $('.' + options.formCssClass + ':last').clone(true).removeAttr('id');
template.find('input:hidden[id $= "-DELETE"]').remove();
// Clear all cloned fields, except those the user wants to keep (thanks to brunogola for the suggestion):
template.find(childElementSelector).not(options.keepFieldValues).each(function() {
var elem = $(this);
// If this is a checkbox or radiobutton, uncheck it.
// This fixes Issue 1, reported by Wilson.Andrew.J:
if (elem.is('input:checkbox') || elem.is('input:radio')) {
elem.attr('checked', false);
} else {
elem.val('');
}
});
}
// FIXME: Perhaps using $.data would be a better idea?
options.formTemplate = template;
if ($$.is('TR')) {
// If forms are laid out as table rows, insert the
// "add" button in a new table row:
var numCols = $$.eq(0).children().length, // This is a bit of an assumption :|
buttonRow = $('<tr><td colspan="' + numCols + '"><a class="' + options.addCssClass + '" href="javascript:void(0)">' + options.addText + '</a></tr>')
.addClass(options.formCssClass + '-add');
$$.parent().append(buttonRow);
if (hideAddButton) buttonRow.hide();
addButton = buttonRow.find('a');
} else {
// Otherwise, insert it immediately after the last form:
$$.filter(':last').after('<a class="' + options.addCssClass + '" href="javascript:void(0)">' + options.addText + '</a>');
addButton = $$.filter(':last').next();
if (hideAddButton) addButton.hide();
}
addButton.click(function() {
var formCount = parseInt(totalForms.val()),
row = options.formTemplate.clone(true).removeClass('formset-custom-template'),
buttonRow = $($(this).parents('tr.' + options.formCssClass + '-add').get(0) || this);
applyExtraClasses(row, formCount);
row.insertBefore(buttonRow).show();
row.find(childElementSelector).each(function() {
updateElementIndex($(this), options.prefix, formCount);
});
totalForms.val(formCount + 1);
// Check if we've exceeded the maximum allowed number of forms:
if (!showAddButton()) buttonRow.hide();
// If a post-add callback was supplied, call it with the added form:
if (options.added) options.added(row);
return false;
});
}
return $$;
};
/* Setup plugin defaults */
$.fn.formset.defaults = {
prefix: 'form', // The form prefix for your django formset
formTemplate: null, // The jQuery selection cloned to generate new form instances
addText: '+', // Text for the add link
deleteText: '-', // Text for the delete link
addCssClass: 'btn btn-default btn-sm active add-row', // CSS class applied to the add link
deleteCssClass: 'btn btn-danger btn-xs active delete-row', // CSS class applied to the delete link
formCssClass: 'dynamic-form', // CSS class applied to each form in a formset
extraClasses: [], // Additional CSS classes, which will be applied to each form in turn
keepFieldValues: '', // jQuery selector for fields whose values should be kept when the form is cloned
added: null, // Function called each time a new form is added
removed: null // Function called each time a form is deleted
};
})(jQuery); | zbx-dashboard | /zbx-dashboard-0.1.2.tar.gz/zbx-dashboard-0.1.2/zbx_dashboard/static/zbx_dashboard/js/jquery.formset.js | jquery.formset.js |
Introduction
------------
zbx-statsd is a clone of Etsy's statsd and Steve Ivy's py-statsd designed to work with Zabbix as stats collection and graphing backend. Based on sources of pystatsd.
* pystatsd
- https://github.com/sivy/py-statsd/
* Graphite
- http://graphite.wikidot.com
* Statsd
- code: https://github.com/etsy/statsd
- blog post: http://codeascraft.etsy.com/2011/02/15/measure-anything-measure-everything/
Usage
-------------
Client:
from zbx-statsd import Client, Server
sc = Client('example.org',8125, 'zabbix_name_of_this_machine')
sc.timing('python_test.time',500)
sc.increment('python_test.inc_int')
sc.decrement('python_test.decr_int')
Server:
srvr = Server(debug=True)
srvr.serve()
| zbx-statsd | /zbx-statsd-0.4.0.tar.gz/zbx-statsd-0.4.0/README.txt | README.txt |
from __future__ import with_statement
import atexit
import os
from signal import SIGTERM
import sys
import time
class Daemon(object):
def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null',
stderr='/dev/null'):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = pidfile
def daemonize(self):
"""UNIX double-fork magic."""
try:
pid = os.fork()
if pid > 0:
# First parent; exit.
sys.exit(0)
except OSError, e:
sys.stderr.write('Could not fork! %d (%s)\n' %
(e.errno, e.strerror))
sys.exit(1)
# Disconnect from parent environment.
os.chdir('/')
os.setsid()
os.umask(0)
# Fork again.
try:
pid = os.fork()
if pid > 0:
# Second parent; exit.
sys.exit(0)
except OSError, e:
sys.stderr.write('Could not fork (2nd)! %d (%s)\n' %
(e.errno, e.strerror))
sys.exit(1)
# Redirect file descriptors.
sys.stdout.flush()
sys.stderr.flush()
si = file(self.stdin, 'r')
so = file(self.stdout, 'a+')
se = file(self.stderr, 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# Write the pidfile.
atexit.register(self.delpid)
pid = str(os.getpid())
with open(self.pidfile, 'w+') as fp:
fp.write('%s\n' % pid)
def delpid(self):
os.remove(self.pidfile)
def start(self, *args, **kw):
"""Start the daemon."""
pid = None
if os.path.exists(self.pidfile):
with open(self.pidfile, 'r') as fp:
pid = int(fp.read().strip())
if pid:
msg = 'pidfile (%s) exists. Daemon already running?\n'
sys.stderr.write(msg % self.pidfile)
sys.exit(1)
self.daemonize()
self.run(*args, **kw)
def stop(self):
"""Stop the daemon."""
pid = None
if os.path.exists(self.pidfile):
with open(self.pidfile, 'r') as fp:
pid = int(fp.read().strip())
if not pid:
msg = 'pidfile (%s) does not exist. Daemon not running?\n'
sys.stderr.write(msg % self.pidfile)
return
try:
while 1:
os.kill(pid, SIGTERM)
time.sleep(0.1)
except OSError, e:
e = str(e)
if e.find('No such process') > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print e
sys.exit(1)
def restart(self, *args, **kw):
"""Restart the daemon."""
self.stop()
self.start(*args, **kw)
def run(self, *args, **kw):
"""Override this method.""" | zbx-statsd | /zbx-statsd-0.4.0.tar.gz/zbx-statsd-0.4.0/zbxstatsd/daemon.py | daemon.py |
# Steve Ivy <[email protected]>
# http://monkinetic.com
import logging
import socket
import random
# Sends statistics to the stats daemon over UDP
class Client(object):
def __init__(self, host='127.0.0.1', port=8126, zabbix_hostname=None):
"""
Create a new Statsd client.
* host: the host where statsd is listening, defaults to 127.0.0.1
* port: the port where statsd is listening, defaults to 8126
>>> from pystatsd import statsd
>>> stats_client = statsd.Statsd(host, port)
"""
self.host = host
self.port = int(port)
self.zabbix_hostname = zabbix_hostname or socket.gethostname()
self.log = logging.getLogger("zbxstatsd.client")
self.udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def timing(self, stat, time, sample_rate=1):
"""
Log timing information for a single stat
>>> statsd_client.timing('some.time',0.5)
"""
stats = {stat: "%d|ms" % (time*1000.0)}
self.send(stats, sample_rate)
def increment(self, stats, sample_rate=1):
"""
Increments one or more stats counters
>>> statsd_client.increment('some.int')
>>> statsd_client.increment('some.int',0.5)
"""
self.update_stats(stats, 1, sample_rate)
def decrement(self, stats, sample_rate=1):
"""
Decrements one or more stats counters
>>> statsd_client.decrement('some.int')
"""
self.update_stats(stats, -1, sample_rate)
def update_stats(self, stats, delta=1, sampleRate=1):
"""
Updates one or more stats counters by arbitrary amounts
>>> statsd_client.update_stats('some.int',10)
"""
if not isinstance(stats, list):
stats = [stats]
data = dict((stat, "%s|c" % delta) for stat in stats)
self.send(data, sampleRate)
def send(self, data, sample_rate=1):
"""
Squirt the metrics over UDP
"""
addr = (self.host, self.port)
sampled_data = {}
if sample_rate < 1:
if random.random() > sample_rate:
return
sampled_data = dict((stat, "%s|@%s" % (value, sample_rate)) for stat, value in data.iteritems())
else:
sampled_data=data
try:
[self.udp_sock.sendto("%s:%s:%s" % (self.zabbix_hostname, stat, value), addr) for stat, value in sampled_data.iteritems()]
except:
self.log.exception("unexpected error") | zbx-statsd | /zbx-statsd-0.4.0.tar.gz/zbx-statsd-0.4.0/zbxstatsd/client.py | client.py |
import re
from socket import AF_INET, SOCK_DGRAM, socket
import threading
import time
import types
import logging
from zbxsend import Metric, send_to_zabbix
try:
from setproctitle import setproctitle
except ImportError:
setproctitle = None
from daemon import Daemon
__all__ = ['Server']
def _clean_key(k):
return re.sub(
'[^a-zA-Z_\-0-9\.]',
'',
k.replace('/','-').replace(' ','_')
)
class Server(object):
def __init__(self, pct_threshold=90, debug=False, zabbix_host='localhost', zabbix_port=10051, flush_interval=10000):
self.buf = 1024
self.flush_interval = flush_interval
self.pct_threshold = pct_threshold
self.zabbix_host = zabbix_host
self.zabbix_port = zabbix_port
self.debug = debug
self.counters = {}
self.timers = {}
self.flusher = 0
def process(self, data):
try:
host, key, val = data.split(':')
except ValueError:
logging.info('Got invalid data packet. Skipping')
logging.debug('Data packet dump: %r' % data)
return
key = _clean_key(key)
sample_rate = 1;
fields = val.split('|')
item_key = '%s:%s' % (host, key)
if (fields[1] == 'ms'):
if item_key not in self.timers:
self.timers[item_key] = []
self.timers[item_key].append(int(fields[0] or 0))
else:
if len(fields) == 3:
sample_rate = float(re.match('^@([\d\.]+)', fields[2]).groups()[0])
if item_key not in self.counters:
self.counters[item_key] = 0;
self.counters[item_key] += int(fields[0] or 1) * (1 / sample_rate)
def flush(self):
ts = int(time.time())
stats = 0
stat_string = ''
# self.pct_threshold = 10
metrics = []
for k, v in self.counters.items():
v = float(v) / (self.flush_interval / 1000)
host, key = k.split(':',1)
metrics.append(Metric(host, key, str(v), ts))
self.counters[k] = 0
stats += 1
for k, v in self.timers.items():
if len(v) > 0:
v.sort()
count = len(v)
min = v[0]
max = v[-1]
mean = min
max_threshold = max
median = min
if count > 1:
thresh_index = int(round(count*float(self.pct_threshold)/100))#count - int(round((100.0 - self.pct_threshold) / 100) * count)
max_threshold = v[thresh_index - 1]
total = sum(v[:thresh_index])
mean = total / thresh_index
if count%2 == 0:
median = (v[count/2] + v[count/2-1])/2.0
else:
median = (v[count/2])
self.timers[k] = []
host, key = k.split(':', 1)
metrics.extend([
Metric(host, key + '[mean]', mean, ts),
Metric(host, key + '[upper]', max, ts),
Metric(host, key + '[lower]', min, ts),
Metric(host, key + '[count]', count, ts),
Metric(host, key + '[upper_%s]' % self.pct_threshold, max_threshold, ts),
Metric(host, key + '[median]', median, ts),
])
stats += 1
# stat_string += 'statsd.numStats %s %d' % (stats, ts)
send_to_zabbix(metrics, self.zabbix_host, self.zabbix_port)
self._set_timer()
if self.debug:
print metrics
def _set_timer(self):
self._timer = threading.Timer(self.flush_interval/1000, self.flush)
self._timer.start()
def serve(self, hostname='', port=8126, zabbix_host='localhost', zabbix_port=2003):
assert type(port) is types.IntType, 'port is not an integer: %s' % (port)
addr = (hostname, port)
self._sock = socket(AF_INET, SOCK_DGRAM)
self._sock.bind(addr)
self.zabbix_host = zabbix_host
self.zabbix_port = zabbix_port
import signal
import sys
def signal_handler(signal, frame):
self.stop()
signal.signal(signal.SIGINT, signal_handler)
self._set_timer()
while 1:
data, addr = self._sock.recvfrom(self.buf)
self.process(data)
def stop(self):
self._timer.cancel()
self._sock.close()
class ServerDaemon(Daemon):
def run(self, options):
if setproctitle:
setproctitle('zbxstatsd')
server = Server(pct_threshold=options.pct, debug=options.debug, flush_interval=options.flush_interval)
server.serve(options.name, options.port, options.zabbix_host,
options.zabbix_port)
def main():
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--debug', dest='debug', action='store_true', help='debug mode', default=False)
parser.add_argument('-n', '--name', dest='name', help='hostname to run on', default='')
parser.add_argument('-p', '--port', dest='port', help='port to run on', type=int, default=8126)
parser.add_argument('--zabbix-port', dest='zabbix_port', help='port to connect to zabbix on', type=int, default=10051)
parser.add_argument('--zabbix-host', dest='zabbix_host', help='host to connect to zabbix on', type=str, default='localhost')
parser.add_argument('-l', dest='log_file', help='log file', type=str, default=None)
parser.add_argument('-f', '--flush-interval', dest='flush_interval', help='interval between flushes', type=int, default=10000)
parser.add_argument('-t', '--pct', dest='pct', help='stats pct threshold', type=int, default=90)
parser.add_argument('-D', '--daemon', dest='daemonize', action='store_true', help='daemonize', default=False)
parser.add_argument('--pidfile', dest='pidfile', action='store', help='pid file', default='/tmp/pystatsd.pid')
parser.add_argument('--restart', dest='restart', action='store_true', help='restart a running daemon', default=False)
parser.add_argument('--stop', dest='stop', action='store_true', help='stop a running daemon', default=False)
options = parser.parse_args(sys.argv[1:])
logging.basicConfig(level=logging.DEBUG if options.debug else logging.WARN,
stream=open(options.log_file) if options.log_file else sys.stderr)
daemon = ServerDaemon(options.pidfile)
if options.daemonize:
daemon.start(options)
elif options.restart:
daemon.restart(options)
elif options.stop:
daemon.stop()
else:
daemon.run(options)
if __name__ == '__main__':
main() | zbx-statsd | /zbx-statsd-0.4.0.tar.gz/zbx-statsd-0.4.0/zbxstatsd/server.py | server.py |
zbx2slack
================================
.. image:: https://travis-ci.org/laughk/zbx2slack.svg?branch=master
:target: https://travis-ci.org/laughk/zbx2slack
Zabbix Alert Notification Script for Slack. by pure python.
- Can use by "Remote command". But can't use by "Media type".
- if use by python2.6 (like CentOS6.x), install ``argparse`` module. ex,
.. sourcecode:: sh
$ sudo yum install python-argparse
Screenshot
------------------
Notification example.
.. image:: docs/images/screenshot1.png
Install
-----------------------
In your zabbix server,
.. sourcecode:: sh
$ pip install zbx2slack
or you can download directly using ``wget``, ``curl``.
.. sourcecode:: sh
$ wget https://raw.githubusercontent.com/laughk/zbx2slack/master/zbx2slack.py
$ chmod +x zbx2slack.py
(if necessary, use ``sudo``. )
set this script your zabbix server.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
put file and add mode to execute.
Usage
-----------------------
get incoming-webhook url for your slack. from `incoming webhook integration <https://my.slack.com/services/new/incoming-webhook>`_.
In the WebUI of your zabbix server.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1. [Configureation]
2. [Action]
3. Choose 'Trigger' at Event source and Create Action.
4. if "Recovery message" checked, Uncheck the checkbox.
5. At [Conditions] tab, add ``Trigger value = OK`` to Conditions.
- ``Trigger value = OK`` and ``Trigger value = PROBLEM`` are in Conditions.
6. At [Operations] tab, add ``Remote Command``
- Operation type : Remote Command
- Targeta list : any host (ex. Current host)
- Type : Custom script
- Execute on : Zabbix server
- Commands:
(if directly download, replace ``zbx2slack`` to ``zbx2slack.py``.)
.. sourcecode:: sh
zbx2slack \
--zabbix-server-url "http://zabbix.example.com/zabbix" \
--slack-botname "Zabbix Alert" \
--slack-incoming-webhook-url "https://hooks.slack.com/services/xxxxxxxxx/xxxxxxxxx/...." \
--trigger-id "{TRIGGER.ID}" \
--trigger-name "{TRIGGER.NAME}" \
--trigger-status "{TRIGGER.STATUS}" \
--trigger-severity "{TRIGGER.SEVERITY}" \
--event-id "{EVENT.ID}" \
--item "{HOST.NAME1}|{ITEM.NAME1}|{ITEM.KEY1}|{ITEM.VALUE1}|{ITEM.ID1}" \
--item "{HOST.NAME2}|{ITEM.NAME2}|{ITEM.KEY2}|{ITEM.VALUE2}|{ITEM.ID2}" \
--item "{HOST.NAME3}|{ITEM.NAME3}|{ITEM.KEY3}|{ITEM.VALUE3}|{ITEM.ID3}"
LICENSE
------------------------
MIT
AUTHOR
------------------------
Kei Iwasaki <[email protected]>
| zbx2slack | /zbx2slack-0.1.1.tar.gz/zbx2slack-0.1.1/README.rst | README.rst |
import os, sys
import re
import argparse
import json
try:
'''for python3.x'''
from urllib import request
except ImportError:
'''for python2.x'''
import urllib2 as request
__version__ = '0.1.1'
__scriptname__ = os.path.basename(__file__).replace('.py','')
class noticeInfo(object):
def __init__(self, args):
self.slack_botname = args.slack_botname
self.zabbix_server_url = args.zabbix_server_url
self.trigger_id = args.trigger_id
self.trigger_name = args.trigger_name
self.trigger_status = args.trigger_status
self.trigger_severity = args.trigger_severity
self.event_id = args.event_id
self._item_text_list = args.item
self.items = self._gen_items()
self.trigger_url = self._gen_trigger_url()
self.attachment_color = self._gen_attachment_color()
self.pretext = self._gen_pretext()
self.attachment_fields = self._gen_attachment_fields()
self._payload = self._gen_payload()
def _gen_trigger_url(self):
'''
generate and return url of Alert Tigger infomation.
ex.
http://zabbix.example.com/zabbix/tr_events.php?triggerid=00000&eventid=00
'''
_trigger_url = '{0}/tr_events.php?triggerid={1}&eventid={2}'.format(
self.zabbix_server_url,
self.trigger_id,
self.event_id)
return _trigger_url
def _gen_items(self):
"""
generate item dictionary
from:
[
'{HOST.NAME1}|{ITEM.NAME1}|{ITEM.KEY1}|{ITEM.VALUE1}|{ITEM.ID1}',
'{HOST.NAME2}|{ITEM.NAME2}|{ITEM.KEY2}|{ITEM.VALUE2}|{ITEM.ID2}',
]
to:
[
{ 'hostname': '{HOST.NAME1}',
'name': '{ITEM.NAME1}',
'key': '{ITEM.KEY1}',
'value': '{ITEM.VALUE1}',
'id': '{ITEM.ID1}'
},
{ 'hostname': '{HOST.NAME2}',
'name': '{ITEM.NAME2}',
'key': '{ITEM.KEY2}',
'value': '{ITEM.VALUE2}',
'id': '{ITEM.ID2}'
},
]
"""
_items = [
{
'hostname': i[0],
'name': i[1],
'key': i[2],
'value': i[3],
'id': i[4]
}
for i in [
item_text.split('|')
for item_text in self._item_text_list
if not r'*UNKNOWN*' in item_text
]
]
return _items
def _gen_pretext(self):
'''
generate and return string for alert pretext, by the state.
'''
if self.trigger_status == 'PROBLEM':
return ':boom: A problem occurred '
elif self.trigger_status == 'OK':
return ':white_check_mark: A problem recovered :+1:'
else:
return ':ghost::ghost: UNKNOWN :ghost::ghost:'
def _gen_attachment_color(self):
'''
generate and return attchment color by the state.
ref. https://api.slack.com/docs/attachments#color
'''
if self.trigger_status == 'PROBLEM':
return 'danger'
elif self.trigger_status == 'OK':
return 'good'
else:
return 'warning'
def _gen_attachment_fields(self):
'''
generate and return attchment fields for each items.
ref. https://api.slack.com/docs/attachments#fields
'''
_fileds = []
for _item in self.items:
_item_graph_url = '{0}/history.php?action=showgraph&itemids%5B%5D={id}'.format(
self.zabbix_server_url,
**_item)
_fileds.append({
'title': '{hostname} - {name}'.format(**_item),
'value': ':mag_right: {key} | *{value}* [<{0}|Graph>]'.format(_item_graph_url, **_item)
})
return _fileds
def _gen_payload(self):
'''
generate and return payload for posting to slack.
ref. https://api.slack.com/docs/attachments#fields
'''
_payload = json.dumps({
'username': self.slack_botname,
'attachments': [{
'color': self.attachment_color,
'fields': self.attachment_fields,
'title': self.trigger_name,
'title_link': self.trigger_url,
'pretext': self.pretext,
'mrkdwn_in': [
'title', 'pretext', 'fields'
],
}]
})
if isinstance(_payload, str):
return _payload.encode('utf-8')
return _payload
@property
def payload(self):
return self._payload
def alert_to_slack(payload, slack_incoming_webhook):
request_header = {'Content-Type': 'application/json'}
req = request.Request(
slack_incoming_webhook,
payload,
request_header)
request.urlopen(req)
def main():
'''
Environment Check and merge to SCRIPT_ENV
-------------------------------
{{{
'''
SCRIPT_ENV = {
'ZABBIX_SERVER_URL': '',
'INCOMING_WEBHOOK_URL': ''
}
for env in SCRIPT_ENV.keys():
if env in os.environ.keys():
SCRIPT_ENV[env] = os.environ[env]
'''
------------------------------
}}}
'''
'''
Analyze options
-------------------------------
ex.
$ zbx2slack-alert-notify.py \
--zabbix-server-url "http://zabbix.example.com/zabbix" \
--slack-botname "Zabbix Alert" \
--slack-incoming-webhook-url "https://hooks.slack.com/services/xxxxxxxxx/xxxxxxxxx/...." \
--trigger-id "{TRIGGER.ID}" \
--trigger-name "{TRIGGER.NAME}" \
--trigger-status "{TRIGGER.STATUS}" \
--trigger-severity "{TRIGGER.SEVERITY}" \
--event-id "{EVENT.ID}" \
--item "{HOST.NAME1}|{ITEM.NAME1}|{ITEM.KEY1}|{ITEM.VALUE1}|{ITEM.ID1}" \
--item "{HOST.NAME2}|{ITEM.NAME2}|{ITEM.KEY2}|{ITEM.VALUE2}|{ITEM.ID2}" \
--item "{HOST.NAME3}|{ITEM.NAME3}|{ITEM.KEY3}|{ITEM.VALUE3}|{ITEM.ID3}"
{{{
'''
parser = argparse.ArgumentParser(
description='Zabbix Alert Notification Script for Slack.')
parser.add_argument('--zabbix-server-url',
default=SCRIPT_ENV['ZABBIX_SERVER_URL'],
help='Your Zabbix server URL (Default: "")'.format(SCRIPT_ENV['ZABBIX_SERVER_URL']),
type=str)
parser.add_argument('--slack-botname', default='Zabbix Alert',
type=str, help='Slack Bot name (Default: "Zabbix Alert")')
parser.add_argument('--slack-incoming-webhook-url',
default=SCRIPT_ENV['INCOMING_WEBHOOK_URL'],
help='Slack Bot name (Default: "{0}")'.format(SCRIPT_ENV['INCOMING_WEBHOOK_URL']),
type=str)
parser.add_argument('--trigger-id',
type=int, help='Set Zabbix Macro "{TRIGGER.ID}"')
parser.add_argument('--trigger-name',
type=str, help='Set Zabbix Macro "{TRIGGER.NAME}"')
parser.add_argument('--trigger-status',
type=str, help='Set Zabbix Macro "{TRIGGER.STATUS}"' )
parser.add_argument('--trigger-severity',
type=str, help='Set Zabbix Macro "{TRIGGER.SEVERITY}"')
parser.add_argument('--event-id',
type=int, help='Set Zabbix Macro "{EVENT.ID}"')
parser.add_argument('--item', action='append',
type=str, help='Set Zabbix Macro formated by'
'"{HOST.NAME1}|{ITEM.NAME1}|{ITEM.KEY1}|{ITEM.VALUE1}|{ITEM.ID1}"')
parser.add_argument('--version', action='version',
version='{0} {1}'.format(__scriptname__, __version__))
args = parser.parse_args()
'''
--------------------------------
}}}
'''
notice = noticeInfo(args)
alert_to_slack(
notice.payload,
args.slack_incoming_webhook_url)
if __name__ == '__main__':
main() | zbx2slack | /zbx2slack-0.1.1.tar.gz/zbx2slack-0.1.1/zbx2slack.py | zbx2slack.py |
import os
import requests
import json
import sys
import random
import logging
class zapi(object):
def __init__(self, user=None, password=None, url=None):
url = url
user = user
password = password
self.url = url
self.login(user, password, url)
def login(self, username, password, url):
self.username = username
self.password = password
self.url = url
data_auth = {"jsonrpc": "2.0", "method": "user.login",
"params": {"user": self.username, "password": self.password }, "id": 1 }
try:
answer_auth = requests.post(url=self.url, json=data_auth)
pretty_auth = answer_auth.json()
self.auth_token = pretty_auth['result']
except:
logging.error("Zbx auth error. Check user,pass and url")
def request(self, method, params, action_string):
global answer, pretty
self.method = method
self.params = params
auth_token = self.auth_token
url = self.url
self.action_string = action_string
ids = int(random.random() * 100)
data = {"jsonrpc": "2.0", "method": method, "params": params,
"auth": auth_token,"id": ids}
try:
answer = requests.post(url=url, json=data)
except Exception, e:
error_str = action_string + ". Error. Wrong request " + str(e)
logging.warning(error_str)
if answer.status_code == 200:
pretty = answer.json()
if "result" in answer.json():
error_str = action_string + " Success"
logging.debug(error_str)
return pretty
else:
error_str = action_string + ". Error in request responce " + pretty['error']['data']
if "already" in pretty['error']['data']:
logging.info(error_str)
else:
logging.warning(error_str)
else:
error_str = action_string + ". Error. Wrong request " + str(answer.status_code)
logging.warning(error_str)
def logout(self):
data_auth = {"jsonrpc": "2.0", "method": "user.logout", "auth_token" : self.auth_token, "params": [] }
answer_auth = requests.post(url=self.url, json=data_auth)
pretty_auth = answer_auth.json() | zbxapi | /zbxapi-0.0.2.tar.gz/zbxapi-0.0.2/zapi/zabbix_api.py | zabbix_api.py |
import struct
import time
import socket
import logging
try:
import json
except:
import simplejson as json
class Metric(object):
def __init__(self, host, key, value, clock=None):
self.host = host
self.key = key
self.value = value
self.clock = clock
def __repr__(self):
if self.clock is None:
return 'Metric(%r, %r, %r)' % (self.host, self.key, self.value)
return 'Metric(%r, %r, %r, %r)' % (self.host, self.key, self.value, self.clock)
def send_to_zabbix(metrics, zabbix_host='127.0.0.1', zabbix_port=10051, timeout=15):
"""Send set of metrics to Zabbix server."""
j = json.dumps
# Zabbix has very fragile JSON parser, and we cannot use json to dump whole packet
metrics_data = []
for m in metrics:
clock = m.clock or time.time()
metrics_data.append(('\t\t{\n'
'\t\t\t"host":%s,\n'
'\t\t\t"key":%s,\n'
'\t\t\t"value":%s,\n'
'\t\t\t"clock":%s}') % (j(m.host), j(m.key), j(m.value), clock))
json_data = ('{\n'
'\t"request":"sender data",\n'
'\t"data":[\n%s]\n'
'}') % (',\n'.join(metrics_data))
data_len = struct.pack('<Q', len(json_data))
packet = 'ZBXD\1' + data_len + json_data
try:
zabbix = socket.socket()
zabbix.connect((zabbix_host, zabbix_port))
zabbix.settimeout(timeout)
# send metrics to zabbix
zabbix.sendall(packet)
# get response header from zabbix
resp_hdr = _recv_all(zabbix, 13)
if not resp_hdr.startswith('ZBXD\1') or len(resp_hdr) != 13:
logger.error('Wrong zabbix response')
return False
resp_body_len = struct.unpack('<Q', resp_hdr[5:])[0]
# get response body from zabbix
resp_body = zabbix.recv(resp_body_len)
resp = json.loads(resp_body)
logger.debug('Got response from Zabbix: %s' % resp)
logger.info(resp.get('info'))
if resp.get('response') != 'success':
logger.error('Got error from Zabbix: %s', resp)
return False
return True
except socket.timeout, e:
logger.error("zabbix timeout: " + str(e))
return False
except Exception, e:
logger.exception('Error while sending data to Zabbix: ' + str(e))
return False
finally:
zabbix.close()
logger = logging.getLogger('zbxsender')
def _recv_all(sock, count):
buf = ''
while len(buf)<count:
chunk = sock.recv(count-len(buf))
if not chunk:
return buf
buf += chunk
return buf
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
send_to_zabbix([Metric('localhost', 'bucks_earned', 99999)], 'localhost', 10051) | zbxsend | /zbxsend-0.1.6.tar.gz/zbxsend-0.1.6/zbxsend.py | zbxsend.py |
import logging
import ldap3
from .format import get_value, jmes_search
class Ldap:
def __init__(self, host: str, user: str, passwd: str, port=389):
self.__host = host
self.__user = user
self.__passwd = passwd
self.__port = port
    @property
    def host(self):
        return self.__host
    @property
    def user(self):
        return self.__user
    @property
    def passwd(self):
        return self.__passwd
    @property
    def port(self):
        return self.__port
def login(self):
"""
建立 LDAP 远程客户端连接:
:return:
"""
try:
return ldap3.Connection(
server=ldap3.Server(
host=self.host,
port=self.port
),
user=self.user,
password=self.passwd,
auto_bind=True
)
except Exception as err:
logging.error(msg="\033[31m" + str(err) + "\033[0m")
def search_user(self, dn, filter_, results="raw_dn",
search_scope=ldap3.SUBTREE, attributes=ldap3.ALL_ATTRIBUTES):
"""
搜索 LDAP 用户信息:
:param dn:
:param filter_:
:param results:
:param search_scope:
:param attributes:
:return:
"""
# res 只返回真假的 bool 值
client = self.login()
res = client.search(
search_base=dn,
search_scope=search_scope,
search_filter=filter_,
attributes=attributes
)
if client.response:
return client.response[0].get(results) if res else None
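    # Usage sketch — the DN and filter below are placeholders, not values
    # from this project:
    #   ldap.search_user(dn="ou=people,dc=example,dc=com",
    #                    filter_="(uid=jdoe)", results="attributes")
    # returns the "attributes" dict of the first match, or None.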
def search_usergrp(self, dn, filter_="(cn=*)", attributes=ldap3.NO_ATTRIBUTES):
"""
搜索 LDAP 用户组信息:
:param dn:
:param filter_:
:param attributes:
:return:
"""
client = self.login()
res = client.search(
search_base=dn,
search_scope=ldap3.SUBTREE,
search_filter=filter_,
attributes=attributes
)
if res:
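            # the JMES expression is read from the [JMES] SEARCH_LDAP_CN option
            # of the project config and extracts the group DNs from the response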
return jmes_search(
jmes_rexp=get_value(section="JMES", option="SEARCH_LDAP_CN"),
data=client.response
)
def clean_usergrp(self, dn):
"""
清除 LDAP 用户组:
:param dn:
:return:
"""
if self.search_usergrp(dn=dn):
for usergrp in self.search_usergrp(dn):
try:
self.login().delete(usergrp)
logging.info("\033[32m成功清除LDAP用户组 '%s'\033[0m", usergrp)
except Exception as err:
logging.error(msg="\033[31m" + str(err) + "\033[0m")
def create_usergrp(self, dn, member: list):
"""
创建 LDAP 用户组:
:param dn:
:param member:
:return:
"""
try:
self.login().add(dn, "groupOfUniqueNames")
if member:
self.update_member(dn, member)
except Exception as err:
logging.error(msg="\033[31m" + str(err) + "\033[0m")
def update_member(self, cn, member: list = None):
"""
更新 LDAP 用户组成员信息:
:param cn:
:param member:
:return:
"""
try:
if member:
self.login().modify(
cn,
{"uniqueMember": [(ldap3.MODIFY_REPLACE, member)]}
)
except Exception as err:
logging.error(msg="\033[31m" + str(err) + "\033[0m") | zbxtool-cfomp | /zbxtool_cfomp-0.11.12-py3-none-any.whl/lib/utils/ldapapis.py | ldapapis.py |
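
# 使用示意(主机、DN 与密码均为假设值):
# ldap = Ldap(host="ldap.example.com", user="cn=admin,dc=example,dc=com", passwd="***")
# ldap.search_user(dn="ou=people,dc=example,dc=com", filter_="(uid=jdoe)")
# ldap.create_usergrp(dn="cn=ops,ou=groups,dc=example,dc=com",
#                     member=["uid=jdoe,ou=people,dc=example,dc=com"])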
import ssl
from ast import literal_eval
import pyVim.connect
import pyVmomi
from lib.utils.format import get_value, DiskCache
from infi.pyvmomi_wrapper.esxcli import EsxCLI
class VMManger:
def __init__(self, host: str, user: str, passwd: str, port: int = 443):
self._host = host
self._user = user
self._passwd = passwd
self._port = port
@property
def content(self):
"""
获取 vCenter Server 的 Content 信息:
1. 建立连接时采用 SmartConnect() 方法,由于版本原因,某些版本中没有 SmartConnectNoSSL() 方法;
2. 建立连接时为避免证书认证,使用 ssl 模块跳过远程证书认证。
:return:
"""
ssl_context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
ssl_context.verify_mode = ssl.CERT_NONE
return pyVim.connect.SmartConnect(
host=self._host,
user=self._user,
pwd=self._passwd,
port=self._port,
sslContext=ssl_context
).RetrieveContent()
def get_objs(self, obj_type):
"""
根据对象类型获取 vCenter Server 中所有对应的 View:
1. 例如指定 pyVmomi.vim.Datacenter 类型获取所有的 DataCenter 对象;
2. 例如指定 pyVmomi.vim.HostSystem 类型获取所有的 ESXI 主机对象。
:param obj_type:
:return:
"""
return self.content.viewManager.CreateContainerView(
self.content.rootFolder,
[obj_type],
True
).view
def get_obj(self, obj_type, view_name: str):
"""
根据对象类型和 view 名称获取指定 View:
:param obj_type:
:param view_name:
:return:
"""
for view in self.get_objs(obj_type):
if view.name == view_name:
return view
def get_custom_fields(self, view, obj_type, key_: str, field_name: str):
"""
获取 View 对象的自定义参数信息:
1. view 类型可以是 DataCenter,也可以是 ESXI 主机,
在 vCenter Server 中,DataCenter、ESXI、Host 等都可以指定自定义参数;
2. 获取到的 view 的所有自定义参数按照原有的格式写入字典;
3. 最后根据自定义参数字典过滤机房和机柜信息。
:param view:
:param obj_type:
:param key_:
:param field_name:
:return:
"""
esxi_info = dict()
props = dict()
fields = dict()
for prop in view.customValue:
props[prop.key] = prop.value
for field in self.content.customFieldsManager.field:
if field.managedObjectType == obj_type:
if field.key in props.keys():
fields[field.name] = props[field.key]
if fields.get(field_name):
esxi_info[key_] = fields.get(field_name)
return esxi_info
else:
if key_ == "location":
esxi_info[key_] = view.name
return esxi_info
if key_ == "site_rack":
esxi_info[key_] = None
return esxi_info
def find_esxi(self, esxi_name: str):
"""
遍历所有的 DataCenter 和 Compute Cluster 找到 ESXI 主机并获取其机房信息:
:param esxi_name:
:return:
"""
for view in self.get_objs(pyVmomi.vim.Datacenter):
for child in view.hostFolder.childEntity:
if child.name == esxi_name:
return self.get_custom_fields(
view=view,
obj_type=pyVmomi.vim.Datacenter,
key_="location",
field_name="机房名称"
)
if isinstance(child, pyVmomi.vim.ClusterComputeResource):
for host in child.host:
if host.name == esxi_name:
return self.get_custom_fields(
view=view,
obj_type=pyVmomi.vim.Datacenter,
key_="location",
field_name="机房名称"
)
def fetch_esxi(self, esxi_name: str):
"""
获取指定 ESXI 主机信息:
:param esxi_name:
:return:
"""
instance_ = DiskCache()
if instance_.get_cache("esxi_" + esxi_name):
return instance_.get_cache("esxi_" + esxi_name)
if not instance_.get_cache("esxi_" + esxi_name):
esxi_ = self.get_obj(pyVmomi.vim.HostSystem, esxi_name)
esxi_hardware = esxi_.summary.hardware if esxi_ else None
esxi_info = self.find_esxi(esxi_name)
if esxi_info:
esxi_info["type"] = "Server"
esxi_info["name"] = esxi_.name
                if esxi_.config:
                    esxi_info["os"] = esxi_.config.product.name
                    esxi_info["os_short"] = esxi_.config.product.osType
                else:
                    # config 不可用时回退到固定的 ESXi 标识
                    esxi_info["os"] = "VMware ESXi"
                    esxi_info["os_short"] = "vmnix-x86"
esxi_info["os_full"] = esxi_.summary.config.product.fullName
esxi_info["model"] = esxi_hardware.model
esxi_info["vendor"] = esxi_hardware.vendor
esxi_info["hardware_full"] = "\n".join(
[
get_value(section="VCENTER", option="VCENTER_CPU", raw=True) % (
esxi_hardware.cpuModel,
esxi_hardware.numCpuPkgs,
esxi_hardware.numCpuCores,
esxi_hardware.numCpuThreads
),
f"内存: {esxi_hardware.memorySize / 1024 / 1024 // 1024}GB"
]
)
# get host's ipA and netmask
if esxi_ and esxi_.config:
for vnic in esxi_.config.network.vnic:
if isinstance(vnic, pyVmomi.vim.host.VirtualNic):
esxi_info["host_networks"] = vnic.spec.ip.ipAddress
esxi_info["alias"] = vnic.spec.ip.ipAddress
esxi_info["host_netmask"] = vnic.spec.ip.subnetMask
# get host's mac address
if esxi_ and esxi_.config:
for portgroup in esxi_.config.network.portgroup:
for port in portgroup.port:
if port.type == "host":
esxi_info["macaddress_a"] = "".join(port.mac)
# get host's serial_number
ordered_keys = literal_eval(
get_value(
section="VCENTER",
option="HOST_SERIAL_ORDERED_KEY"
)
)
sn_info = {}
if esxi_hardware:
for iden in esxi_hardware.otherIdentifyingInfo:
if isinstance(iden, pyVmomi.vim.host.SystemIdentificationInfo) \
and iden.identifierType.key in ordered_keys:
sn_info[iden.identifierType.key] = iden.identifierValue
for key in ordered_keys:
if sn_info.get(key):
esxi_info["serialno_a"] = sn_info.get(key)
break
if esxi_info and esxi_info.get("serialno_a") == "None":
esxi_info["serialno_a"] = EsxCLI(esxi_).get("hardware.platform").Get().SerialNumber
if esxi_info:
esxi_info.update(
self.get_custom_fields(
view=esxi_,
obj_type=pyVmomi.vim.HostSystem,
key_="site_rack",
field_name="机柜"
)
)
instance_.set_cache(
key="esxi_" + esxi_name,
value=esxi_info,
expire=60
)
return instance_.get_cache("esxi_" + esxi_name) | zbxtool-cfomp | /zbxtool_cfomp-0.11.12-py3-none-any.whl/lib/utils/esxiapis.py | esxiapis.py |
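
# 使用示意(vCenter 地址与账号均为假设值):
# vc = VMManger(host="vcenter.example.com", user="administrator@vsphere.local", passwd="***")
# esxi_info = vc.fetch_esxi("esxi-01.example.com")  # 返回包含机房、机柜、序列号等字段的字典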
import convertapi
import logging
import hashlib
import configparser
import re
import numpy
import pandas
import jmespath
import time
import traceback
import sys
import json
import os
import IPy
from bisect import bisect_left
from convertapi import ApiError, BaseError
from datetime import datetime
from dateutil.parser import parse
from prettytable import PrettyTable
from openpyxl.utils import get_column_letter
from diskcache import Cache
class IPS:
def __init__(self, check_file):
self.check_file = check_file
@property
def idc_networks(self):
"""
读取 ip_range.json 中的机房网段信息:
:return:
"""
IDC_NETWORKS = []
if self.check_file and os.path.exists(self.check_file):
with open(self.check_file, mode="r", encoding="utf8") as fp:
for value in json.load(fp).values():
IDC_NETWORKS.extend([IPy.IP(net) for net in value])
return IDC_NETWORKS
def valid_ip(self, ip: str):
"""
验证 ip 是否在机房网段内:
:param ip:
:return:
"""
if ip and self.idc_networks:
for net in self.idc_networks:
if ip in net:
return True
return False
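
# 使用示意(文件路径与 IP 均为假设值,ip_range.json 的 value 为各机房网段列表):
# ips = IPS("ip_range.json")
# ips.valid_ip("10.1.2.3")  # 落在任一机房网段内时返回 True,否则返回 False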
def make_timestamp(date: str):
"""
将输入的字符串格式的日期转化为时间戳:
1. date 的格式可以是多种,如:2023-01-10、20230110、2023/01/10 等。
:param date:
:return:
"""
return time.mktime(
time.strptime(
datetime.strftime(
parse(date),
"%Y%m%d"
),
"%Y%m%d"
)
)
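# 例如:make_timestamp("2023-01-10")、make_timestamp("20230110") 与 make_timestamp("2023/01/10")
# 解析后均对应同一天,返回相同的时间戳。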
def convert_unit(size):
"""
    通过二分查找法将 bytes 转换为易读的单位:
:param size:
:return:
"""
factor = bisect_left(
[1024 ** i for i in range(1, 8)],
size
)
return str(round(size / (1024 ** factor), 2)) + "BKMGTPE"[factor]
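# 例如:convert_unit(2048) 返回 "2.0K",convert_unit(3 * 1024 ** 3) 返回 "3.0G"。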
def convert_pdf(api_secret: str, output_docx: str):
"""
将 Word 文档转化为 PDF 文档:
:param api_secret:
:param output_docx:
:return:
"""
try:
if api_secret:
convertapi.api_secret = api_secret
res = convertapi.convert("pdf", {"File": output_docx})
res.file.save(output_docx.replace(".docx", ".pdf"))
logging.info(
"\033[32mPDF 报表导出完成: %s\033[0m",
os.path.abspath(output_docx) if os.path.exists(output_docx) else ""
)
except (ApiError, BaseError):
logging.error(traceback.format_exc())
sys.exit(-1)
class DiskCache:
def __init__(self):
self._cache = Cache(
get_value(
section="CACHE",
option="CACHE_FILE"
)
)
def set_cache(self, key, value, expire=None, retry=True):
"""
设置本地文件缓存:
:param key:
:param value:
:param expire:
:param retry:
:return:
"""
self._cache.set(
key=key,
value=value,
expire=expire,
retry=retry
)
def get_cache(self, key) -> str:
"""
读取本地文件缓存:
:param key:
:return:
"""
return self._cache.get(key=key)
def get_value(section: str, option: str, raw: bool = False) -> str:
"""
读取 configs.ini 配置文件中的参数信息:
1. configs.ini 配置文件中以 section 作为分类,section 区分大小写;
2. section 由 option 组成(类似于 "k = v"),option 不区分大小写;
3. option 中的 value 可以包含占位符,但在读取时必须指定 "raw=True" 参数,否则将被作为变量处理。
:param section:
:param option:
:param raw:
:return:
"""
configs = configparser.ConfigParser()
configs.read(
filenames=os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"configs.ini"
),
encoding="utf-8"
)
return configs.get(
section=section,
option=option,
raw=raw
)
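# 使用示意(复用本项目中已出现的配置项;主机组名 "Linux servers" 为假设值):
# get_value(section="EXCEL", option="PANDAS_WRITE_EXCEL_ENGINE")
# get_value(section="ZABBIX", option="TOTAL_FS_SIZE_PARAMS", raw=True) % "Linux servers"  # 含 %s 占位符时需 raw=True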
def pretty_tbl(title: str, field_names: list, rows: list):
"""
实例化表格输出对象:
:param title:
:param field_names:
:param rows:
:return:
"""
tbl = PrettyTable()
tbl.title = title
tbl.field_names = field_names
tbl.add_rows(rows=rows)
return tbl
def jmes_search(jmes_rexp: str, data: list or dict, options=None):
"""
通过 Jmes 处理 Json 字符串:
:param jmes_rexp:
:param data:
:param options:
:return:
"""
try:
return jmespath.search(
expression=jmes_rexp,
data=data,
options=options
)
except Exception as err:
logging.error(msg="\033[31m" + str(err) + "\033[0m")
def md5_(text: str) -> str:
"""
生成 hash 信息摘要:
1. 采用 md5 加密算法;
2. 为支持中文,需要编码为 utf-8。
:param text: 要加密的字符串
:return:
"""
md5 = hashlib.md5()
if text:
md5.update(text.encode("utf-8"))
return md5.hexdigest()
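# 例如:md5_("abc") 返回 "900150983cd24fb0d6963f7d28e17f72"(RFC 1321 测试向量)。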
def re_search(rexp: str, content: str, mode=re.X) -> bool:
"""
正则表达式搜索字符串:
:param rexp:
:param content:
:param mode:
:return:
"""
return bool(re.compile(rexp, mode).search(content))
def re_findall(rexp: str, content: str, mode=re.X) -> list:
"""
正则表达式匹配所有符合条件的字符串:
:param rexp:
:param content:
:param mode:
:return:
"""
return re.compile(rexp, mode).findall(content)
def re_sub(rexp: str, replaced_str: str, origin_str: str) -> str:
"""
正则表达式查找字符串并替换:
:param rexp:
:param replaced_str:
:param origin_str:
:return:
"""
return re.compile(rexp).sub(replaced_str, origin_str)
def to_excel_(df: pandas.DataFrame, fname: str, shname: str = "Sheet1"):
"""
    利用 Pandas、Numpy 自动设置单元格列宽并导出为 Excel 文件:
    1. 如果要操作 "xlsx" 格式的文件,需要将 ExcelWriter 的引擎设置为 "openpyxl";
    2. with 语句会自动保存文件。
    :param df: Pandas DataFrame
    :param fname: Excel Filename
    :param shname: Excel Sheet Name
:return:
"""
widths = numpy.max(
[
# 计算每列表头的字符宽度
df.columns.to_series().apply(lambda x: len(x.encode("utf-8"))).values,
# 计算每列的最大字符宽度
df.astype(str).applymap(lambda x: len(x.encode("utf-8"))).agg(max).values
],
axis=0
)
with pandas.ExcelWriter(
fname,
engine=get_value(section="EXCEL", option="PANDAS_WRITE_EXCEL_ENGINE")
) as writer:
df.to_excel(excel_writer=writer, sheet_name=shname, index=False)
for i, width in enumerate(widths, start=1):
writer.sheets[shname].column_dimensions[get_column_letter(i)].width = width + 2 | zbxtool-cfomp | /zbxtool_cfomp-0.11.12-py3-none-any.whl/lib/utils/format.py | format.py |
import collections
from .format import get_value, jmes_search
def parse_inventory_tag_literal(inventory_tag: str):
"""
将一连串以英文分号分割的 "k=v" 转化为字典:
:param inventory_tag:
:return:
"""
res = {}
if inventory_tag is None:
return res
if not isinstance(inventory_tag, str):
raise TypeError()
tags = inventory_tag.strip().split(";")
for item in tags:
item = item.strip()
pos = item.find("=")
if pos < 0:
key = item.rstrip()
if len(key) > 0:
res[key] = None
elif pos > 0:
key = item[0: pos].rstrip()
if len(key) > 0:
res[key] = item[pos + 1:].lstrip()
return res
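# 使用示意:
# >>> parse_inventory_tag_literal("key1=val1;key2;key3=val3")
# {'key1': 'val1', 'key2': None, 'key3': 'val3'}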
class InventoryTagDict(collections.UserDict):
"""
处理 host inventory 中的 tag 字段:
1. 由于 Zabbix 3.0/4.0 版本 host 不支持 tag 功能,缓解方法是在 host inventory 的 tag 字段定稿相应的 tag;
2. inventory tag 的格式规范是以分号分隔的 k=v 或者 k 字符串,如:key1=val1;key2;key3=val3。
"""
def __init__(self, inventory_tag=None):
super().__init__(parse_inventory_tag_literal(inventory_tag))
def __setitem__(self, key, item):
if not isinstance(item, str) and item is not None:
raise TypeError()
super().__setitem__(key, item)
def __str__(self):
res = ""
for k, v in self.data.items():
res += k
if v:
res += ("=" + v)
res += ";"
return res
class HostTags:
"""
处理 Zabbix Host 的 tag:
1. Zabbix 6.0 版本中 Host 已支持 tag;
2. 主要涉及到 Host tag 的新增和删除操作。
"""
def __init__(self, tags: list):
self._tags = tags
@property
def format_tags(self) -> list:
"""
去除 host tag 字典中 "automatic" key:
1. 一个 host 的 tag 字典中主要包含三个 key: tag、value、automatic;
2. 在通过 Zabbix Api 操作 host tag 时,tag 字典中不能包含 "automatic" key,否则会报错。
:return:
"""
if self._tags:
for tag in self._tags:
if isinstance(tag, dict) and "automatic" in tag.keys():
del tag["automatic"]
return self._tags
return []
def have(self, tag_name: str) -> bool:
"""
判断 host 中是否包含某个 tag:
:param tag_name: host tag Name
:return:
"""
return bool(
jmes_search(
jmes_rexp=get_value(
section="JMES",
option="SEARCH_HOST_TAG",
raw=True
) % tag_name,
data=self._tags
)
)
def added_tags(self, tag_name: str, tag_value: str or int or None):
"""
往 host 中添加 tag 并返回字典列表:
:param tag_name: host tag Name
:param tag_value: host tag Value
:return:
"""
if not self.have(tag_name):
self._tags.append(
{
"tag": tag_name,
"value": tag_value
}
)
return self.format_tags
return self.format_tags
def get_del_tag_value(self, tag_name: str):
if self._tags:
for host_tag in self._tags:
if host_tag.get("tag") and host_tag.get("tag") == tag_name:
return host_tag.get("value")
def deleted_tags(self, tag_name: str):
"""
从 host 中删除 tag 并返回字典列表:
:param tag_name: host tag Name
:return:
"""
if self.have(tag_name):
del self._tags[
self._tags.index(
{
"tag": tag_name,
"value": self.get_del_tag_value(tag_name)
}
)
]
return self.format_tags
return self.format_tags
class ItemTags(HostTags):
def __init__(self, tags: list):
self._tags = tags
super().__init__(tags)
def added_item_tags(self, tag_name: str, tag_value: str or int or None):
"""
为 Item 添加标签:
1. 在 Zabbix 旧版本中(如 3.0、4.0),添加监控项时可以将监控项加入对应的 Application,
但是在 Zabbix 6.0 版本中已经全面舍弃了 Application;
2. 虽然新版本舍弃了 Application,但是可以给 Item 分配标签,标签名称默认还是 "Application"。
:param tag_name:
:param tag_value:
:return:
"""
if self.have(tag_name) and tag_name == "Application":
for tag in self._tags:
if tag.get("tag") == "Application":
tag["value"] = tag_value
return self._tags
if not self.have(tag_name):
return self.added_tags(tag_name=tag_name, tag_value=tag_value) | zbxtool-cfomp | /zbxtool_cfomp-0.11.12-py3-none-any.whl/lib/utils/zbxtags.py | zbxtags.py |
from docx.oxml import OxmlElement
from docx.shared import Inches, Pt
from docx.oxml.ns import qn
from docx.shared import RGBColor
def set_cell_border(cell, **kwargs):
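    """
    通过底层 OOXML 元素为表格单元格设置边框:
    1. edge 可取 start、top、end、bottom、insideH、insideV;
    2. 每个 edge 的属性支持 sz、val、color、space、shadow。
    使用示意:set_cell_border(cell, top={"sz": 12, "val": "single", "color": "#FF0000"})
    """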
tcPr = cell._tc.get_or_add_tcPr()
tcBorders = tcPr.first_child_found_in("w:tcBorders")
if tcBorders is None:
tcBorders = OxmlElement("w:tcBorders")
tcPr.append(tcBorders)
for edge in ("start", "top", "end", "bottom", "insideH", "insideV"):
edge_data = kwargs.get(edge)
if edge_data:
tag = "w:{}".format(edge)
element = tcBorders.find(qn(tag))
if element is None:
element = OxmlElement(tag)
tcBorders.append(element)
for key in ["sz", "val", "color", "space", "shadow"]:
if key in edge_data:
element.set(qn("w:{}".format(key)), str(edge_data[key]))
def colored_cell(cell, value):
"""
    根据 value 设置 cell 的字体颜色(value >= 100 时按 99.99 处理)。
85 < value <= 90, RGB_YELLOW
90 < value <= 95, RGB_ORANGE
95 < value, RGB_RED
"""
RGB_YELLOW = 255, 165, 0
RGB_ORANGE = 255, 102, 0
RGB_RED = 255, 0, 0
if value >= 100:
value = 99.99
run = cell.paragraphs[0].add_run(str("%.2f" % value) + "%")
if 85 < value <= 90:
run.font.color.rgb = RGBColor(*RGB_YELLOW)
elif 90 < value <= 95:
run.font.color.rgb = RGBColor(*RGB_ORANGE)
elif 95 < value:
run.font.color.rgb = RGBColor(*RGB_RED)
class PyDocs:
def __init__(self, doc):
self._doc = doc
def add_summarize_table(self, style="Medium Shading 1 Accent 1"):
tbl = self._doc.add_table(rows=1, cols=12, style=style)
tbl.autofit = False
tbl.cell(0, 0).width, tbl.cell(0, 0).text = Inches(1), "主机组名称"
tbl.cell(0, 1).width, tbl.cell(0, 1).text = Inches(0.5), "主机数量"
tbl.cell(0, 2).width, tbl.cell(0, 2).text = Inches(0.75), "CPU平均利用率"
tbl.cell(0, 3).width, tbl.cell(0, 3).text = Inches(0.75), "内存总量"
tbl.cell(0, 4).width, tbl.cell(0, 4).text = Inches(0.75), "内存最高利用率"
tbl.cell(0, 5).width, tbl.cell(0, 5).text = Inches(0.75), "内存最低利用率"
tbl.cell(0, 6).width, tbl.cell(0, 6).text = Inches(0.75), "内存平均利用率"
tbl.cell(0, 7).width, tbl.cell(0, 7).text = Inches(0.75), "磁盘总量"
tbl.cell(0, 8).width, tbl.cell(0, 8).text = Inches(0.75), "磁盘最高使用率"
tbl.cell(0, 9).width, tbl.cell(0, 9).text = Inches(0.75), "磁盘最低使用率"
tbl.cell(0, 10).width, tbl.cell(0, 10).text = Inches(0.75), "磁盘平均使用率"
tbl.cell(0, 11).width, tbl.cell(0, 11).text = Inches(0.75), "严重告警数量"
return tbl
def add_vm_table(self, style="Medium Shading 1 Accent 1"):
tbl = self._doc.add_table(rows=1, cols=3, style=style)
tbl.cell(0, 0).width, tbl.cell(0, 0).text = Inches(3.0), "主机组名称"
tbl.cell(0, 1).width, tbl.cell(0, 1).text = Inches(4.0), "主机名称"
tbl.cell(0, 2).width, tbl.cell(0, 2).text = Inches(0.7), "CPU平均使用率"
return tbl
def add_total_tbl(self, style="Medium Shading 1 Accent 1"):
tbl = self._doc.add_table(rows=4, cols=2, style=style)
tbl.cell(0, 0).text = "统计日期"
tbl.cell(1, 0).text = "主机组数量"
tbl.cell(2, 0).text = "主机数量"
tbl.cell(3, 0).text = "严重告警数量"
return tbl
def add_detail_table(self, style="Medium Shading 1 Accent 1"):
tbl = self._doc.add_table(rows=3, cols=6, style=style)
cell_left = tbl.cell(0, 0)
cell_right = tbl.cell(0, 5)
header = cell_left.merge(cell_right)
header.text = "各级别告警数量"
tbl.cell(1, 0).text = "未分类"
tbl.cell(1, 1).text = "通知"
tbl.cell(1, 2).text = "警示"
tbl.cell(1, 3).text = "严重"
tbl.cell(1, 4).text = "危险"
tbl.cell(1, 5).text = "灾难"
return tbl
def add_event_grp_tbl(self, style="Medium Shading 1 Accent 1"):
tbl = self._doc.add_table(rows=1, cols=3, style=style)
tbl.cell(0, 0).width = Inches(5)
tbl.cell(0, 1).width = Inches(1)
tbl.cell(0, 2).width = Inches(2.5)
tbl.cell(0, 0).text = "主机组名称"
tbl.cell(0, 1).text = "主机数量"
tbl.cell(0, 2).text = "严重告警数量"
return tbl
def add_event_host_tbl(self, style="Medium Shading 1 Accent 1"):
tbl = self._doc.add_table(rows=1, cols=3, style=style)
tbl.cell(0, 0).width = Inches(3.0)
tbl.cell(0, 1).width = Inches(3.4)
tbl.cell(0, 2).width = Inches(1.3)
tbl.cell(0, 0).text = "主机组名称"
tbl.cell(0, 1).text = "主机名称"
tbl.cell(0, 2).text = "严重告警数量"
return tbl
def add_mem_grp_tbl(self, style="Medium Shading 1 Accent 1"):
tbl = self._doc.add_table(rows=1, cols=4, style=style)
tbl.cell(0, 0).width = Inches(5)
tbl.cell(0, 1).width = Inches(0.5)
tbl.cell(0, 2).width = Inches(1.7)
tbl.cell(0, 3).width = Inches(1.3)
tbl.cell(0, 0).text = "主机组名称"
tbl.cell(0, 1).text = "主机数量"
tbl.cell(0, 2).text = "内存平均使用率"
tbl.cell(0, 3).text = "内存总量"
return tbl
def add_mem_host_tbl(self, style="Medium Shading 1 Accent 1"):
tbl = self._doc.add_table(rows=1, cols=4, style=style)
tbl.cell(0, 0).width = Inches(3.0)
tbl.cell(0, 1).width = Inches(3.4)
tbl.cell(0, 2).width = Inches(1.3)
tbl.cell(0, 3).width = Inches(1.3)
tbl.cell(0, 0).text = "主机组名称"
tbl.cell(0, 1).text = "主机名称"
tbl.cell(0, 2).text = "内存平均使用率"
tbl.cell(0, 3).text = "内存总量"
return tbl
def add_cpu_grp_tbl(self, style="Medium Shading 1 Accent 1"):
tbl = self._doc.add_table(rows=1, cols=3, style=style)
tbl.cell(0, 0).width = Inches(5)
tbl.cell(0, 1).width = Inches(1.0)
tbl.cell(0, 2).width = Inches(1.2)
tbl.cell(0, 0).text = "主机组名称"
tbl.cell(0, 1).text = "主机数量"
tbl.cell(0, 2).text = "CPU平均使用率"
return tbl
def add_cpu_host_tbl(self, style="Medium Shading 1 Accent 1"):
tbl = self._doc.add_table(rows=1, cols=3, style=style)
tbl.cell(0, 0).width = Inches(3.5)
tbl.cell(0, 1).width = Inches(3.5)
tbl.cell(0, 2).width = Inches(0.7)
tbl.cell(0, 0).text = "主机组名称"
tbl.cell(0, 1).text = "主机名称"
tbl.cell(0, 2).text = "CPU平均使用率"
return tbl
def add_disk_grp_tbl(self, style="Medium Shading 1 Accent 1"):
tbl = self._doc.add_table(rows=1, cols=4, style=style)
tbl.cell(0, 0).width = Inches(5)
tbl.cell(0, 1).width = Inches(0.5)
tbl.cell(0, 2).width = Inches(1.7)
tbl.cell(0, 3).width = Inches(1.3)
tbl.cell(0, 0).text = "主机组名称"
tbl.cell(0, 1).text = "主机数量"
tbl.cell(0, 2).text = "磁盘平均使用率"
tbl.cell(0, 3).text = "磁盘总量"
return tbl
def add_disk_disk_tbl(self, style="Medium Shading 1 Accent 1"):
tbl = self._doc.add_table(rows=1, cols=4, style=style)
tbl.cell(0, 0).width = Inches(3.0)
tbl.cell(0, 1).width = Inches(3.4)
tbl.cell(0, 2).width = Inches(1.3)
tbl.cell(0, 3).width = Inches(1.3)
tbl.cell(0, 0).text = "主机组名称"
tbl.cell(0, 1).text = "主机名称"
tbl.cell(0, 2).text = "磁盘平均使用率"
tbl.cell(0, 3).text = "磁盘总量"
return tbl
def add_para(self, run_, pt_, rgb_, alignment, para_content=""):
para = self._doc.add_paragraph(para_content)
para.alignment = alignment
run = para.add_run(run_)
run.bold = True
run.font.size = Pt(pt_)
run.font.color.rgb = RGBColor(*rgb_)
def add_heading(self, level, run_, pt_, font_name="微软雅黑", qn_=qn("w:eastAsia"), heading=""):
heading = self._doc.add_heading(heading, level=level).add_run(run_)
heading.font.name = font_name
heading.font.size = Pt(pt_)
heading._element.rPr.rFonts.set(qn_, font_name) | zbxtool-cfomp | /zbxtool_cfomp-0.11.12-py3-none-any.whl/lib/utils/docs.py | docs.py |
import logging
from elasticsearch import Elasticsearch, helpers
from elasticsearch import ConnectionError, SSLError, RequestError, NotFoundError
class ESManager:
def __init__(self, url: str, user: str, passwd: str):
self.__url = url
self.__user = user
self.__passwd = passwd
@property
def client(self):
"""
建立 ElasticSearch 连接:
1. 默认为免密连接;
2. 也可以指定用户名和密码。
:return:
"""
try:
return Elasticsearch(
self.__url,
http_auth=(self.__user, self.__passwd)
)
except (ConnectionError, SSLError) as err:
logging.error(msg="\033[31m" + str(err) + "\033[0m")
def bulk(self, actions: list, index: str):
"""
创建 ElasticSearch 索引:
1. 通过 bulk() 方法可以在单个连接中执行多个操作,极大地提升索引性能。
:param actions:
:param index:
:return:
"""
try:
helpers.bulk(
client=self.client,
actions=actions,
index=index,
raise_on_error=True
)
except (ConnectionError, SSLError) as err:
logging.error(msg="\033[31m" + str(err) + "\033[0m")
def get_es_tpl(self, tpl_name: str):
"""
根据模板名称获取 ElasticSearch 模板信息:
:param tpl_name:
:return:
"""
try:
tpl = self.client.indices.get_template(name=tpl_name)
if tpl:
return tpl.get(tpl_name)
except (RequestError, NotFoundError) as err:
logging.error(msg="\033[31m" + str(err) + "\033[0m")
def put_template(self, tpl_name: str, body: dict):
"""
推送 ElasticSearch 模板:
:param tpl_name:
:param body:
:return:
"""
try:
tpl = self.get_es_tpl(tpl_name=tpl_name)
            # 当指定的模板存在时,则 Merge mappings 到指定的模板
            if tpl:
                tpl.update(body)
self.client.indices.put_template(
name=tpl_name,
body=tpl if tpl else body,
# "create" 设置为 False 时,如果不存在这个模板则创建,如果存在则更新
create=False
)
except (RequestError, NotFoundError) as err:
logging.error(msg="\033[31m" + str(err) + "\033[0m") | zbxtool-cfomp | /zbxtool_cfomp-0.11.12-py3-none-any.whl/lib/utils/esapis.py | esapis.py |
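
# 使用示意(集群地址、账号与模板名均为示例值):
# es = ESManager(url="http://es.example.com:9200", user="elastic", passwd="***")
# es.put_template(tpl_name="zabbix-raw-host-info", body={"mappings": {...}})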
## utils 目录下各脚本功能介绍
* ```esapis.py```: 提供操作 ES 集群的方法,包括通过 bulk() 方法创建 ES 索引等。
* ```esxiapis.py```: 提供操作 vCenter Server 的方法,包括获取 Content、搜索对象、搜索 ESXI 主机、获取 ESXI 主机信息等。
* ```ldapapis.py```: 提供操作 Open LDAP 的方法,包括搜索用户、搜索用户组、清除用户组、创建用户组、更新用户组成员等。
* ```wxapis.py```: 提供操作企业微信接口的方法,包括获取所有部门信息、获取部门id、获取部门员工信息等。
* ```zbxapis.py```: 提供操作 Zabbix Server 6.0 接口的方法,包括各种信息获取、更新、创建、删除等。
* ```zbxtags.py```: 提供操作 Zabbix tag 的方法,主要包括 Zabbix Inventory tag 和 Host tag。
* ```format.py```: 提供各种格式化操作方法,包括缓存设置、正则表达式匹配、表格化输出、Excel 表格处理等。
* ```docs.py```: 创建 Zabbix 资源使用率中用到的各类 Word 文档表格。
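
以下为 ```format.py``` 中常用函数的简单调用示意(示例值仅供参考):

```python
from lib.utils.format import get_value, convert_unit

# 读取 configs.ini 中 EXCEL 段的写入引擎配置
engine = get_value(section="EXCEL", option="PANDAS_WRITE_EXCEL_ENGINE")
# 将字节数转换为易读单位
print(convert_unit(2048))  # 输出 "2.0K"
```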
| zbxtool-cfomp | /zbxtool_cfomp-0.11.12-py3-none-any.whl/lib/utils/README.md | README.md |
import logging
import sys
from weworkapi.CorpApi import CORP_API_TYPE, CorpApi
from weworkapi.AbstractApi import ApiException
from .format import jmes_search, DiskCache, get_value
class WxWorkApi:
def __init__(self, corpid: str, agentid: str, secret: str):
# 企业 id
self.__corpid = corpid
# 应用 id
self.__agentid = agentid
# 应用 secret
self.__secret = secret
self._corpapi = CorpApi(self.__corpid, self.__secret)
@property
def corpid(self):
"""readonly"""
return self.__corpid
@property
def secret(self):
"""readonly"""
return self.__secret
@property
def token(self):
"""
Get token, expire time is 7100s
1. 每个应用有独立的 secret,获取到的 access_token 只能本应用使用;
2. 每个应用的 access_token 应该分开来获取。
:return:
"""
instance_ = DiskCache()
if self.__agentid:
key = "token_" + str(self.__corpid) + "_" + str(self.__agentid)
if instance_.get_cache(key):
return instance_.get_cache(key)
if self.secret:
try:
instance_.set_cache(
key=key,
value=self._corpapi.getAccessToken(),
expire=int(
get_value(
section="CACHE",
option="TOKEN_EXPIRE_TIME"
)
)
)
except ApiException as err:
logging.error(msg="\033[31m" + str(err) + "\033[0m")
else:
return instance_.get_cache(key)
def _wework_request(self, api_type: str, params=None):
"""
        封装对 corpapi 的调用,等同于 corpapi.httpCall(api_type, params)。
"""
try:
if self.token:
self._corpapi.access_token = self.token
return self._corpapi.httpCall(api_type, params)
except ApiException as err:
logging.error(msg="\033[31m" + str(err) + "\033[0m")
sys.exit(1)
@property
def departs(self):
"""
获取企业微信中所有的部门信息:
:return:
"""
return self._wework_request(
api_type=CORP_API_TYPE["DEPARTMENT_LIST"]
)
def get_depid(self, dep_name: str):
"""
根据部门名称获取部门 id:
:param dep_name:
:return:
"""
if self.departs:
depid = jmes_search(
jmes_rexp=get_value(
section="JMES",
option="SEARCH_WEWORK_DEP_NAME",
raw=True
) % dep_name,
data=self.departs.get("department")
)
return str(depid[0]) if depid else None
def get_dep_users(self, dep_name: str):
"""
根据部门名称获取其所有员工信息:
:param dep_name:
:return:
"""
if not self.get_depid(dep_name):
logging.error("\033[31m" + "企业微信中未找到部门: %s。" + "\033[0m", dep_name)
sys.exit(1)
users = self._wework_request(
api_type=CORP_API_TYPE["USER_LIST"],
params={
"department_id": self.get_depid(dep_name),
"fetch_child": "1"
}
)
return users["userlist"] if users and users["userlist"] else None | zbxtool-cfomp | /zbxtool_cfomp-0.11.12-py3-none-any.whl/lib/utils/wxapis.py | wxapis.py |
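
# 使用示意(企业 ID、应用 ID、secret 与部门名均为假设值):
# wx = WxWorkApi(corpid="wwxxxxxxxx", agentid="1000002", secret="***")
# users = wx.get_dep_users("运维部")  # 返回该部门(含子部门)的员工列表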
import logging
import sys
from zabbix_api import ZabbixAPIException
from .format import re_search, get_value
class ZabbixApiGet:
"""Zabbix Get Methods"""
def __init__(self, zapi):
self._zapi = zapi
def _zbx_request(self, method: str, params: dict or list):
"""
对于 zapi 的封装,等同于 zapi.MODULE.METHOD(PARAMS)。
:param method:
:param params:
:return:
"""
try:
module, func = method.split(r".")
return getattr(getattr(self._zapi, module), func)(params)
except ZabbixAPIException as err:
logging.error(msg="\033[31m" + str(err) + "\033[0m")
sys.exit(1)
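    # 使用示意(主机名为假设值):
    # self._zbx_request(method="host.get", params={"filter": {"host": ["web-01"]}})
    # 等同于 zapi.host.get({"filter": {"host": ["web-01"]}})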
def get_ht_grps(self, output=None, filter_=None, selecthosts=None,
monitored_hosts=True, real_hosts=False, with_monitored_items=False,
sortfield="name", **kwargs):
"""
Get Zabbix host groups info:
:param output: Object properties to be returned
:param filter_: Return only those results that exactly match the given filter
:param selecthosts: Return a hosts property with the hosts that belong to the host group
:param monitored_hosts: Return only host groups that contain monitored hosts
:param real_hosts: Return only host groups that contain hosts
:param with_monitored_items: Return only host groups that contain hosts with enabled items
:param sortfield: Sort the result by the given properties
:param kwargs: Keyword parameters to merge
:return:
"""
default_dict = {
"output": output,
"filter": filter_,
"selectHosts": selecthosts,
"real_hosts": real_hosts,
"with_monitored_items": with_monitored_items,
"monitored_hosts": monitored_hosts,
"sortfield": sortfield
}
default_dict.update(**kwargs)
return self._zbx_request(
method="hostgroup.get",
params=default_dict
)
def get_hts(self, output=None, selectparenttemplates=None, selectinventory=None,
searchinventory=None, selectgroups=None, selectinterfaces=None, filter_=None,
hostids=None, searchbyany=False, groupids=None, selecttriggers=None,
selectmacros=None, monitored_hosts=True, with_monitored_items=False,
selecttags=None, search=None, tags=None, selecthostdiscovery=None, **kwargs):
"""
Get Zabbix hosts info:
:param selecthostdiscovery: Return a hostDiscovery property with host discovery object data
:param tags: Return only hosts with given tags
Exact match by tag and case-sensitive or case-insensitive search by tag value
depending on operator value.
Format: [{"tag": "<tag>", "value": "<value>", "operator": "<operator>"}, ...]
:param output: Object properties to be returned
:param selectparenttemplates: Return a parentTemplates property with templates that the host is linked to
:param selectinventory: Return an inventory property with host inventory data
:param searchinventory: Return only hosts that have inventory data matching the given wildcard search
:param selectgroups: Return a groups property with host groups data that the host belongs to
:param selectinterfaces: Return an interfaces property with host interfaces
:param filter_: Return only those results that exactly match the given filter
:param hostids: Return only hosts with the given host IDs
:param searchbyany: If set to true return results that match any of the criteria given in the filter or
search parameter instead of all of them
:param groupids: Return only hosts that belong to the given groups
:param selecttriggers: Return a triggers property with host triggers
:param selectmacros: Return a macros property with host macros
:param monitored_hosts: Return only monitored hosts
:param with_monitored_items: Return only hosts that have enabled items
:param selecttags: Return a tags property with host tags
:param search: Return results that match the given wildcard search
:param kwargs: Keyword parameters to merge
:return:
"""
default_dict = {
"output": output,
"selectParentTemplates": selectparenttemplates,
"selectInventory": selectinventory,
"searchInventory": searchinventory,
"selectGroups": selectgroups,
"selectInterfaces": selectinterfaces,
"filter": filter_,
"hostids": hostids,
"searchByAny": searchbyany,
"groupids": groupids,
"selectTriggers": selecttriggers,
"selectMacros": selectmacros,
"monitored": monitored_hosts,
"with_monitored_items": with_monitored_items,
"selectTags": selecttags,
"search": search,
"tags": tags,
"selectHostDiscovery": selecthostdiscovery
}
default_dict.update(**kwargs)
return self._zbx_request(
method="host.get",
params=default_dict
)
def get_tpls(self, filter_=None, output=None, **kwargs):
"""
Get Zabbix templates info:
:param filter_: Return only those results that exactly match the given filter
:param output: Object properties to be returned
:param kwargs: Keyword parameters to merge
:return:
"""
default_dict = {
"filter": filter_,
"output": output
}
default_dict.update(**kwargs)
return self._zbx_request(
method="template.get",
params=default_dict
)
def get_usr_grps(self, output=None, filter_=None, searchwildcardsenabled=False,
selectusers=None, search=None, **kwargs):
"""
Get Zabbix user groups info:
:param output: Object properties to be returned
:param filter_: Return only those results that exactly match the given filter
:param searchwildcardsenabled: If set to true enables the use of "*" as a wildcard character
in the search parameter
:param selectusers: Return the users from the user group in the users property
:param search: Return results that match the given wildcard search (case-insensitive)
:param kwargs: Keyword parameters to merge
:return:
"""
default_dict = {
"output": output,
"filter": filter_,
"searchWildcardsEnabled": searchwildcardsenabled,
"selectUsers": selectusers,
"status": 0,
"search": search
}
default_dict.update(**kwargs)
return self._zbx_request(
method="usergroup.get",
params=default_dict
)
def get_zbx_users(self, output=None, usrgrpids=None, filter_=None,
selectmedias=None, selectmediatypes=None, **kwargs):
"""
Get Zabbix users info:
:param selectmediatypes: Return media types used by the user in the mediatypes property
:param selectmedias: Return media used by the user in the medias property
:param output: Object properties to be returned
:param usrgrpids: Return only users that belong to the given user groups
:param filter_: Return only those results that exactly match the given filter
:param kwargs: Keyword parameters to merge
:return:
"""
default_dict = {
"output": output,
"usrgrpids": usrgrpids,
"selectMedias": selectmedias,
"selectMediatypes": selectmediatypes,
"filter": filter_
}
default_dict.update(**kwargs)
zbx_users = self._zbx_request(
method="user.get",
params=default_dict
)
for zbx_user in zbx_users:
rexp = get_value(section="REXP", option="REXP_CH")
if zbx_user.get("surname") and re_search(rexp, zbx_user.get("surname")) \
and zbx_user.get("name") and re_search(rexp, zbx_user.get("name")):
# 添加 fullname 和 fullname_reverse,即 "姓+名" 和 "名+姓"(针对于中文名称)
zbx_user["fullname"] = zbx_user.get("surname") + zbx_user.get("name")
zbx_user["fullname_reverse"] = zbx_user.get("name") + zbx_user.get("surname")
return zbx_users
def get_drules(self, output=None, selectdchecks=None, search=None, selectdhosts=None,
searchwildcardsenabled=False, **kwargs):
"""
Get Zabbix discovery rules info:
:param selectdhosts: Return a dhosts property with the discovered hosts created by the discovery rule
:param selectdchecks: Return a dchecks property with the discovery checks used by the discovery rule
:param output: Object properties to be returned
:param search: Return results that match the given wildcard search (case-insensitive)
:param searchwildcardsenabled: If set to true enables the use of "*" as a wildcard character
in the search parameter
:param kwargs: Keyword parameters to merge
:return:
"""
default_dict = {
"output": output,
"selectDChecks": selectdchecks,
"selectDHosts": selectdhosts,
"search": search,
"searchWildcardsEnabled": searchwildcardsenabled
}
default_dict.update(**kwargs)
return self._zbx_request(
method="drule.get",
params=default_dict
)
def get_dservices(self, output=None, druleids=None, selectdrules=None,
selecthosts=None, **kwargs):
"""
Get Zabbix discovery services info:
:param output: Object properties to be returned
:param druleids: Return only discovered services that have been detected by the given discovery rules
:param selectdrules: Return a drules property with an array of the discovery rules that detected the service
:param selecthosts: Return a hosts property with the hosts with the same IP address and proxy as the service
:param kwargs: Keyword parameters to merge
:return:
"""
default_dict = {
"output": output,
"druleids": druleids,
"selectDRules": selectdrules,
"selectHosts": selecthosts,
}
default_dict.update(**kwargs)
return self._zbx_request(
method="dservice.get",
params=default_dict
)
def get_items(self, hostids=None, search=None, filter_=None,
selectinterfaces=None, monitored=True, selecthosts=None, selecttags=None, **kwargs):
"""
Get Zabbix items info:
:param selecttags: Return the item tags in tags property
:param hostids: Return only items that belong to the given hosts
:param search: Return results that match the given wildcard search (case-insensitive)
:param filter_: Return only those results that exactly match the given filter
:param selectinterfaces: Return an interfaces property with an array of host interfaces used by the item
:param monitored: If set to true return only enabled items that belong to monitored hosts
:param selecthosts: Return a hosts property with an array of hosts that the item belongs to
:param kwargs: Keyword parameters to merge
:return:
"""
default_dict = {
"hostids": hostids,
"search": search,
"filter": filter_,
"monitored": monitored,
"selectInterfaces": selectinterfaces,
"selectHosts": selecthosts,
"selectTags": selecttags
}
default_dict.update(**kwargs)
return self._zbx_request(
method="item.get",
params=default_dict
)
def get_medias(self, filter_=None, selectusers=None, output=None, **kwargs):
"""
Get Zabbix media types info:
:param filter_: Return only those results that exactly match the given filter
:param selectusers: Return a users property with the users that use the media type
:param output: Object properties to be returned
:param kwargs: Keyword parameters to merge
:return:
"""
default_dict = {
"filter": filter_,
"selectUsers": selectusers,
"output": output
}
default_dict.update(**kwargs)
return self._zbx_request(
method="mediatype.get",
params=default_dict
)
def get_actions(self, output=None, selectoperations=None, filter_=None, **kwargs):
"""
Get Zabbix actions info:
:param output: Object properties to be returned
:param selectoperations: Return an operations property with action operations
:param filter_: Return only those results that exactly match the given filter
:param kwargs: Keyword parameters to merge
:return:
"""
default_dict = {
"output": output,
"selectOperations": selectoperations,
"filter": filter_
}
default_dict.update(**kwargs)
return self._zbx_request(
method="action.get",
params=default_dict
)
def get_service(self, filter_=None, parentids=None, selectchildren=None,
selectparents=None, serviceids=None, **kwargs):
"""
Get Zabbix services info:
:param selectparents: Return a parents property with the parent services
:param filter_: Return only those results that exactly match the given filter
:param parentids: Return only services that are linked to the given parent services
:param selectchildren: Return a children property with the child services
:param serviceids: Return only services with the given IDs
:param kwargs: Keyword parameters to merge
:return:
"""
default_dict = {
"filter": filter_,
"parentids": parentids,
"selectParents": selectparents,
"selectChildren": selectchildren,
"serviceids": serviceids
}
default_dict.update(**kwargs)
return self._zbx_request(
method="service.get",
params=default_dict
)
def get_events(self, output=None, countoutput=False, value=None, severities=None, time_from=None,
time_till=None, selecthosts=None, hostids=None, **kwargs):
"""
Get Zabbix events info:
:param output: Object properties to be returned
:param countoutput: Return the number of records in the result instead of the actual data
:param value: Return only events with the given values
:param severities: Return only events with given event severities. Applies only if object is trigger
:param time_from: Return only events that have been created after or at the given time
:param time_till: Return only events that have been created before or at the given time
:param selecthosts: Return a hosts property with hosts containing the object that created the event.
Supported only for events generated by triggers, items or LLD rules
:param hostids: Return only events created by objects that belong to the given hosts
:param kwargs: Keyword parameters to merge
:return:
"""
default_dict = {
"output": output,
"countOutput": countoutput,
"value": value,
"severities": severities,
"time_from": time_from,
"time_till": time_till,
"selectHosts": selecthosts,
"hostids": hostids,
}
default_dict.update(**kwargs)
return self._zbx_request(
method="event.get",
params=default_dict
)
def get_trends(self, itemids=None, time_from=None, time_till=None, **kwargs):
"""
Get Zabbix trends info:
:param itemids: Return only trends with the given item IDs
:param time_from: Return only values that have been collected after or at the given time
:param time_till: Return only values that have been collected before or at the given time
:param kwargs: Keyword parameters to merge
:return:
"""
default_dict = {
"itemids": itemids,
"time_from": time_from,
"time_till": time_till
}
default_dict.update(**kwargs)
return self._zbx_request(
method="trend.get",
params=default_dict
)
def get_sla(self, output=None, serviceids=None, selectservicetags=None, filter_=None, **kwargs):
"""
Get Zabbix SLA info:
:param output: Object properties to be returned
:param serviceids: Return only SLAs matching the specific services
:param selectservicetags: Return a service_tags property with SLA service tags
:param filter_: Return only those results that exactly match the given filter
:param kwargs: Keyword parameters to merge
:return:
"""
default_dict = {
"output": output,
"serviceids": serviceids,
"selectServiceTags": selectservicetags,
"filter": filter_
}
default_dict.update(**kwargs)
return self._zbx_request(
method="sla.get",
params=default_dict
)
class ZabbixApiUpdate(ZabbixApiGet):
"""Zabbix Update Methods"""
def __init__(self, zapi):
self._zapi = zapi
super().__init__(zapi)
def update_host(self, params: dict):
"""Update Zabbix host"""
self._zbx_request(
method="host.update",
params=params
)
def update_user(self, params: dict):
"""Update Zabbix user"""
self._zbx_request(
method="user.update",
params=params
)
def update_item(self, params: dict):
"""Update Zabbix item"""
self._zbx_request(
method="item.update",
params=params
)
def update_action(self, params: dict):
"""Update Zabbix action"""
self._zbx_request(
method="action.update",
params=params
)
def mass_update_host(self, params: dict):
"""Mass update Zabbix host"""
self._zbx_request(
method="host.massupdate",
params=params
)
class ZabbixApiCreate(ZabbixApiGet):
"""Zabbix Create Methods"""
def __init__(self, zapi):
self._zapi = zapi
super().__init__(zapi)
def create_item(self, delay=None, hostid=None, key_=None, name=None,
type_=None, value_type=None, data_type=None,
units=None, params=None, tags=None, **kwargs):
"""
Create Zabbix item:
:param tags: Item tags
:param delay: Update interval of the item. Accepts seconds or a time unit with suffix (30s,1m,2h,1d)(required)
:param hostid: ID of the host or template that the item belongs to(required)
:param key_: Item key(required)
:param name: Name of the item(required)
:param type_: Type of the item(required)
:param value_type: Type of information of the item(required)
:param data_type:
:param units: Value units
:param params: Additional parameters depending on the type of the item
:param kwargs: Keyword parameters to merge
:return:
"""
if tags is None:
tags = []
default_dt = {
"delay": delay,
"hostid": hostid,
"key_": key_,
"name": name,
"type": type_,
"value_type": value_type,
"data_type": data_type,
"units": units,
"params": params,
"tags": tags
}
default_dt.update(**kwargs)
self._zbx_request(
method="item.create",
params=default_dt
)
def create_usrgrp(self, grp_name: str, groupid: str, permission: int):
"""
Create Zabbix user group:
:param grp_name: Name of the user group(required)
:param groupid: ID of the host group to add permission to(required)
:param permission: Access level to the host group(required)
0 - access denied
2 - read-only access
3 - read-write access
:return:
"""
self._zbx_request(
method="usergroup.create",
params={
"name": grp_name,
"rights": {
"id": groupid,
"permission": permission
}
}
)
def create_ht_interface(self, hostid: str, ip_: str, main: int = 0, port="10050", type_=1, useip=1):
"""
Create Zabbix host interface:
:param hostid: ID of the host the interface belongs to(required)
:param ip_: IP address used by the interface(required)
:param main: Whether the interface is used as default on the host.
Only one interface of some type can be set as default on a host
0 - not default
1 - default
:param port: Port number used by the interface. Can contain user macros(required)
:param type_: Interface type
1 - agent
2 - SNMP
3 - IPMI
4 - JMX
:param useip: Whether the connection should be made via IP
0 - connect using host DNS name
1 - connect using host IP address for this host interface
:return:
"""
self._zbx_request(
method="hostinterface.create",
params={
"hostid": hostid,
"dns": "",
"ip": ip_,
"main": main,
"port": port,
"type": type_,
"useip": useip
}
)
def create_service(self, service_name: str, children=None, parents=None,
problem_tags=None, tags=None, algorithm: int = 1, sortorder: int = 0):
"""
Create Zabbix service:
:param tags: Service tags to be created for the service
:param problem_tags: Problem tags to be created for the service
:param parents: Parent services to be linked to the service
:param children: Child services to replace the current service children
:param sortorder: Position of the service used for sorting(required), Possible values: 0-999
:param algorithm: Status calculation rule. Only applicable if child services exist(required)
0 - set status to OK
1 - most critical if all children have problems
2 - most critical of child services
:param service_name: Name of the service(required)
:return:
"""
if tags is None:
tags = list()
if problem_tags is None:
problem_tags = []
if parents is None:
parents = list()
if children is None:
children = list()
return self._zbx_request(
method="service.create",
params={
"name": service_name,
"algorithm": algorithm,
"sortorder": sortorder,
"children": children,
"parents": parents,
"problem_tags": problem_tags,
"tags": tags
}
)
class ZabbixApiDel(ZabbixApiGet):
"""Zabbix Delete Methods"""
def __init__(self, zapi):
self._zapi = zapi
super().__init__(zapi)
def del_service(self, serviceids: list):
"""Delete Zabbix service"""
self._zbx_request(
method="service.delete",
params=serviceids
)
def del_interface(self, interfaceids: list):
"""Delete Zabbix host interface"""
self._zbx_request(
method="hostinterface.delete",
params=interfaceids
) | zbxtool-cfomp | /zbxtool_cfomp-0.11.12-py3-none-any.whl/lib/utils/zbxapis.py | zbxapis.py |
import argparse
from ast import literal_eval
import logging
import os
import pandas as pd
from lib.utils.zbxapis import ZabbixApiGet
from lib.utils.format import jmes_search, to_excel_
from lib.utils.format import re_search, re_findall, get_value, pretty_tbl
def show(res):
"""
打印出 Zabbix Discovery Hosts 信息:
:param res:
:return:
"""
field_names = literal_eval(get_value(section="ZABBIX", option="DF_CH_FIELD_NAMES"))
tbl = pretty_tbl(
title="Zabbix Discovery Hosts Info",
field_names=field_names,
rows=res.values.tolist()
)
for field in field_names:
tbl.align[field] = "l"
print(tbl)
class ZbxDservices:
def __init__(self, zapi, drule: str):
self.zapi = zapi
self.drule = drule
@property
def discovery_rules(self):
"""
根据【关键字】搜索 Zabbix 自动发现规则:
:return:
"""
return self.zapi.get_drules(
output="extend",
selectdchecks="extend",
selectdhosts="extend",
search={"name": self.drule},
searchwildcardsenabled=True
)
@property
def dservices(self):
"""
根据【自动发现规则 id】 获取所有已被发现的服务:
:return:
"""
return self.zapi.get_dservices(
output=["dcheckid", "ip", "status", "value", "dhostid"],
druleids=jmes_search(
jmes_rexp=get_value(
section="JMES",
option="SEARCH_DRULEIDS"
),
data=self.discovery_rules
),
selectDRules=["name"],
selectHosts=["host", "status"]
)
@property
def hyperv_hosts(self):
"""
获取 Zabbix "Hypervisors" 主机组下所有主机的【主机名】:
:return:
"""
return jmes_search(
jmes_rexp=get_value(
section="JMES",
option="SEARCH_HOSTGROUP_HOSTS_HOSTNAME"
),
data=self.zapi.get_ht_grps(
output=["groupid", "name"],
filter_={"name": ["Hypervisors"]},
selecthosts=["name"]
)
)
def get_check_ids(self, type_: str):
"""
获取自动发现规则的 【dcheckid】:
1. ICMP ping 类型的 "Type" 为 "12";
2. Zabbix agent 类型的 "Type" 为 "9"。
:return:
"""
return jmes_search(
jmes_rexp=get_value(
section="JMES",
option="SEARCH_CHECK_IDS",
raw=True
) % type_,
data=self.discovery_rules
)
def get_hosts(self, filter_: dict):
"""
根据过滤条件获取主机的 Inventory 信息:
:param filter_:
:return:
"""
return self.zapi.get_hts(
filter_=filter_,
selectInventory=["poc_1_name", "os_short"]
)
def make_discover_data(args):
"""
生成 "ICMP ping" 类型和 "Zabbix agent" 类型的自动发现规则下的主机数据:
:param args:
:return:
"""
instance_ = ZbxDservices(ZabbixApiGet(args.zapi), args.drule)
df = pd.DataFrame(
columns=literal_eval(get_value(section="ZABBIX", option="DISCOVER_HOST_FIELDS"))
)
for dservice in instance_.dservices:
info = {}
# Zabbix "ICMP ping" Check
if dservice["dcheckid"] in instance_.get_check_ids("12"):
info["dhostid"] = dservice.get("dhostid")
info["check_type"] = "icmp"
info["ipv4"] = dservice.get("ip")
if dservice["hosts"]:
info["monitored"] = "是"
info["host"] = dservice.get("hosts")[0].get("host")
info["status"] = ("启用" if dservice.get("hosts")[0].get("status") == "0" else "禁用")
elif dservice["ip"] in instance_.hyperv_hosts:
zbx_host = instance_.get_hosts({"name": dservice["ip"]})[0]
info["monitored"] = "是"
info["host"] = zbx_host.get("host")
info["status"] = ("启用" if zbx_host.get("status") == "0" else "禁用")
info["poc"] = zbx_host.get("inventory").get("poc_1_name")
info["os"] = zbx_host.get("inventory").get("os_short")
# Zabbix "Zabbix agent" Check
if dservice["dcheckid"] in instance_.get_check_ids("9"):
info["dhostid"] = dservice.get("dhostid")
info["check_type"] = "agent"
info["ipv4"] = dservice.get("ip")
host = instance_.get_hosts({"host": dservice.get("value")})
if host:
zbx_host = host[0]
info["host"] = dservice.get("value")
info["monitored"] = "是"
info["status"] = ("启用" if dservice.get("status") == "0" else "禁用")
info["poc"] = zbx_host.get("inventory").get("poc_1_name")
info["os"] = zbx_host.get("inventory").get("os_short")
if info:
# rule name 符合 "总类-业务网类-负责人" 形式,提取出业务网络和负责人信息
drule_name = dservice.get("drules")[0].get("name")
rexp = get_value(section="REXP", option="PERSON_IN_CHARGE")
if re_search(rexp, drule_name):
_, net, poc = re_findall(rexp, drule_name)[0]
# 如从 inventory 中取到了 POC, 则优先使用, 否则使用 rule name 中的负责人
info["poc"] = info.get("poc") if info.get("poc") else poc
info["net"] = net
df = df.append(pd.Series(info), ignore_index=True)
# 既有 icmp check 又有 agent check 的情况, dhostid 相同, 通过 check_type 排序后
# 去除 icmp check 的那一行数据, 以 agent check 为准
df = df.sort_values(
by=["dhostid", "check_type"],
ascending=False
).drop_duplicates(subset="dhostid", keep="last")
return df
def main(args):
"""
    利用 Pandas 处理自动发现服务的数据并导出为 Excel 文件:
:param args:
:return:
"""
df = make_discover_data(args)
# 按照 host 进行 group by, 其余字段进行单元格合并(仅 host 不为空的行参与)
# 如果同一 host 有多个不同 ipv4, 则认为是多网卡(有可能是一张物理网卡使用多个 ip)
df2 = df.groupby("host", as_index=False).apply(
lambda x: pd.Series(
{
"ipv4": ",".join(x.ipv4.unique()),
"monitored": ",".join([i for i in x.monitored.unique() if isinstance(i, str)]),
"status": ",".join([i for i in x.status.unique() if isinstance(i, str)]),
"multi_inf": ("是" if x.ipv4.count() > 1 else "否"),
"net": ",".join([i for i in x.net.unique() if isinstance(i, str)]),
"poc": ",".join([i for i in x.poc.unique() if isinstance(i, str)]),
"os": ",".join([i for i in x.os.unique() if isinstance(i, str)]),
}
)
)
# 将 df 中 host 为空的数据与 df2 拼接在一起
# drop 参数避免将旧索引添加为列
    res = df[df.host.isna()].drop(
        columns=["dhostid", "check_type"]
    ).append(df2).reset_index(drop=True)
res.sort_values(by=["host"], na_position="last", inplace=True)
res.monitored.fillna(value="否", inplace=True)
res.multi_inf.fillna(value="否", inplace=True)
# 字段重命名为中文
res.rename(
columns=literal_eval(get_value(section="ZABBIX", option="DF_CH")),
inplace=True
)
show(res)
if args.output:
suffix = get_value(section="EXCEL", option="EXCEL_SUFFIX")
fname = args.output if args.output.endswith(suffix) else args.output + suffix
to_excel_(
df=res,
fname=fname,
shname="discovery数据"
)
if os.path.exists(fname):
logging.info("\033[32m成功导出 Excel 文件:%s\033[0m", os.path.abspath(fname))
parser = argparse.ArgumentParser(
    description="Get Zabbix discovery hosts info and export it as an Excel file"
)
parser.add_argument(
"-r",
"--drule",
type=str,
help="discovery rule"
)
parser.add_argument(
"-o",
"--output",
help="output save to an excel file, xx.xlsx"
)
parser.set_defaults(handler=main) | zbxtool-cfomp | /zbxtool_cfomp-0.11.12-py3-none-any.whl/lib/commands/discovery.py | discovery.py |
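
# 使用示意(子命令名称为假设,参数对应上述 argparse 定义):
# zbxtool discovery --drule "*业务网*" --output discovery_hosts.xlsx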
import argparse
import logging
from lib.utils.zbxapis import ZabbixApiCreate, ZabbixApiUpdate
from lib.utils.format import md5_, get_value, DiskCache
from lib.utils.zbxtags import ItemTags
class Items:
def __init__(self, zapi):
self._zapi = zapi
self._name = "Zabbix server"
self._cache = DiskCache()
@property
def server_hostid(self):
if self._cache.get_cache("hostid_zbx_server"):
return self._cache.get_cache("hostid_zbx_server")
if not self._cache.get_cache("hostid_zbx_server"):
server_host = ZabbixApiCreate(self._zapi).get_hts(
filter_={"host": "Zabbix server"}
)
if server_host:
self._cache.set_cache(
"hostid_zbx_server",
server_host[0].get("hostid"),
expire=300
)
return self._cache.get_cache("hostid_zbx_server")
def get_item_info(self, name: str):
item = ZabbixApiCreate(self._zapi).get_items(
hostids=[self.server_hostid],
filter_={"name": name},
output=["itemid", "params"],
selecthosts=["host", "status", "tags"],
selecttags="extend"
)
if item and item[0].get("itemid") and item[0].get("params"):
return item[0].get("itemid"), item[0].get("params"), \
item[0].get("tags"), item[0].get("hosts")[0].get("host")
def update(self, name: str, params: str, tag_name: str):
if self.get_item_info(name):
itemid = self.get_item_info(name)[0]
item_tags = ItemTags(self.get_item_info(name)[2]).added_item_tags(
tag_name="Application",
tag_value=tag_name
)
if itemid and self.get_item_info(name)[1]:
if self.get_item_info(name)[1] != params or \
self.get_item_info(name)[2] != item_tags:
ZabbixApiUpdate(self._zapi).update_item(
{
"itemid": itemid,
"params": params,
"tags": item_tags
}
)
logging.info(
"\033[32m主机 '%s' 成功更新监控项: '(ItemID)%s' => '(ItemName)%s'\033[0m",
self.get_item_info(name)[3],
itemid,
name
)
else:
logging.info(
"\033[33m主机 '%s' 监控项未发生改变:'(ItemID)%s' => '(ItemName)%s'\033[0m",
self.get_item_info(name)[3],
itemid,
name
)
def create(self, item_name=None, delay=None, key_=None, tag_value=None,
type_=15, value_type=3, data_type=0, units="B", params=None):
"""
创建 Zabbix Item:
1. Zabbix 6.0 版本 item 类型没有 "Zabbix aggregate" 了;
2. 以往的 "Zabbix aggregate" 需使用 "calculated" 来代替;
3. 创建一个 "calculated" 类型的 item,主要包含三部分:item name、item key、formula;
4. Zabbix 6.0 中的 "formula" 语法与以往稍有不同,
例如:sum(last_foreach(/*/vfs.fs.usedsize?[group="%s"]))。
:param tag_value:
:param item_name:
:param delay:
:param key_:
:param type_:
:param value_type:
:param data_type:
:param units:
:param params:
:return:
"""
if not self.get_item_info(item_name):
ZabbixApiCreate(self._zapi).create_item(
delay=delay,
hostid=self.server_hostid,
key_=key_,
name=item_name,
                type_=type_,
value_type=value_type,
data_type=data_type,
units=units,
params=params,
tags=[{"tag": "Application", "value": tag_value}]
)
logging.info(
"\033[32m主机 '%s' 成功创建监控项 '%s'\033[0m",
self.get_item_info(item_name)[3],
item_name
)
def create_total_disk_space_item(self, grp: str):
"""
创建主机组【总磁盘空间】监控项:
:param grp:
:return:
"""
item_name = get_value(
section="ZABBIX",
option="TOTAL_DISK_SPACE_ITEM_NAME",
raw=True
) % grp
params = get_value(
section="ZABBIX",
option="TOTAL_FS_SIZE_PARAMS",
raw=True
) % grp
self.update(name=item_name, params=params, tag_name="Filesystem aggregation")
self.create(
item_name=item_name,
key_=get_value(
section="ZABBIX",
option="TOTAL_FS_SIZE_ITEM",
raw=True
) % grp,
delay=3600,
params=params,
tag_value="Filesystem aggregation"
)
def create_used_disk_space_item(self, grp: str):
"""
创建主机组【已使用磁盘空间】监控项:
:param grp:
:return:
"""
item_name = get_value(
section="ZABBIX",
option="USED_DISK_SPACE_ITEM_NAME",
raw=True
) % grp
params = get_value(
section="ZABBIX",
option="USED_FS_SIZE_PARAMS",
raw=True
) % grp
self.update(name=item_name, params=params, tag_name="Filesystem aggregation")
self.create(
item_name=item_name,
key_=get_value(
section="ZABBIX",
option="USED_FS_SIZE_ITEM",
raw=True
) % grp,
delay=3600,
params=params,
tag_value="Filesystem aggregation"
)
def create_used_disk_space_per_item(self, grp: str):
"""
创建主机组【磁盘空间使用率】监控项:
:param grp:
:return:
"""
item_name = get_value(
section="ZABBIX",
option="USED_DISK_SPACE_PERCENTAGE_ITEM_NAME",
raw=True
) % grp
params = get_value(
section="ZABBIX",
option="DISK_SPACE_PARAMS",
raw=True
) % (self._name, grp, self._name, grp)
self.update(name=item_name, params=params, tag_name="Filesystem aggregation")
self.create(
item_name,
key_=md5_(
get_value(
section="ZABBIX",
option="USED_DISK_SPACE_PERCENTAGE_ITEM",
raw=True
) % grp),
delay=86400,
value_type=0,
units="%",
params=params,
tag_value="Filesystem aggregation"
)
def create_total_vm_item(self, grp: str):
"""
创建主机组【总内存空间】监控项:
:param grp:
:return:
"""
item_name = get_value(
section="ZABBIX",
option="TOTAL_VM_SIZE_ITEM_NAME",
raw=True
) % grp
params = get_value(
section="ZABBIX",
option="TOTAL_VM_SIZE_PARAMS",
raw=True
) % grp
self.update(name=item_name, params=params, tag_name="Memory aggregation")
self.create(
item_name=item_name,
key_=get_value(
section="ZABBIX",
option="TOTAL_VM_SIZE_ITEM",
raw=True
) % grp,
delay=600,
params=params,
tag_value="Memory aggregation"
)
def create_used_vm_item(self, grp: str):
"""
创建主机组【已使用内存】监控项:
:param grp:
:return:
"""
item_name = get_value(
section="ZABBIX",
option="USED_VM_SIZE_ITEM_NAME",
raw=True
) % grp
params = get_value(
section="ZABBIX",
option="USED_VM_SIZE_PARAMS",
raw=True
) % grp
self.update(name=item_name, params=params, tag_name="Memory aggregation")
self.create(
item_name=item_name,
key_=get_value(
section="ZABBIX",
option="USED_VM_SIZE_ITEM",
raw=True
) % grp,
delay=600,
params=params,
tag_value="Memory aggregation"
)
def create_used_vm_per_item(self, grp: str):
"""
创建主机组【内存使用率】监控项:
:param grp:
:return:
"""
item_name = get_value(
section="ZABBIX",
option="VM_UTIL_ITEM_NAME",
raw=True
) % grp
params = get_value(
section="ZABBIX",
option="VM_SPACE_PARAMS",
raw=True
) % (self._name, grp, self._name, grp)
self.update(name=item_name, params=params, tag_name="Memory aggregation")
self.create(
item_name=item_name,
key_=md5_(
get_value(
section="ZABBIX",
option="USED_VM_SPACE_PERCENTAGE_ITEM",
raw=True
) % grp
),
delay=3600,
value_type=0,
units="%",
params=params,
tag_value="Memory aggregation"
)
def create_avg_cpu_item(self, grp: str):
"""
        Create the host group "average CPU utilization" item:
        :param grp: host group name
:return:
"""
item_name = get_value(
section="ZABBIX",
option="AVG_CPU_UTIL_ITEM_NAME",
raw=True
) % grp
params = get_value(
section="ZABBIX",
option="CPU_UTIL_PARAMS",
raw=True
) % grp
self.update(name=item_name, params=params, tag_name="CPU aggregation")
self.create(
item_name=item_name,
key_=get_value(
section="ZABBIX",
option="CPU_UTIL_ITEM",
raw=True
) % grp,
delay=60,
value_type=0,
units="%",
params=params,
tag_value="CPU aggregation"
)
def main(args):
"""main function"""
instance_ = Items(args.zapi)
for grp in args.hostgroup:
instance_.create_total_disk_space_item(grp)
instance_.create_used_disk_space_item(grp)
instance_.create_used_disk_space_per_item(grp)
instance_.create_total_vm_item(grp)
instance_.create_used_vm_item(grp)
instance_.create_used_vm_per_item(grp)
instance_.create_avg_cpu_item(grp)
parser = argparse.ArgumentParser()
parser.add_argument("hostgroup", nargs="+", help="host group name")
parser.set_defaults(handler=main) | zbxtool-cfomp | /zbxtool_cfomp-0.11.12-py3-none-any.whl/lib/commands/hostgrp_aggr_item.py | hostgrp_aggr_item.py |
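# Illustrative usage (assuming the zbxtool CLI dispatches to this parser;
# the host group names are examples):
#   zbxtool hostgrp_aggr_item "Linux servers" "DB servers"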
import argparse
import logging
import time
from datetime import datetime
from lib.utils.zbxapis import ZabbixApiGet
from lib.utils.esapis import ESManager
from lib.utils.format import jmes_search, get_value
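# Elasticsearch index-template body for the "zabbix-raw-host-info-*" indices:
# typed mappings for Zabbix host/inventory fields, plus Chinese-named alias
# fields pointing at the corresponding groups/interfaces/inventory paths.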
body = {
"order": "500",
"index_patterns": [
"zabbix-raw-host-info-*"
],
"mappings": {
"properties": {
"hostid": {
"type": "integer"
},
"proxy_hostid": {
"type": "integer"
},
"status": {
"type": "byte"
},
"disable_until": {
"type": "date"
},
"available": {
"type": "byte"
},
"errors_from": {
"type": "date"
},
"lastaccess": {
"type": "byte"
},
"ipmi_authtype": {
"type": "byte"
},
"ipmi_privilege": {
"type": "byte"
},
"ipmi_disable_until": {
"type": "date"
},
"ipmi_available": {
"type": "byte"
},
"snmp_disable_until": {
"type": "date"
},
"snmp_available": {
"type": "byte"
},
"maintenanceid": {
"type": "integer"
},
"maintenance_status": {
"type": "byte"
},
"maintenance_type": {
"type": "byte"
},
"maintenance_from": {
"type": "date"
},
"ipmi_errors_from": {
"type": "date"
},
"snmp_errors_from": {
"type": "date"
},
"jmx_disable_until": {
"type": "date"
},
"jmx_available": {
"type": "byte"
},
"jmx_errors_from": {
"type": "date"
},
"flags": {
"type": "byte"
},
"templateid": {
"type": "integer"
},
"tls_connect": {
"type": "byte"
},
"tls_accept": {
"type": "byte"
},
"auto_compress": {
"type": "byte"
},
"groups": {
"properties": {
"groupid": {
"type": "integer"
},
"internal": {
"type": "byte"
},
"flags": {
"type": "byte"
},
"name": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword"
}
}
}
}
},
"interfaces": {
"properties": {
"ip": {
"type": "ip"
},
"interfaceid": {
"type": "integer"
},
"hostid": {
"type": "integer"
},
"main": {
"type": "byte"
},
"type": {
"type": "byte"
},
"useip": {
"type": "byte"
},
"port": {
"type": "integer"
},
"bulk": {
"type": "byte"
}
}
},
"inventory": {
"properties": {
"hostid": {
"type": "integer"
},
"inventory_mode": {
"type": "byte"
},
"alias": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"asset_tag": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"chassis": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"host_netmask": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"host_networks": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"hw_arch": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"location": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"macaddress_a": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"macaddress_b": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"model": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"name": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"oob_ip": {
"type": "text"
},
"os": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"os_full": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"os_short": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"poc_1_name": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"poc_2_name": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"serialno_a": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"site_rack": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"tag": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"type": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"type_full": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword"
}
}
},
"vendor": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword"
}
}
}
}
},
"主机组": {
"type": "alias",
"path": "groups.name"
},
"接口地址": {
"type": "alias",
"path": "interfaces.ip"
},
"主机别名": {
"type": "alias",
"path": "inventory.alias"
},
"资产标签": {
"type": "alias",
"path": "inventory.asset_tag"
},
"机架": {
"type": "alias",
"path": "inventory.chassis"
},
"子网掩码": {
"type": "alias",
"path": "inventory.host_netmask"
},
"主机网络": {
"type": "alias",
"path": "inventory.host_networks"
},
"硬件架构": {
"type": "alias",
"path": "inventory.hw_arch"
},
"机房": {
"type": "alias",
"path": "inventory.location"
},
"MAC_A": {
"type": "alias",
"path": "inventory.macaddress_a"
},
"MAC_B": {
"type": "alias",
"path": "inventory.macaddress_b"
},
"型号": {
"type": "alias",
"path": "inventory.model"
},
"主机名称": {
"type": "alias",
"path": "inventory.name"
},
"管理IP": {
"type": "alias",
"path": "inventory.oob_ip"
},
"OS": {
"type": "alias",
"path": "inventory.os"
},
"OS_FULL": {
"type": "alias",
"path": "inventory.os_full"
},
"OS_SHORT": {
"type": "alias",
"path": "inventory.os_short"
},
"主负责人": {
"type": "alias",
"path": "inventory.poc_1_name"
},
"次负责人": {
"type": "alias",
"path": "inventory.poc_2_name"
},
"序列号": {
"type": "alias",
"path": "inventory.serialno_a"
},
"机柜": {
"type": "alias",
"path": "inventory.site_rack"
},
"标签": {
"type": "alias",
"path": "inventory.tag"
},
"类型": {
"type": "alias",
"path": "inventory.type"
},
"具体类型": {
"type": "alias",
"path": "inventory.type_full"
},
"供应商": {
"type": "alias",
"path": "inventory.vendor"
}
}
}
}
def get_hosts(args, es_client, tpl_name):
"""
    Fetch the inventory information of Zabbix hosts and index it into ES:
    :param args: parsed CLI arguments
    :param es_client: ESManager instance
    :param tpl_name: ES index template name
    :return:
"""
body_datas = []
hosts = ZabbixApiGet(args.zapi).get_hts(
output="extend",
selectgroups="extend",
selectinterfaces="extend",
selectinventory="extend"
)
localtime = time.strftime("%Y.%m.%d", time.localtime())
for host in hosts:
host["@timestamp"] = datetime.utcfromtimestamp(time.time())
inventory = host.get("inventory") if isinstance(host.get("inventory"), dict) else {}
body_datas.append(
{
"_id": host.get("hostid"),
"主机名称": inventory.get("name", host.get("host")),
"主机别名": inventory.get("alias", host.get("host")),
"接口地址": jmes_search(
jmes_rexp=get_value(section="JMES", option="SEARCH_HOST_IPS"),
data=host
),
"主机组": jmes_search(
jmes_rexp=get_value(section="JMES", option="SEARCH_HOST_GROUP_NAMES"),
data=host
),
"OS": inventory.get("os"),
"OS_FULL": inventory.get("os_full"),
"OS_SHORT": inventory.get("os_short"),
"资产标签": inventory.get("asset_tag"),
"主负责人": inventory.get("poc_1_name"),
"次负责人": inventory.get("poc_2_name"),
"机架": inventory.get("chassis"),
"子网掩码": inventory.get("host_netmask"),
"主机网络": inventory.get("host_networks"),
"机房": inventory.get("location"),
"机柜": inventory.get("site_rack"),
"序列号": inventory.get("serialno_a"),
"管理IP": inventory.get("oob_ip"),
"MAC_A": inventory.get("macaddress_a"),
"MAC_B": inventory.get("macaddress_b"),
"硬件架构": inventory.get("hw_arch"),
"标签": inventory.get("tag"),
"类型": inventory.get("type"),
"具体类型": inventory.get("type_full"),
"型号": inventory.get("model"),
"供应商": inventory.get("vendor"),
"@timestamp": datetime.utcfromtimestamp(time.time())
}
)
es_client.put_template(tpl_name=tpl_name, body=body)
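    # Index the raw host documents as-is, reusing the Zabbix hostid as the ES _id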
for host in hosts:
host["_id"] = host["hostid"]
index_of_raw_host = get_value(
section="ELASTICSTACK",
option="ZABBIX_RAW_HOST_INDEX"
) + localtime
es_client.bulk(actions=hosts, index=index_of_raw_host)
logging.info(
"\033[32m成功生成 ES 索引:'(ES Host)%s' => '(ES INDEX)%s'\033[0m",
args.es_url,
index_of_raw_host
)
index_of_host = get_value(
section="ELASTICSTACK",
option="ZABBIX_HOST_INDEX"
) + localtime
es_client.bulk(actions=body_datas, index=index_of_host)
logging.info(
"\033[32m成功生成 ES 索引:'(ES Host)%s' => '(ES INDEX)%s'\033[0m",
args.es_url,
index_of_host
)
def main(args):
"""创建 ES 索引"""
get_hosts(
args=args,
es_client=ESManager(args.es_url, args.es_user, args.es_passwd),
tpl_name=args.es_tpl
)
parser = argparse.ArgumentParser(description="Gather zabbix host informations and create es index")
parser.add_argument(
"--es_url",
type=str,
required=True,
help="ElasticSearch server ip"
)
parser.add_argument(
"--es_user",
default="",
help="ElasticSearch server login user"
)
parser.add_argument(
"--es_passwd",
default="",
help="ElasticSearch server login password"
)
parser.add_argument(
"--es_tpl",
required=True,
help="ElasticSearch index template name"
)
parser.set_defaults(handler=main) | zbxtool-cfomp | /zbxtool_cfomp-0.11.12-py3-none-any.whl/lib/commands/es_index_zbxhost.py | es_index_zbxhost.py |
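# Illustrative usage (the URL and template name are examples):
#   zbxtool es_index_zbxhost --es_url http://es.example.com:9200 --es_tpl zabbix-host-info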
import argparse
import logging
from lib.utils.zbxapis import ZabbixApiUpdate
def update_action_send_users(zapi, media_name: str, action_name: str):
"""
    Update the "send to users" list of an action's operations by action name:
    1. First, fetch all users that have the given media type configured;
    2. Then fetch the operations of the action by its name;
    3. Next, read the operations' existing "send to users" lists;
    4. Compare each list against the users found by media name;
    5. Finally, append the users missing from the original list;
    6. [action.update() requires every original action field to be resent,
       otherwise the omitted fields are cleared].
    :param zapi: ZabbixApiUpdate instance
    :param media_name: media type name
    :param action_name: action name
    :return:
"""
media = zapi.get_medias(
filter_={"name": media_name},
selectusers=["userid"],
output=["users"]
)
action = zapi.get_actions(
output="extend",
selectoperations="extend",
filter_={"name": action_name}
)
usr_groups = zapi.get_usr_grps(
output=["usrgrpid", "name", "users"],
selectusers=["userid"],
filter_={"name": ["Zabbix administrators", "Disabled"]}
)
if not media or not action:
logging.info("update None! Action -> ['%s']", action_name)
if media and action:
media_users = media[0].get("users")
operations = action[0].get("operations")
usrgrp_users = []
for grp in usr_groups:
usrgrp_users.extend(grp.get("users"))
for operation in operations:
            # Exclude users that belong to the "Zabbix administrators" or "Disabled" user groups
media_users = [user for user in media_users if user not in usrgrp_users]
ops_users = operation.get("opmessage_usr")
ops_users.extend(media_users)
# 对 "user_id" 进行去重
new_ops_users = [dict(d) for d in (set([tuple(d.items()) for d in ops_users]))]
operation["opmessage_usr"] = new_ops_users
del operation["operationid"]
del operation["actionid"]
if not operation["opmessage"]["subject"]:
del operation["opmessage"]["subject"]
if not operation["opmessage"]["message"]:
del operation["opmessage"]["message"]
zapi.update_action(
{
"actionid": action[0].get("actionid"),
"operations": operations
}
)
logging.info(
"\033[32m成功更新Action '%s'\033[0m",
action[0].get("name")
)
def main(args):
"""Main Function"""
update_action_send_users(
zapi=ZabbixApiUpdate(args.zapi),
media_name=args.media,
action_name=args.action
)
parser = argparse.ArgumentParser(
description="Automatically search for the media type configured by the user,"
"and then configure it as action"
)
parser.add_argument(
"-m",
"--media",
required=True,
type=str,
help="user configured media type"
)
parser.add_argument(
"-a",
"--action",
required=True,
type=str,
help="the alarm action"
)
parser.set_defaults(handler=main) | zbxtool-cfomp | /zbxtool_cfomp-0.11.12-py3-none-any.whl/lib/commands/send_to_all_users.py | send_to_all_users.py |
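# Illustrative usage (the media and action names are examples):
#   zbxtool send_to_all_users -m "Email" -a "Report problems to Zabbix administrators"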
import argparse
import logging
from lib.utils.zbxtags import HostTags
class OOB:
def __init__(self, zapi):
self._zapi = zapi
@property
def server_bmc_hosts(self):
"""
        Fetch the hosts whose Host Inventory Type is "Server" or "BMC":
:return:
"""
def get_hosts_by_inventory_type(inventory_type):
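            # Keep only hosts that have a serial number and at least one interface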
return list(
filter(
lambda x: len(x.get("inventory").get("serialno_a")) and len(x.get("interfaces")),
self._zapi.host.get(
{
"output": ["hostid", "name", "flags"],
"selectInventory": ["tag", "serialno_a"],
"selectInterfaces": ["type", "main", "ip"],
"searchInventory": {"type": inventory_type},
"selectTags": ["tag", "value"]
}
)
)
)
return get_hosts_by_inventory_type("Server"), get_hosts_by_inventory_type("BMC")
def update(self, hostid: str, flags=None, host_tags=None, oob_ip=None):
"""
        Update a host:
        1. Mainly updates the Host Tags, Inventory Tags and the Inventory OOB IP address.
        :param hostid: id of the host to update
        :param flags: host flags ("4" marks a discovered host)
        :param host_tags: host-level tag list
        :param oob_ip: value for the inventory oob_ip field
        :return:
"""
        # flags: 0 - a plain host; 4 - a discovered host
if flags == "4":
self._zapi.host.update(
{
"hostid": hostid,
"inventory": {
"oob_ip": oob_ip
}
}
)
else:
self._zapi.host.update(
{
"hostid": hostid,
"tags": host_tags,
"inventory": {
"oob_ip": oob_ip
}
}
)
def search_hosts(self, tag_name: str):
"""
        Search hosts by tag name:
        1. Zabbix 6.0 supports host tags, while hosts from older versions relied on the
           host inventory tag, so two cases must be handled;
        1.1 For discovered hosts, the Zabbix API refuses host tags, so the tag can only
            live in the Host Inventory Tag field and is therefore searched there;
        1.2 For regular (non-discovered) hosts, host tags are allowed and searched directly.
        :param tag_name: tag name to search for
:return:
"""
hosts = []
hosts_no_discovery = self._zapi.host.get(
{
"output": ["hostid", "host"],
"selectInventory": ["tag", "oob_ip"],
"searchInventory": {"tag": tag_name},
"selectTags": ["tag", "value"]
}
)
hosts_with_discovery = self._zapi.host.get(
{
"output": ["hostid", "host"],
"selectInventory": ["tag", "oob_ip"],
"tags": [{"tag": tag_name}],
"selectTags": ["tag", "value"]
}
)
for host in hosts_no_discovery + hosts_with_discovery:
if host not in hosts:
hosts.append(host)
        if hosts:
            tag_hosts = [ht for ht in hosts if HostTags(ht.get("tags")).have(tag_name)]
            inventory_tag_hosts = [
                ht for ht in hosts if
                isinstance(ht.get("inventory"), dict)
                # match the requested tag rather than a hard-coded "auto_oob"
                and tag_name in ht.get("inventory").get("tag")
            ]
            return tag_hosts + inventory_tag_hosts
        # return an empty list (not None) so callers can iterate safely
        return []
def rm_auto_oob_tag(self):
"""
清除 "Server" 类型主机的 "auto_oob" 标签:
:return:
"""
hosts = self.search_hosts("auto_oob")
if hosts:
for host in hosts:
self.update(
hostid=host.get("hostid"),
host_tags=HostTags(host.get("tags")).deleted_tags("auto_oob")
)
logging.info(
"\033[32m成功删除主机 '%s' 的 oob_ip 和 'auto_oob' 标签\033[0m",
host.get("host")
)
def rm_auto_server_tag(self):
"""
清除 "BMC" 类型主机的 "auto_server" 标签:
:return:
"""
hosts = self.search_hosts("auto_server")
for host in hosts:
self.update(
hostid=host.get("hostid"),
host_tags=HostTags(host.get("tags")).deleted_tags("auto_server")
)
logging.info(
"\033[32m成功删除主机 '%s' 的 'auto_server' 标签\033[0m",
host.get("host")
)
def handle_hosts_tag(self):
"""
给 "Server" 和 "BMC" 类型的主机添加 "auto_oob" 和 "auto_server" 标签:
:return:
"""
server_hosts, bmc_hosts = self.server_bmc_hosts
logging.info(
"\033[32m获取到 %d 台 'Server' 类型主机,%d 台 'BMC' 类型主机\033[0m",
len(server_hosts),
len(bmc_hosts)
)
server_serials = [
host.get("inventory").get("serialno_a")
for host in server_hosts if isinstance(host.get("inventory"), dict)
]
bmc_serials = [
host.get("inventory").get("serialno_a")
for host in bmc_hosts if isinstance(host.get("inventory"), dict)
]
match = list(set(server_serials) & set(bmc_serials))
logging.info(
"\033[32m在 'Server' 类型主机和 'BMC' 类型主机之间总共匹配到 %d 个 'serialno'\033[0m",
len(match)
)
if match:
for serialno in match:
server_host = [
host for host in server_hosts
if host.get("inventory").get("serialno_a") == serialno
][0]
bmc_host = [
host for host in bmc_hosts
if host.get("inventory").get("serialno_a") == serialno
][0]
logging.info(
"\033[32m在 'Server' 类型主机 '%s' 和 'BMC' 类型主机 '%s' 之间成功匹配 serialno_a '%s'\033[0m",
server_host.get("name"),
bmc_host.get("name"),
serialno
)
                # Update the Server host's tags and its inventory oob_ip field
if bmc_host.get("interfaces"):
self.update(
hostid=server_host.get("hostid"),
flags=server_host.get("flags"),
host_tags=HostTags(server_host.get("tags")).added_tags("auto_oob", ""),
oob_ip=bmc_host.get("interfaces")[0].get("ip")
)
                # Update the BMC host's tags
if server_host.get("interfaces"):
self.update(
hostid=bmc_host.get("hostid"),
host_tags=HostTags(bmc_host.get("tags")).added_tags(
"auto_server",
server_host.get("interfaces")[0].get("ip")
)
)
def main(args):
"""Main Function"""
zapi = args.zapi
    # Remove the auto_oob marker from the inventory tag field of physical servers
if args.rm_auto_oob:
OOB(zapi).rm_auto_oob_tag()
    # Remove the auto_server marker from the inventory tag field of BMC hosts
if args.rm_auto_server:
OOB(zapi).rm_auto_server_tag()
    # Set the Server hosts' auto_oob and the BMC hosts' auto_server tags
if args.tags:
OOB(zapi).handle_hosts_tag()
parser = argparse.ArgumentParser(description="Matching inventory OOB IP address")
parser.add_argument(
"-ro",
"--rm_auto_oob",
action="store_true",
help="Remove auto_oob in inventory tag field and reset the oob_ip inventory field"
)
parser.add_argument(
"-rs",
"--rm_auto_server",
action="store_true",
help="Remove auto_server=x.x.x.x in inventory tag field"
)
parser.add_argument(
"-t",
"--tags",
action="store_true",
help="Make server and bmc host inventory tag"
)
parser.set_defaults(handler=main) | zbxtool-cfomp | /zbxtool_cfomp-0.11.12-py3-none-any.whl/lib/commands/oob.py | oob.py |
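# Illustrative usage:
#   zbxtool oob --tags          # tag Server/BMC pairs matched by serial number
#   zbxtool oob --rm_auto_oob   # remove the auto_oob tags again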
## zbxtool 6.0 subcommand overview
* ```discovery```: print information about Zabbix auto-discovered hosts, or export it to an Excel file.
* ```es_index_zbxhost```: collect the inventory info of each Zabbix host and add it to an ElasticSearch index.
* ```fs_calc```: create two items, total disk space and used disk space, on each Zabbix host.
* ```gen_analaysis_report```: generate a resource-utilization report for Zabbix host groups.
* ```hostgrp_aggr_item```: create items on the Zabbix server host that aggregate resource usage per host group.
* ```hosttpl```: add, delete and update Zabbix host templates in bulk.
* ```inventory_supplementary```: set the inventory type field of vmware hosts to vm; update the host tag of hosts that have an rsync process item.
* ```ldap_usergrp```: create a Zabbix user group for each host group and sync them to the user groups under ou=zabbix in ldap.
* ```multi_interfaces```: print the IPs recorded in the "Host networks" inventory field of each Zabbix host.
* ```oob```: update the inventory OOB IP address field of hosts.
* ```send_to_all_users```: add the users of a given media type to the "send to users" list of trigger actions.
* ```service_tree```: build an it-service tree in Zabbix based on host groups.
* ```sync_wework_media```: fetch user IDs from WeCom (WeChat Work) and write them to the sendto field of Zabbix users' WeCom alert media.
* ```update_hostgrp_poc```: read personnel info from ldap and update the inventory POC of the hosts in each Zabbix group.
* ```update_hostname```: strip the trailing "underscore + digits" suffix from hostnames in the Zabbix Discovered Hosts group.
* ```vmware_host_inventory```: read vCenter info via its API and update the inventory of hosts in the Zabbix Hypervisors group.
* ```delete_user_media```: delete media types that users no longer use
| zbxtool-cfomp | /zbxtool_cfomp-0.11.12-py3-none-any.whl/lib/commands/README.md | README.md |
import argparse
import sys
import logging
from urllib.parse import urlparse
from lib.utils.zbxtags import HostTags, InventoryTagDict
from lib.utils.esxiapis import VMManger
class UpdateZbxHost:
"""Mainly used to update the host inventory of zabbix 'ESXI' type host"""
def __init__(self, zapi):
self._zapi = zapi
@property
def esxi_hosts(self):
"""
        Fetch the details of Zabbix hosts selected by template tags:
        1. The returned host info includes Inventory, Macros, Host Tags, Discoveries, etc.
:return:
"""
return self._zapi.host.get(
{
"output": ["hostid", "name", "inventory_mode", "flags"],
"tags": [
{"tag": "target", "value": "vmware"},
{"tag": "target", "value": "vmware-hypervisor"}
],
# Return hosts that have given tags also in all of their linked templates
# True - linked templates must also have given tags
# False - (default) linked template tags are ignored
"inheritedTags": True,
"selectMacros": ["macro", "value"],
"selectInventory": "extend",
"selectTags": ["tag", "value"],
"selectHostDiscovery": "extend"
}
)
@staticmethod
def get_update_params(inventory: dict, host: dict):
"""
        Build the field set needed to update a Zabbix host:
        1. First the Host Inventory: for ESXI-type hosts it mainly comes from vCenter Server;
        2. Tags are handled in two cases:
        2.1 For a discovered host, the Zabbix API refuses host tags,
            so only a Host Inventory Tag can be added;
        2.2 For a regular (non-discovered) host, a Host Tag is added and the
            Host Inventory Tag is skipped.
        :param inventory: inventory dict fetched from vCenter
        :param host: host dict returned by the API
:return:
"""
# flags: 0 - a plain host;4 - a discovered host
if host.get("flags") == "4":
inventory_tags = InventoryTagDict(host.get("inventory").get("tag"))
inventory_tags["Esxi"] = None
inventory.update({"tag": str(inventory_tags)})
return {
"hostid": host.get("hostid"),
"inventory": inventory
}
return {
"hostid": host.get("hostid"),
"tags": HostTags(host.get("tags")).added_tags(
tag_name="Esxi",
tag_value=""
),
"inventory": inventory
}
@staticmethod
def get_esxi_info(vcenter_ip: str, host: dict):
"""
        Fetch ESXI host information from vCenter Server:
:param vcenter_ip:
:param host:
:return:
"""
username = [
macro for macro in host.get("macros")
if macro.get("macro") == r"{$VMWARE.USERNAME}"
]
password = [
macro for macro in host.get("macros")
if macro.get("macro") == r"{$VMWARE.PASSWORD}"
]
if username and password:
return VMManger(
host=vcenter_ip,
user=username[0].get("value"),
passwd=password[0].get("value")
).fetch_esxi(esxi_name=host.get("name"))
def main(args):
"""Main Function"""
zapi = args.zapi
zbx = UpdateZbxHost(zapi)
if not zbx.esxi_hosts:
sys.exit()
hosts = zbx.esxi_hosts
    # If --limit is given, only process the hosts in that list
if args.limit:
hosts = [ht for ht in zbx.esxi_hosts if ht.get("name") in args.limit]
    # Look up each host's macros via zapi
for host in hosts:
url = [
macro for macro in host.get("macros")
if macro.get("macro") == r"{$VMWARE.URL}"
]
if url:
vcenter_url = urlparse(url[0].get("value")).hostname
logging.info(
"\033[32m搜索 ESXI 主机成功,vCenter => '%s', ESXI Host => '%s'\033[0m",
vcenter_url,
host.get("name")
)
            esxi_info = zbx.get_esxi_info(vcenter_ip=vcenter_url, host=host)
            # Skip hosts whose vCenter credential macros are missing
            if not esxi_info:
                continue
            update_params = zbx.get_update_params(inventory=esxi_info, host=host)
            if host["inventory_mode"] == "-1":  # disabled
                update_params["inventory_mode"] = "1"  # Auto
            zapi.host.update(update_params)
logging.info(
"\033[32mESXI主机Inventory信息更新成功,Host => '%s'\033[0m",
host.get("name")
)
parser = argparse.ArgumentParser()
parser.add_argument(
"-l",
"--limit",
action="append",
help="Specify ip address of 'ESXI' type hosts"
)
parser.set_defaults(handler=main) | zbxtool-cfomp | /zbxtool_cfomp-0.11.12-py3-none-any.whl/lib/commands/vmware_host_inventory.py | vmware_host_inventory.py |
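# Illustrative usage (the address is an example):
#   zbxtool vmware_host_inventory -l 192.0.2.10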
import argparse
import logging
from ast import literal_eval
from lib.utils.zbxapis import ZabbixApiUpdate
from lib.utils.format import get_value, jmes_search
from lib.utils.zbxtags import InventoryTagDict, HostTags
class UpdateHtInventory:
def __init__(self, zapi):
self.zapi = ZabbixApiUpdate(zapi)
@staticmethod
def is_vm(items: list):
"""
        Filter the items whose lastvalue is "VMware" or "KVM":
        :param items: item list returned by the API
:return:
"""
return list(
filter(
lambda item: item.get("lastvalue") in literal_eval(
get_value(section="ZABBIX", option="CHASSIS_MODE_TYPES")),
items
)
)
@staticmethod
def is_host(items: list):
return list(
filter(
lambda item: item.get("hosts")[0].get("status") != "3",
items
)
)
@staticmethod
def inventory(item: dict, host: dict, check_type: str):
instance_ = HostTags(host.get("tags"))
inventory_tags = InventoryTagDict(host.get("inventory").get("tag"))
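        # Discovered hosts cannot carry host-level tags via the API,
        # so for them only the inventory fields are returned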
if check_type == "inventory_type":
if host.get("hostDiscovery"):
return {
"host_tags": [],
"inventory_tags": {"type": "VM"}
}
return {
"host_tags": instance_.added_tags("type", "VM"),
"inventory_tags": {"type": "VM"}
}
if check_type == "inventory_tag":
            # If the item's latest value is 1, add the "rsyncd" tag
            if item["lastclock"] != "0" and item["lastvalue"] == "1":
                # mirror the deletion branch below and record rsyncd in the inventory tag too
                inventory_tags["rsyncd"] = None
                if host.get("hostDiscovery"):
                    return {
                        "host_tags": [],
                        "inventory_tags": {"tag": str(inventory_tags)}
                    }
                return {
                    "host_tags": instance_.added_tags("rsyncd", ""),
                    "inventory_tags": {"tag": str(inventory_tags)}
                }
            # If the item's latest value is 0, remove the "rsyncd" tag
if item["lastclock"] != "0" and item["lastvalue"] == "0":
if "rsyncd" in inventory_tags:
del inventory_tags["rsyncd"]
if host.get("hostDiscovery"):
return {
"host_tags": [],
"inventory_tags": {"tag": str(inventory_tags)}
}
return {
"host_tags": instance_.deleted_tags("rsyncd"),
"inventory_tags": {"tag": str(inventory_tags)}
}
def update_host(self, items: list, checktype: str):
for item in self.is_host(items):
hosts = self.zapi.get_hts(
output=["host"],
hostids=jmes_search(
jmes_rexp=get_value(section="JMES", option="SEARCH_HOSTIDS"),
data=item.get("hosts")
),
selectinventory="extend",
selecttags=["tag", "value"],
selecthostdiscovery="extend"
)
for host in hosts:
inventory = self.inventory(item=item, host=host, check_type=checktype)
if checktype == "inventory_type" and inventory:
update_params = {
"hostid": host["hostid"],
"inventory": inventory.get("inventory_tags"),
"tags": inventory.get("host_tags")
}
                    # If inventory mode is disabled, switch it to automatic
if host.get("inventory").get("inventory_mode") == "-1": # disabled
update_params["inventory_mode"] = "1" # automatic
self.zapi.update_host(update_params)
logging.info("\033[32m成功更新主机 '%s'\033[0m", host.get("host"))
if checktype == "inventory_tag" and inventory:
self.zapi.update_host(
{
"hostid": host["hostid"],
"tags": inventory.get("host_tags"),
"inventory": inventory.get("inventory_tags")
}
)
logging.info("\033[32m成功更新主机 '%s'\033[0m", host.get("host"))
def update_type(self):
items = self.zapi.get_items(
output=["lastvalue"],
selecthosts=["host", "status"],
filter_={"name": "Chassis Model"}
)
if items:
self.update_host(items=self.is_vm(items), checktype="inventory_type")
def update_tag(self):
items = self.zapi.get_items(
output=["lastclock", "lastvalue"],
selecthosts=["host", "status"],
search={"key_": get_value(section="ZABBIX", option="RSYNCD_ITEM")}
)
if items:
self.update_host(items=items, checktype="inventory_tag")
def main(args):
zapi = args.zapi
UpdateHtInventory(zapi).update_tag()
UpdateHtInventory(zapi).update_type()
parser = argparse.ArgumentParser()
parser.set_defaults(handler=main) | zbxtool-cfomp | /zbxtool_cfomp-0.11.12-py3-none-any.whl/lib/commands/inventory_supplementary.py | inventory_supplementary.py |
import argparse
import re
import logging
import sys
from ast import literal_eval
import pandas as pd
from lib.utils.zbxapis import ZabbixApiGet, ZabbixApiCreate, ZabbixApiDel
from lib.utils.format import jmes_search, to_excel_, pretty_tbl
from lib.utils.format import get_value, re_search, re_findall, IPS
def show(res):
"""
    Pretty-print the Zabbix host interface table:
    :param res: result DataFrame
:return:
"""
field_names = literal_eval(get_value(section="ZABBIX", option="INTERFACES_DATA_COLUMNS"))
tbl = pretty_tbl(
title="Zabbix Host Interfaces",
field_names=field_names,
rows=res.values.tolist()
)
for field in field_names:
tbl.align[field] = "l"
print(tbl)
def save_to_excel(df: pd.DataFrame, fname: str):
suffix = get_value(section="EXCEL", option="EXCEL_SUFFIX")
if not fname.endswith(suffix):
fname = fname + suffix
to_excel_(df, fname)
logging.info("\033[32m成功保存文件:%s\033[0m", fname)
class ZbxInterfaces:
def __init__(self, zapi, check_file: str):
self._zapi = zapi
self.check_file = check_file
@staticmethod
def get_agent_ips(host: dict):
"""
        Get the zabbix agent interface IPs:
        :param host: host dict returned by the API
:return:
"""
return jmes_search(
jmes_rexp=get_value(
section="JMES",
option="SEARCH_AGENT_IPS"
),
data=host.get("interfaces")
)
@staticmethod
def get_host_networks(host: dict):
"""
        Get the IP addresses recorded in the inventory host_networks field:
        :param host: host dict returned by the API
:return:
"""
if isinstance(host.get("inventory"), list) and not host.get("inventory"):
return set()
if isinstance(host.get("inventory"), dict) and host.get("inventory"):
os_full = host.get("inventory").get("os_full")
networks = host.get("inventory").get("host_networks")
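            # Choose the IP-extraction regex based on the OS string in the inventory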
search_win = re_search(
rexp=get_value(section="REXP", option="WIN_HOST"),
content=os_full,
mode=re.I
)
search_linux = re_search(
rexp=get_value(section="REXP", option="LINUX_HOST"),
content=os_full,
mode=re.I
)
if search_win:
host_networks = set(
re_findall(
rexp=get_value(section="REXP", option="WIN_IP"),
content=networks,
mode=re.M
)
)
elif search_linux:
host_networks = set(
re_findall(
rexp=get_value(section="REXP", option="LINUX_IP"),
content=networks,
mode=re.M
)
)
else:
host_networks = set()
return host_networks
@property
def zbx_hosts(self):
return ZabbixApiGet(self._zapi).get_hts(
output=["name", "hostid", "proxy_hostid"],
selectinventory=["inventory_mode", "location", "host_networks", "os_full", "os_short"],
selectinterfaces=["interfaceid", "ip", "type", "main"]
)
def get_other_ips(self, host: dict):
"""
        Collect the extra IPs:
        1. Take the IPs from the inventory host_networks and remove the agent_ips;
        2. Then drop the IPs outside the IDC_NETWORKS ranges;
        3. What remains are non-agent IPs that fall inside the IDC_NETWORKS ranges.
        :param host: host dict returned by the API
:return:
"""
instance_ = IPS(self.check_file)
other_ips = list(
self.get_host_networks(host) - set(self.get_agent_ips(host))
)
return [ip for ip in other_ips if instance_.valid_ip(ip)]
def delete_invaild(self, host: dict):
"""
        Delete agent interfaces (except the first one) whose IP is not in host_networks:
        :param host: host dict returned by the API
:return:
"""
host_networks = self.get_host_networks(host)
if host_networks:
for inf in host.get("interfaces")[::-1]:
# type - 1 - agent
# main - 1 - default
if inf.get("type") == "1" and inf.get("main") != "1":
if inf.get("ip") in host_networks:
continue
ZabbixApiDel(self._zapi).del_interface([inf.get("interfaceid")])
logging.info(
"\033[32m成功删除非法 Interface: host =>'%s', agent_ip =>'%s'\033[0m",
host.get("name"),
inf.get("ip")
)
host.get("interfaces").remove(inf)
def delete_extra(self, host: dict):
"""
        With --delete_extra, keep only the first agent interface and delete the rest:
        :param host: host dict returned by the API
:return:
"""
for inf in host.get("interfaces")[::-1]:
if inf.get("type") == "1" and inf.get("main") != "1":
ZabbixApiDel(self._zapi).del_interface([inf.get("interfaceid")])
logging.info(
"\033[32m成功删除额外 Interface: host =>'%s', agent_ip =>'%s'\033[0m",
host.get("name"),
inf.get("ip")
)
host.get("interfaces").remove(inf)
def add_extra(self, host: dict):
"""
        Add the extra host addresses as agent interfaces:
        :param host: host dict returned by the API
:return:
"""
for other_ip in self.get_other_ips(host):
ZabbixApiCreate(self._zapi).create_ht_interface(
hostid=host.get("hostid"),
ip_=other_ip
)
logging.info(
"\033[32m成功添加 Interface: host =>'%s', extra_ip =>'%s'\033[0m",
host.get("name"),
other_ip
)
host.get("interfaces").append({"main": "0", "type": "1", "ip": other_ip})
def check_agent(self, host: dict):
"""
        Report agent interface IPs that are missing from the inventory host_networks:
        :param host: host dict returned by the API
:return:
"""
host_networks = self.get_host_networks(host)
if not host_networks:
logging.debug(
"\033[33m主机 '%s' 没有 host_networks,跳过\033[0m",
host.get("name")
)
return []
if host_networks:
return [ip for ip in self.get_agent_ips(host) if ip not in host_networks]
def main(args):
instance_ = ZbxInterfaces(args.zapi, args.check_file)
    # Build a pandas DataFrame used for both console output and file export
df = pd.DataFrame(
columns=literal_eval(get_value(section="ZABBIX", option="INTERFACES_DATA_COLUMNS"))
)
for host in instance_.zbx_hosts:
        # With --delete_invaild, delete the host's invalid agent interfaces
if args.delete_invaild:
instance_.delete_invaild(host)
        # With --delete_extra, keep only the first agent interface and delete the rest
if args.delete_extra:
instance_.delete_extra(host)
        # With --add_extra, add the extra host addresses as agent interfaces
if args.add_extra:
instance_.add_extra(host)
        # With --check_agent, filter agent interfaces whose IP is missing from the inventory
if args.check_agent:
agent_ips = instance_.check_agent(host)
if not agent_ips:
logging.debug(
"\033[33m主机 '%s' 所有的 'interface' 都在 host_networks\033[0m",
host.get("name")
)
continue
else:
agent_ips = instance_.get_agent_ips(host)
        # Append the row to the DataFrame
if isinstance(host.get("inventory"), dict) and instance_.get_other_ips(host):
df.loc[len(df)] = [
host.get("name"),
host.get("inventory").get("os_short"),
host.get("inventory").get("location"),
",".join(agent_ips),
",".join(instance_.get_other_ips(host))
]
    # Sort the results by location
res = df.sort_values(by=["location"], na_position="last").reset_index(drop=True)
if res.empty:
logging.info("No data retrieved.")
sys.exit(1)
if args.dump and args.dump == "excel":
save_to_excel(res, "results.xlsx")
elif args.dump == "console" and not args.delete_invaild and not args.delete_extra:
show(res)
parser = argparse.ArgumentParser(description="find ip from host inventory")
parser.add_argument("-f", "--check_file", required=True, help="a list of ip range of each IDC")
parser.add_argument(
"--dump",
choices=["console", "excel"],
default="console",
help="Print to screen, or save to excel"
)
parser.add_argument("--check_agent", action="store_true", help="display invalid interface")
parser.add_argument("--delete_invaild", action="store_true", help="delete invaild interface")
parser.add_argument("--add_extra", action="store_true", help="add extra ip to interface")
parser.add_argument("--delete_extra", action="store_true", help="delete extra ip from interface")
parser.set_defaults(handler=main) | zbxtool-cfomp | /zbxtool_cfomp-0.11.12-py3-none-any.whl/lib/commands/multi_interfaces.py | multi_interfaces.py |
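# Illustrative usage (the networks file is an example):
#   zbxtool multi_interfaces -f idc_networks.txt --check_agent --dump excel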
import argparse
import logging
import sys
from lib.utils.zbxapis import ZabbixApiCreate, ZabbixApiDel
from lib.utils.format import jmes_search, get_value
class CreateItService:
def __init__(self, zapi, service_name: str, group_name: str):
self.zapi = ZabbixApiCreate(zapi)
self.service_name = service_name
self.group_name = group_name
def get_sla_tag(self, sla_name: str):
"""
        Get the Service tags of a Zabbix SLA:
        1. Unlike Zabbix 4.0 and older, a Service in Zabbix 6.0 cannot reference an SLA directly:
           Service tags must first be added to the SLA,
           and the same tag is then added to the Service to link the two.
        :param sla_name: SLA name
:return:
"""
sla = self.zapi.get_sla(
output=["slaid", "name"],
selectservicetags=["tag", "value"],
filter_={"name": sla_name}
)
if sla and sla[0].get("service_tags"):
return sla[0].get("service_tags")
if not sla or not sla[0].get("service_tags"):
logging.info(
"\033[32mSLA '%s' 不存在,或者不存在 'service tag'\033[0m",
sla_name
)
return []
def create(self, prb_tag_name: str, sla_name: str):
"""
        Create a Service from a problem tag name and an SLA name:
        1. Older Zabbix versions could reference a trigger id directly,
           which tied the Service to alert events;
           in Zabbix 6.0 a Problem tag name must be given when creating the Service,
           and only then is the Service tied to alert events.
        :param prb_tag_name: problem tag name
        :param sla_name: SLA name
:return:
"""
sla = self.zapi.get_sla(
output=["slaid", "name"],
selectservicetags=["tag", "value"],
filter_={"name": sla_name}
)
if sla and sla[0].get("service_tags"):
rootsrv = self.zapi.get_service(filter_={"name": self.service_name})
if rootsrv:
logging.info(
"\033[32mService '%s'已经存在,默认不做操作\033[0m",
self.service_name
)
return
rootsrv = self.zapi.create_service(
service_name=self.service_name,
tags=self.get_sla_tag(sla_name)
)
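            # Tree layout: root service -> one child per host -> one grandchild per trigger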
hosts = self.zapi.get_hts(
output=["name"],
groupids=jmes_search(
jmes_rexp=get_value(section="JMES", option="SEARCH_GROUPIDS"),
data=self.zapi.get_ht_grps(
output=["groupid"],
filter_={"name": self.group_name}
)
),
selecttriggers="extend"
)
for host in hosts:
hostsrv = self.zapi.create_service(
service_name=host.get("name"),
parents=[{"serviceid": rootsrv.get("serviceids")[0]}],
tags=self.get_sla_tag(sla_name)
)
for trigger in host.get("triggers"):
self.zapi.create_service(
service_name=trigger.get("description"),
parents=[{"serviceid": hostsrv.get("serviceids")[0]}],
problem_tags=[{"tag": prb_tag_name, "operator": 2, "value": ""}],
tags=self.get_sla_tag(sla_name)
)
logging.info("\033[33m成功创建 Service '%s'\033[0m", self.service_name)
class DeleteItService:
def __init__(self, zapi, service_name):
self.zapi = ZabbixApiDel(zapi)
self.service_name = service_name
def hard_service_delete(self, service):
"""
        Delete a Service together with all of its child Services:
        1. Deleting a Zabbix Service only removes that Service itself;
           the child Services underneath (and their descendants) are not removed,
           so the whole subtree has to be deleted recursively.
        :param service: service dict returned by the API
:return:
"""
for node in service.get("children"):
tmpsrvs = self.zapi.get_service(
serviceids=node.get("serviceid"),
selectchildren="extend"
)
for tmpsrv in tmpsrvs:
self.hard_service_delete(tmpsrv)
self.zapi.del_service([service.get("serviceid")])
def delete(self):
rootsrvs = self.zapi.get_service(
filter_={"name": self.service_name},
selectchildren="extend"
)
for rootsrv in rootsrvs:
self.hard_service_delete(rootsrv)
logging.info(
"\033[33m成功删除 Service '%s'\033[0m",
self.service_name
)
def main(args):
if args.action == "create":
if not args.group_name or not args.tag_name or not args.sla_name:
parser.print_help()
logging.error("the argument --group-name/--tag-name/--sla-name is required")
sys.exit(1)
CreateItService(args.zapi, args.service_name, args.group_name).create(
args.tag_name,
args.sla_name
)
if args.action == "delete":
DeleteItService(args.zapi, args.service_name).delete()
parser = argparse.ArgumentParser(description="Create or delete zabbix service tree")
parser.add_argument("action", choices=["create", "delete"], help="Create/Delete IT service Tre")
parser.add_argument(
"-n",
"--service-name",
required=True,
help="The Name of IT service Tree's root"
)
parser.add_argument("-g", "--group-name", help="Create IT service tree from the Group")
parser.add_argument("-t", "--tag-name", help="Problem tag name")
parser.add_argument("-s", "--sla-name", help="Zabbix SLA name")
parser.set_defaults(handler=main) | zbxtool-cfomp | /zbxtool_cfomp-0.11.12-py3-none-any.whl/lib/commands/service_tree.py | service_tree.py |
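# Illustrative usage (all names are examples):
#   zbxtool service_tree create -n "Core services" -g "Linux servers" -t scene -s "Core SLA"
#   zbxtool service_tree delete -n "Core services"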
import argparse
import time
import logging
import math
import os
from collections import defaultdict, Counter
from itertools import groupby
from operator import itemgetter
from datetime import datetime
from dateutil.parser import parse
from docx import Document
from docx.enum.text import WD_ALIGN_PARAGRAPH
from docx.enum.section import WD_ORIENT
from docx.enum.table import WD_ALIGN_VERTICAL
from lib.utils.zbxapis import ZabbixApiGet
from lib.utils.format import make_timestamp, convert_unit, convert_pdf, DiskCache
from lib.utils.format import get_value, jmes_search
from lib.utils.docs import *
class ZbxReport:
def __init__(self, api, start, end):
self._api = api
self.start = start
self.end = end
self._cache = DiskCache()
@property
def server_hostid(self):
"""
获取 "Zabbix Server" 主机的 hostid:
:return:
"""
if self._cache.get_cache("hostid_zbx_server"):
return self._cache.get_cache("hostid_zbx_server")
if not self._cache.get_cache("hostid_zbx_server"):
server_host = self._api.get_hts(filter_={"host": "Zabbix server"})
if server_host:
self._cache.set_cache(
"hostid_zbx_server",
server_host[0].get("hostid")
)
return self._cache.get_cache("hostid_zbx_server")
@property
def htgrps(self):
"""
        Get the Zabbix host groups:
:return:
"""
if self._cache.get_cache("htgrps_normal"):
return list(self._cache.get_cache("htgrps_normal"))
if not self._cache.get_cache("htgrps_normal"):
htgrps = self._api.get_ht_grps(
output=["name"],
selecthosts=["hostid", "name"],
real_hosts=True,
with_monitored_items=True,
filter_={"flags": 0}
)
self._cache.set_cache("htgrps_normal", htgrps, expire=300)
return list(self._cache.get_cache("htgrps_normal"))
@property
def vm_hosts(self):
"""
        Get the hosts whose Host Inventory Type is "VM":
:return:
"""
hosts = self._api.get_hts(
with_monitored_items=True,
searchinventory={"type": "VM"},
filter_={"flags": 0},
output=["hostid"],
selectinterfaces=["available"]
)
hosts = jmes_search(
jmes_rexp=get_value(
section="JMES",
option="SEARCH_AVAILABLE_HOSTS"
),
data=hosts
)
return hosts
def get_host_items(self, group: dict):
"""
        Get the selected items of the hosts in a group:
        :param group: host group dict
:return:
"""
if self._cache.get_cache("items_" + str(group.get("groupid"))):
return list(self._cache.get_cache("items_" + str(group.get("groupid"))))
if not self._cache.get_cache("items_" + str(group.get("groupid"))):
items = self._api.get_items(
hostids=[host.get("hostid") for host in group.get("hosts")],
output=["name", "key_", "hostid"],
monitored=True,
filter_={
"key_": [
"vfs.fs.totalsize",
"vfs.fs.usedsize",
"system.cpu.util[,idle]",
"vm.memory.size[used]",
"vm.memory.size[total]"
],
"state": 0
}
)
self._cache.set_cache(
"items_" + str(group.get("groupid")),
items,
expire=60
)
return list(self._cache.get_cache("items_" + str(group.get("groupid"))))
@property
def items(self):
"""
获取 "Zabbix Server" 主机为开启状态的 Items 信息:
:return:
"""
if self._cache.get_cache("zbx_server_items"):
return list(self._cache.get_cache("zbx_server_items"))
if not self._cache.get_cache("zbx_server_items"):
items = self._api.get_items(
hostids=self.server_hostid,
output=["name", "key_"],
monitored=True,
filter_={"state": 0}
)
self._cache.set_cache(
"zbx_server_items",
items,
expire=60
)
return list(self._cache.get_cache("zbx_server_items"))
def get_itemids(self, name: str):
"""
        Get the ids of Zabbix items by item name:
        :param name: item name
:return:
"""
if self._cache.get_cache("itemids_" + name):
return list(self._cache.get_cache("itemids_" + name))
if not self._cache.get_cache("itemids_" + name):
itemids = [item.get("itemid") for item in self.items if item.get("name") == name]
self._cache.set_cache(
"itemids_" + name,
itemids,
expire=60
)
return list(self._cache.get_cache("itemids_" + name))
def getcalc(self, itemids):
"""
        Get the min/max/avg of calculated items over the given time range:
        :param itemids: item ids to aggregate
:return:
"""
trends = self._api.get_trends(
itemids=itemids,
time_from=make_timestamp(self.start),
time_till=make_timestamp(self.end)
)
if len(trends) != 0:
values_min = []
values_max = []
values_avg = []
for trend in trends:
values_min.append(float(trend["value_min"]))
values_max.append(float(trend["value_max"]))
values_avg.append(float(trend["value_avg"]))
avg_value = round(sum(values_avg) / len(values_avg), 2)
min_value = min(values_min)
max_value = max(values_max)
return min_value, max_value, avg_value
return 0, 0, 0
def get_zbx_events(self, severities: list):
"""
        Get the alert events of the monitored Zabbix hosts:
        :param severities: list of severity levels to count
:return:
"""
return self._api.get_events(
countoutput=True,
value=1,
severities=severities,
time_from=make_timestamp(self.start),
time_till=make_timestamp(self.end)
)
def get_word(api, path, start, end, topnum):
"""" 生成word统计报表 """
document = Document()
docs = PyDocs(document)
zbx = ZbxReport(api, start, end)
document.styles["Normal"].font.name = "微软雅黑"
document.styles["Normal"]._element.rPr.rFonts.set(qn("w:eastAsia"), "微软雅黑")
docs.add_para(
run_="\n鑫运运管平台\n监控统计分析月报\n\n",
pt_=36,
rgb_=(79, 129, 189),
alignment=WD_ALIGN_PARAGRAPH.CENTER
)
docs.add_para(
run_="\n\n" + end[0:4] + "年" + end[4:6] + "月",
pt_=18,
rgb_=(79, 129, 189),
alignment=None
)
document.add_page_break()
    # 1. Summary page
docs.add_heading(level=1, run_="一、汇总信息", pt_=20)
    # 1.1 Table header
table_total = docs.add_total_tbl()
    # 1.2 Table body
table_total.cell(0, 1).text = "{} - {}".format(
time.strftime("%Y/%m/%d", time.strptime(start, "%Y%m%d")),
time.strftime("%Y/%m/%d", time.strptime(end, "%Y%m%d")))
    # Fetch the host groups
host_groups = zbx.htgrps
    # Total number of host groups
groups_num = len(host_groups)
    # Total number of hosts
hosts_sum = []
for grp in host_groups:
hosts_sum += [host.get("hostid") for host in grp.get("hosts")]
    # Count the severe alert events
event_sum_num = zbx.get_zbx_events([3, 4, 5])
table_total.cell(1, 1).text = str(groups_num)
table_total.cell(2, 1).text = str(len(set(hosts_sum)))
table_total.cell(3, 1).text = str(event_sum_num)
run_event_number = document.add_paragraph("")
run_event_number.paragraph_format.space_before = 15
table_detail_number = docs.add_detail_table()
# 获取对应告警级别数量
for severity in range(6):
event_num = zbx.get_zbx_events([severity])
table_detail_number.cell(2, severity).text = str(event_num)
docs.add_para(
run_="注: `严重`、`危险`、`灾难` 三个等级的告警纳入严重告警统计",
pt_=10,
rgb_=(0, 139, 0),
alignment=None
)
    # Severe-alert count tables
document.add_page_break()
docs.add_heading(level=1, run_="二、严重告警数量排行", pt_=20)
    # Add a level-2 heading
docs.add_heading(level=2, run_=f"1、严重告警数量最多的{topnum}个主机组", pt_=16)
    # Insert the table
event_table_desc_group = docs.add_event_grp_tbl()
document.add_page_break()
    # Hosts with the most severe alerts
logging.info("\033[32m严重告警数量排行, 主机维度\033[0m")
docs.add_heading(level=2, run_=f"2、严重告警数量最多的{topnum}台主机", pt_=16)
event_table_desc_host = docs.add_event_host_tbl()
    # 2. Detailed statistics page
    # 2.1 Table header
document.add_page_break()
docs.add_heading(level=1, run_="三、主机组资源利用率概览", pt_=20)
    # 2.3.2 Fetch the per-hostgroup aggregation items from the zabbix server
    # Determine the valid host groups from the "Average cpu utilization" items
valid_hostgroup_names = [
item.get("name").split("group")[1].strip()
for item in zbx.items
if item.get("name").startswith("Average cpu utilization in group")
]
host_groups = [g for g in host_groups if g.get("name") in valid_hostgroup_names]
    # Sort the host groups by host count
host_groups.sort(key=lambda x: len(x["hosts"]), reverse=True)
    # 2.3.3 Variables for the per-dimension statistics
    # average memory utilization (host group dimension)
    memory_top_group = []
    # cpu utilization (host group dimension)
    cpu_top_group = []
    # disk utilization (host group dimension)
    filesystem_top_group = []
    # alert count (host group dimension)
    event_count_group = []
    # average memory utilization (host dimension)
    memory_top_host = []
    # cpu utilization (host dimension)
    cpu_top_host = []
    # disk utilization (host dimension)
    filesystem_top_host = []
    # alert count (host dimension)
    event_count_host = []
    # 2.3.4 Fill in the table data
    summarize_row_count = 0
    summarize_table = None
    for index, group in enumerate(host_groups):
        group_name = group.get("name")
        logging.info("\033[33m正在处理数据……主机组:%s\033[0m", group_name)
        logging.info("\033[33m开始时间:%s\033[0m", str(datetime.now()))
        # Create a fresh summary table only at the top of a page; resetting the
        # table every iteration would leave it as None and crash add_row() below
        if summarize_row_count == 0:
            summarize_table = docs.add_summarize_table()
host_num = len(group.get("hosts"))
row = summarize_table.add_row()
row.cells[0].text = group_name
row.cells[1].text = str(host_num)
        # group_name wraps at 5 characters per line; compute how many rows it occupies
summarize_row_count += math.ceil(len(group_name) / 5)
        # Average CPU utilization
_, _, avg_v = zbx.getcalc(zbx.get_itemids(f"Average cpu utilization in group {group_name}"))
colored_cell(row.cells[2], avg_v)
        # Keep the figures for the ranking tables
cpu_top_group.append(
{
"groupname": group_name,
"hostnum": host_num,
"cpu_utilization": avg_v
}
)
        # Total memory
_, _, avg_v = zbx.getcalc(zbx.get_itemids(f"Total memory in group {group_name}"))
row.cells[3].text = convert_unit(avg_v)
memory_dic = {
"groupname": group_name,
"hostnum": host_num,
"memory_total": avg_v
}
        # Memory utilization
min_v, max_v, avg_v = zbx.getcalc(
zbx.get_itemids(f"Memory utilization in group {group_name}")
)
colored_cell(row.cells[4], max_v)
colored_cell(row.cells[5], min_v)
colored_cell(row.cells[6], avg_v)
memory_dic["memory_utilization"] = avg_v
memory_top_group.append(memory_dic)
        # Total disk space
_, _, avg_v = zbx.getcalc(zbx.get_itemids(f"Total disk space in {group_name}"))
row.cells[7].text = convert_unit(avg_v)
filesystem_dic = {
"groupname": group_name,
"hostnum": host_num,
"filesystem_total": avg_v
}
        # Disk utilization
min_v, max_v, avg_v = zbx.getcalc(
zbx.get_itemids(f"Used disk space in {group_name} (percentage)")
)
colored_cell(row.cells[8], max_v)
colored_cell(row.cells[9], min_v)
colored_cell(row.cells[10], avg_v)
filesystem_dic["filesystem_utilization"] = avg_v
filesystem_top_group.append(filesystem_dic)
        # Per-host processing: filter alerts and collect each host's metrics
group_host_keys = defaultdict(dict)
for host_item in zbx.get_host_items(group=group):
host_name = [
host["name"] for host in group["hosts"]
if host["hostid"] == host_item["hostid"]
][0]
group_host_keys[host_name][host_item["key_"]] = host_item["itemid"]
group_host_keys[host_name]["hostid"] = host_item["hostid"]
for host_name, host_keys in group_host_keys.items():
            # Collect the host's metrics
            # memory used / total
if host_keys.get("vm.memory.size[total]"):
_, _, mem_avg_used = zbx.getcalc(host_keys["vm.memory.size[used]"])
_, _, mem_avg_total = zbx.getcalc(host_keys["vm.memory.size[total]"])
if mem_avg_total != 0:
                    # memory utilization
memory_top_host.append(
{
"hostname": host_name,
"memory_utilization": 100 * mem_avg_used / mem_avg_total,
"memory_total": mem_avg_total,
"groupname": group_name
}
)
            # cpu utilization
if host_keys.get("system.cpu.util[,idle]"):
_, _, cpu_avg_idle = zbx.getcalc(host_keys["system.cpu.util[,idle]"])
if cpu_avg_idle != 0:
cpu_top_host.append(
{
"hostname": host_name,
"hostid": host_keys.get("hostid"),
"cpu_utilization": 100 - cpu_avg_idle,
"groupname": group_name
}
)
            # disk used / total
if host_keys.get("vfs.fs.totalsize") and host_keys.get("vfs.fs.usedsize"):
_, _, disk_avg_used = zbx.getcalc(host_keys.get("vfs.fs.usedsize"))
_, _, disk_avg_total = zbx.getcalc(host_keys.get("vfs.fs.totalsize"))
                # disk utilization
if disk_avg_used != 0:
filesystem_top_host.append(
{
"hostname": host_name,
"filesystem_utilization": 100 * disk_avg_used / disk_avg_total,
"filesystem_total": disk_avg_total,
"groupname": group_name
}
)
events = api.get_events(
output=["eventid"],
selecthosts=["name"],
hostids=[host.get("hostid") for host in group.get("hosts")],
value=1,
severities=[3, 4, 5],
time_from=make_timestamp(start),
time_till=make_timestamp(end)
)
row.cells[11].text = str(len(events))
        # severe alert count (host group dimension)
event_count_dic = {
"groupname": group_name,
"hostnum": host_num,
"events_count": len(events)
}
event_count_group.append(event_count_dic)
        # severe alert count (host dimension)
events_by_host = Counter(e['hosts'][0]['name'] for e in events if e['hosts'])
for host_name in events_by_host:
event_count_host.append(
{
"hostname": host_name,
"events_count": events_by_host[host_name],
"groupname": group_name
}
)
if index == len(host_groups) - 1:
document.add_page_break()
elif summarize_row_count >= 18:
summarize_row_count = 0
document.add_page_break()
    # Sort the host groups by severe alert count, descending
event_count_group.sort(key=lambda x: x["events_count"], reverse=True)
for i in range(min(topnum, len(event_count_group))):
row = event_table_desc_group.add_row()
row.cells[0].text = event_count_group[i]["groupname"]
row.cells[1].text = str(event_count_group[i]["hostnum"])
row.cells[2].text = str(event_count_group[i]["events_count"])
event_count_host.sort(key=lambda x: x["events_count"], reverse=True)
for i in range(min(topnum, len(event_count_host))):
row = event_table_desc_host.add_row()
row.cells[0].text = event_count_host[i]["groupname"]
row.cells[1].text = event_count_host[i]["hostname"]
row.cells[2].text = str(event_count_host[i]["events_count"])
    # 3. Memory utilization ranking
docs.add_heading(level=1, run_="四、内存使用率排行", pt_=20)
docs.add_heading(level=2, run_=f"1、内存使用率最高的{topnum}个主机组", pt_=16)
    # Insert the table; sort by memory utilization, descending
table_memory_group_desc = docs.add_mem_grp_tbl()
memory_top_group.sort(key=lambda x: x["memory_utilization"], reverse=True)
for i in range(min(topnum, len(memory_top_group))):
row = table_memory_group_desc.add_row()
row.cells[0].text = memory_top_group[i]["groupname"]
row.cells[1].text = str(memory_top_group[i]["hostnum"])
colored_cell(row.cells[2], memory_top_group[i]["memory_utilization"])
row.cells[3].text = str(convert_unit(memory_top_group[i]["memory_total"]))
document.add_page_break()
docs.add_heading(level=2, run_=f"2、内存使用率最低的{topnum}个主机组", pt_=16)
    # Insert the table; sort by memory utilization, ascending
table_memory_group_asc = docs.add_mem_grp_tbl()
memory_top_group.sort(key=lambda x: x["memory_utilization"])
for i in range(min(topnum, len(memory_top_group))):
row = table_memory_group_asc.add_row()
row.cells[0].text = memory_top_group[i]["groupname"]
row.cells[1].text = str(memory_top_group[i]["hostnum"])
colored_cell(row.cells[2], memory_top_group[i]["memory_utilization"])
row.cells[3].text = str(convert_unit(memory_top_group[i]["memory_total"]))
document.add_page_break()
docs.add_heading(level=2, run_=f"3、内存使用率最高的{topnum}台主机", pt_=16)
    # Insert the table
    table_memory_host_desc = docs.add_mem_host_tbl()
    # groupby() below requires the data sorted by its grouping key, so sort by hostname
    memory_top_host.sort(key=itemgetter("hostname"))
memory_top_host_groupby = []
for hostname, hosts_iter in groupby(memory_top_host, key=itemgetter("hostname")):
hosts = list(hosts_iter)
memory_top_host_groupby.append(
{
"hostname": hostname,
"memory_utilization": hosts[0]["memory_utilization"],
"memory_total": hosts[0]["memory_total"],
"groupname": ','.join(h['groupname'] for h in hosts)
}
)
memory_top_host_groupby.sort(key=itemgetter("memory_utilization"), reverse=True)
for i in range(min(topnum, len(memory_top_host))):
row = table_memory_host_desc.add_row()
row.cells[0].text = memory_top_host_groupby[i]["groupname"]
row.cells[1].text = memory_top_host_groupby[i]["hostname"]
colored_cell(row.cells[2], memory_top_host_groupby[i]["memory_utilization"])
row.cells[3].text = str(convert_unit(memory_top_host_groupby[i]["memory_total"]))
document.add_page_break()
docs.add_heading(level=2, run_=f"4、内存使用率最低的{topnum}台主机", pt_=16)
    # Insert the table
table_memory_host_asc = docs.add_mem_host_tbl()
memory_top_host_groupby.sort(key=itemgetter("memory_utilization"))
for i in range(min(topnum, len(memory_top_host))):
row = table_memory_host_asc.add_row()
row.cells[0].text = memory_top_host_groupby[i]["groupname"]
row.cells[1].text = memory_top_host_groupby[i]["hostname"]
colored_cell(row.cells[2], memory_top_host_groupby[i]["memory_utilization"])
row.cells[3].text = str(convert_unit(memory_top_host_groupby[i]["memory_total"]))
document.add_page_break()
docs.add_heading(level=1, run_="五、CPU使用率排行", pt_=20)
docs.add_heading(level=2, run_=f"1、CPU使用率最高的{topnum}个主机组", pt_=16)
    # Insert the table; sort host groups by cpu utilization, descending
table_cpu_group_desc = docs.add_cpu_grp_tbl()
cpu_top_group.sort(key=lambda x: x["cpu_utilization"], reverse=True)
for i in range(min(topnum, len(cpu_top_group))):
row = table_cpu_group_desc.add_row()
row.cells[0].text = cpu_top_group[i]["groupname"]
row.cells[1].text = str(cpu_top_group[i]["hostnum"])
colored_cell(row.cells[2], cpu_top_group[i]["cpu_utilization"])
document.add_page_break()
docs.add_heading(level=2, run_=f"2、CPU使用率最低的{topnum}个主机组", pt_=16)
    # Insert the table; sort host groups by cpu utilization, ascending
table_cpu_group_asc = docs.add_cpu_grp_tbl()
cpu_top_group.sort(key=lambda x: x["cpu_utilization"])
for i in range(min(topnum, len(cpu_top_group))):
row = table_cpu_group_asc.add_row()
row.cells[0].text = cpu_top_group[i]["groupname"]
row.cells[1].text = str(cpu_top_group[i]["hostnum"])
colored_cell(row.cells[2], cpu_top_group[i]["cpu_utilization"])
document.add_page_break()
docs.add_heading(level=2, run_=f"3、CPU使用率最高的{topnum}台主机", pt_=16)
    # Insert the table; cpu utilization per host, descending
    table_cpu_host_desc = docs.add_cpu_host_tbl()
    # groupby() below requires the data sorted by its grouping key, so sort by hostname
    cpu_top_host.sort(key=itemgetter("hostname"))
cpu_top_host_groupby = []
for hostname, hosts_iter in groupby(cpu_top_host, key=itemgetter("hostname")):
hosts = list(hosts_iter)
cpu_top_host_groupby.append(
{
"hostname": hostname,
"cpu_utilization": hosts[0]["cpu_utilization"],
"groupname": ','.join(h['groupname'] for h in hosts)
}
)
cpu_top_host_groupby.sort(key=itemgetter("cpu_utilization"), reverse=True)
for i in range(min(topnum, len(cpu_top_host_groupby))):
row = table_cpu_host_desc.add_row()
row.cells[0].text = cpu_top_host_groupby[i]["groupname"]
row.cells[1].text = cpu_top_host_groupby[i]["hostname"]
colored_cell(row.cells[2], cpu_top_host_groupby[i]["cpu_utilization"])
document.add_page_break()
docs.add_heading(level=2, run_=f"4、CPU使用率最低的{topnum}台主机", pt_=16)
    # Insert the table
table_cpu_host_asc = docs.add_cpu_host_tbl()
cpu_top_host_groupby.sort(key=itemgetter("cpu_utilization"))
for i in range(min(topnum, len(cpu_top_host_groupby))):
row = table_cpu_host_asc.add_row()
row.cells[0].text = cpu_top_host_groupby[i]["groupname"]
row.cells[1].text = cpu_top_host_groupby[i]["hostname"]
colored_cell(row.cells[2], cpu_top_host_groupby[i]["cpu_utilization"])
document.add_page_break()
docs.add_heading(level=1, run_="六、磁盘使用率排行", pt_=20)
docs.add_heading(level=2, run_=f"1、磁盘使用率最高的{topnum}个主机组", pt_=16)
    # Insert the table; sort host groups by disk utilization, descending
table_disk_group_desc = docs.add_disk_grp_tbl()
filesystem_top_group.sort(key=lambda x: x["filesystem_utilization"], reverse=True)
for i in range(min(topnum, len(filesystem_top_group))):
row = table_disk_group_desc.add_row()
row.cells[0].text = filesystem_top_group[i]["groupname"]
row.cells[1].text = str(filesystem_top_group[i]["hostnum"])
colored_cell(row.cells[2], filesystem_top_group[i]["filesystem_utilization"])
row.cells[3].text = str(convert_unit(
filesystem_top_group[i]["filesystem_total"]))
document.add_page_break()
docs.add_heading(level=2, run_=f"2、磁盘使用率最低的{topnum}个主机组", pt_=16)
    # Insert the table; sort host groups by disk utilization, ascending
table_disk_group_asc = docs.add_disk_grp_tbl()
filesystem_top_group.sort(key=lambda x: x["filesystem_utilization"])
for i in range(min(topnum, len(filesystem_top_group))):
row = table_disk_group_asc.add_row()
row.cells[0].text = filesystem_top_group[i]["groupname"]
row.cells[1].text = str(filesystem_top_group[i]["hostnum"])
colored_cell(row.cells[2], filesystem_top_group[i]["filesystem_utilization"])
row.cells[3].text = str(convert_unit(filesystem_top_group[i]["filesystem_total"]))
document.add_page_break()
docs.add_heading(level=2, run_=f"3、磁盘使用率最高的{topnum}台主机", pt_=16)
    # Insert the table; disk utilization per host, descending
table_disk_disk_desc = docs.add_disk_disk_tbl()
filesystem_top_host.sort(key=itemgetter("hostname"))
filesystem_top_host_groupby = []
for hostname, hosts_iter in groupby(filesystem_top_host, key=itemgetter("hostname")):
hosts = list(hosts_iter)
filesystem_top_host_groupby.append(
{
"hostname": hostname,
"filesystem_utilization": hosts[0]["filesystem_utilization"],
"filesystem_total": hosts[0]["filesystem_total"],
"groupname": ','.join(h['groupname'] for h in hosts)
}
)
filesystem_top_host_groupby.sort(
key=itemgetter("filesystem_utilization"), reverse=True)
for i in range(min(topnum, len(filesystem_top_host_groupby))):
row = table_disk_disk_desc.add_row()
row.cells[0].text = filesystem_top_host_groupby[i]["groupname"]
row.cells[1].text = filesystem_top_host_groupby[i]["hostname"]
colored_cell(row.cells[2], filesystem_top_host_groupby[i]["filesystem_utilization"])
row.cells[3].text = str(convert_unit(filesystem_top_host_groupby[i]["filesystem_total"]))
document.add_page_break()
docs.add_heading(level=2, run_=f"4、磁盘使用率最低的{topnum}台主机", pt_=16)
    # Insert the table; disk utilization per host, ascending
table_disk_disk_asc = docs.add_disk_disk_tbl()
filesystem_top_host_groupby.sort(key=itemgetter("filesystem_utilization"))
for i in range(min(topnum, len(filesystem_top_host_groupby))):
row = table_disk_disk_asc.add_row()
row.cells[0].text = filesystem_top_host_groupby[i]["groupname"]
row.cells[1].text = filesystem_top_host_groupby[i]["hostname"]
colored_cell(row.cells[2], filesystem_top_host_groupby[i]["filesystem_utilization"])
row.cells[3].text = str(convert_unit(filesystem_top_host_groupby[i]["filesystem_total"]))
    # List of virtual machines whose CPU utilization is below 1%
document.add_page_break()
docs.add_heading(level=1, run_="七、CPU使用率较低的虚拟机", pt_=20)
docs.add_heading(level=2, run_="1、CPU使用率低于1%的虚拟机", pt_=16)
    # Insert table
vm_page_num = 1
vm_page_row_count = 0
vm_nodata = True
for vm in zbx.vm_hosts:
vm_cpu_info = [host for host in cpu_top_host if host["hostid"] == vm["hostid"]]
if not vm_cpu_info:
continue
if vm_cpu_info[0].get("cpu_utilization", 0) < 1:
if vm_page_row_count == 0:
table_vm_cpu = docs.add_vm_table()
row = table_vm_cpu.add_row()
row.cells[0].text = vm_cpu_info[0]["groupname"]
row.cells[1].text = vm_cpu_info[0]["hostname"]
colored_cell(row.cells[2], vm_cpu_info[0]["cpu_utilization"])
vm_page_row_count += 1
if (vm_page_num == 1 and vm_page_row_count >= 17) or (vm_page_row_count >= 21):
                # Page break after 17 rows on the first page and 21 rows on later pages
vm_page_num += 1
vm_page_row_count = 0
vm_nodata = False
document.add_page_break()
    # If there is no data, fill a single row with "无" (none)
if vm_nodata:
table_vm_cpu = docs.add_vm_table()
row = table_vm_cpu.add_row()
for i in range(len(table_vm_cpu.columns)):
row.cells[i].text = "无"
    # Set the page orientation to landscape
for section in document.sections:
section.orientation = WD_ORIENT.LANDSCAPE
section.page_width, section.page_height = section.page_height, section.page_width
    # Walk all tables, add borders to the cells and center the text
for _, table in enumerate(document.tables):
for r, row in enumerate(table.rows):
for c, cell in enumerate(row.cells):
                if c == 0:
                    continue  # skip the first column
if r == 0:
                    # Light border for the header row
color = "#DDDDDD"
else:
                    # Dark border
color = "#7BA0CD"
set_cell_border(
cell,
start={"sz": 1, "color": color, "val": "single"}
)
                # Center the table data in every column except the first
                # (column 0 was already skipped above, so no extra check is needed)
                cell.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                cell.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
    # Adjust the style of the alert-count-by-severity table
for row in range(1, 3):
for col in range(6):
table_detail_number.cell(row, col).paragraphs[0].runs[0].font.size = Pt(12)
table_detail_number.cell(row, col).paragraphs[0].runs[0].bold = False
table_detail_number.cell(row, col).vertical_alignment = WD_ALIGN_VERTICAL.CENTER
table_detail_number.cell(
row, col
).paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
document.save(path)
def main(args):
"""main function"""
zapi = ZabbixApiGet(args.zapi)
start = datetime.strftime(parse(args.start), "%Y%m%d")
output_docx = args.output or f"运管平台统计分析月报{start[:4]}年{start[4:6]}月.docx"
get_word(
api=zapi,
path=output_docx,
start=start,
end=datetime.strftime(parse(args.end), "%Y%m%d"),
topnum=args.topnum
)
logging.info(
"\033[32mWord报表导出完成:%s\033[0m",
os.path.abspath(output_docx) if os.path.exists(output_docx) else ""
)
convert_pdf(api_secret=args.api_secret, output_docx=output_docx)
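# Usage sketch (hedged: the subcommand name is inferred from this module's
# filename and may differ in your deployment; the dates are examples):
#   zbxtool gen_analaysis_report -s 2023-01-01 -e 2023-01-31 -t 10 -o report.docx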
parser = argparse.ArgumentParser()
parser.add_argument(
"-s",
"--start",
required=True,
help="The Start Date",
type=str
)
parser.add_argument(
"-e",
"--end",
required=True,
help="The End Date",
type=str
)
parser.add_argument(
"-o",
"--output",
type=str,
help="output filename"
)
parser.add_argument(
"-t",
"--topnum",
type=int,
default=10
)
parser.add_argument(
"-a",
"--api-secret",
type=str,
help="convertapi api_secret"
)
parser.set_defaults(handler=main) | zbxtool-cfomp | /zbxtool_cfomp-0.11.12-py3-none-any.whl/lib/commands/gen_analaysis_report.py | gen_analaysis_report.py |
import argparse
import logging
import sys
from lib.utils.zbxapis import ZabbixApiUpdate
from lib.utils.format import pretty_tbl, jmes_search, get_value, DiskCache
def show(hosts_info: list):
"""
    Print the Zabbix template information linked to each host:
:param hosts_info:
:return:
"""
tbl = pretty_tbl(
title="Results of linked templates",
field_names=["Host", "Templates"],
rows=jmes_search(
jmes_rexp=get_value(
section="JMES",
option="SEARCH_EACH_HOST_TEMPLATES"
),
data=hosts_info
)
)
tbl.align["Host"] = "l"
tbl.align["Templates"] = "l"
print(tbl)
class BaseTool:
def __init__(self, args):
self._zapi = ZabbixApiUpdate(args.zapi)
self.hosts = args.hosts or [""]
self.ht_grps = args.groups or [""]
self._cache = DiskCache()
@property
def hosts_info(self):
"""
        Fetch Zabbix host information:
        1. Mainly the linked template information.
:return:
"""
if self.ht_grps:
self.hosts.extend(
jmes_search(
jmes_rexp=get_value(
section="JMES",
option="SEARCH_HOSTGROUP_HOSTS_HOST"
),
data=self._zapi.get_ht_grps(
filter_={"name": self.ht_grps},
selecthosts=["host"]
)
)
)
if self.hosts:
return self._zapi.get_hts(
filter_={"name": self.hosts},
selectparenttemplates=["host"],
output=["host"]
)
def get_templates_info(self, tpl_names: list):
"""
        Fetch Zabbix template information:
:param tpl_names:
:return:
"""
tpl_info = []
if tpl_names:
for tpl_name in tpl_names:
if self._cache.get_cache("tpl_" + tpl_name):
tpl_info.append(dict(self._cache.get_cache("tpl_" + tpl_name)))
if not self._cache.get_cache("tpl_" + tpl_name):
tpl = self._zapi.get_tpls(
filter_={"host": tpl_name},
output=["host"]
)
self._cache.set_cache(
"tpl_" + tpl_name,
tpl[0],
expire=60
)
tpl_info.append(dict(self._cache.get_cache("tpl_" + tpl_name)))
return tpl_info
def if_tpl_exist(self, tpl_names: list):
"""
        Check which of the given template names exist in Zabbix:
:param tpl_names:
:return:
"""
return [
tpl_name
for tpl_name in tpl_names if
self._zapi.get_tpls(filter_={"host": tpl_name})
and self._zapi.get_tpls(filter_={"host": tpl_name})[0]
]
@staticmethod
def filter_tpls(host: dict, templates, type_: str):
"""
        Filter templates against those already linked to the host:
:param host:
:param templates:
:param type_:
:return:
"""
parent_tpls = jmes_search(
jmes_rexp=get_value(section="JMES", option="SEARCH_HOST_PARENTS_TEMPLATES"),
data=host
)
if type_.lower() == "add":
return [tpl for tpl in templates if tpl not in parent_tpls]
if type_.lower() == "del" or type_.lower() == "rep":
return [tpl for tpl in templates if tpl in parent_tpls]
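    # Illustrative behavior of filter_tpls (hypothetical template names): if a
    # host currently links templates ["A", "B"], then
    #   filter_tpls(host, ["B", "C"], "add") -> ["C"]  (not yet linked, safe to add)
    #   filter_tpls(host, ["B", "C"], "del") -> ["B"]  (currently linked, removable)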
class ListTemplate(BaseTool):
def __init__(self, args):
super().__init__(args)
show(self.hosts_info)
class AddTemplate(BaseTool):
def __init__(self, args):
super().__init__(args)
self.templates = args.add
self.add()
def add(self):
"""
        Link new templates to Zabbix hosts:
        1. Templates already linked to the host are filtered out first, since re-adding them would raise an error.
:return:
"""
for host in self.hosts_info:
tpls = self.filter_tpls(
host=host,
templates=self.if_tpl_exist(tpl_names=self.templates),
type_="add"
)
if tpls:
params = {
"hostid": host.get("hostid"),
"templates": self.get_templates_info(tpls) + host.get("parentTemplates")
}
self._zapi.update_host(params)
logging.info(
"\033[32m成功更新主机 '%s',添加新模板 => '%s'\033[0m",
host.get("host"),
"、".join([tpl for tpl in tpls if tpls])
)
show(self.hosts_info)
class ClearTemplate(BaseTool):
def __init__(self, args):
self.templates = args.clear
super().__init__(args)
self.clear()
def clear(self):
"""
        Unlink templates from Zabbix hosts:
        1. Only templates currently linked to the host can be removed.
:return:
"""
for host in self.hosts_info:
tpls = self.filter_tpls(
host=host,
templates=self.if_tpl_exist(tpl_names=self.templates),
type_="del"
)
if tpls:
params = {
"hostid": host.get("hostid"),
"templates_clear": self.get_templates_info(tpls)
}
self._zapi.update_host(params)
logging.info(
"\033[32m成功更新主机 '%s',删除模板 => '%s'\033[0m",
host.get("host"),
"、".join([tpl for tpl in tpls if tpls])
)
show(self.hosts_info)
class UseTemplate(BaseTool):
def __init__(self, args):
super().__init__(args)
self.templates = args.use
self.use()
def use(self):
"""
        Replace all templates currently linked to the host:
:return:
"""
for host in self.hosts_info:
tpls = self.if_tpl_exist(tpl_names=self.templates)
if tpls:
params = {
"hostid": host.get("hostid"),
"templates": self.get_templates_info(tpls)
}
self._zapi.update_host(params)
logging.info(
"\033[32m成功更新主机 '%s'\033[0m",
host.get("host")
)
show(self.hosts_info)
class ReplaceTemplate(BaseTool):
def __init__(self, args):
super().__init__(args)
self.templates = args.replace
self.instead_templates = args.to
self.replace()
def replace(self):
"""
        Replace specific Zabbix host templates:
        1. The templates being replaced must already be linked to the host;
        2. The replacement templates must not already be linked to the host.
:return:
"""
for host in self.hosts_info:
tpls_del = self.filter_tpls(host=host, templates=self.templates, type_="rep")
tpls_add = self.filter_tpls(
host=host,
templates=self.if_tpl_exist(tpl_names=self.instead_templates),
type_="add"
)
if tpls_del and tpls_add:
new_templates = list(
filter(
lambda tpl: tpl not in self.get_templates_info(self.templates),
host.get("parentTemplates")
)
)
new_templates += self.get_templates_info(self.instead_templates)
params = {"hostid": host.get("hostid"), "templates": new_templates}
self._zapi.update_host(params)
logging.info(
"\033[32m成功更新主机 '%s', 替换模板 '%s' => 新模板 '%s'\033[0m",
host.get("host"),
"、".join([tpl for tpl in self.templates if self.templates]),
"、".join([tpl for tpl in self.instead_templates if self.instead_templates])
)
show(self.hosts_info)
def main(args):
    # List template information
if args.list:
ListTemplate(args)
    # Add templates
if args.add:
AddTemplate(args)
    # Remove the specified templates
if args.clear:
ClearTemplate(args)
    # Replace all templates
if args.use:
UseTemplate(args)
    # Replace the specified templates
if args.replace:
if not args.to:
parser.print_help()
logging.error("the argument --to is required")
sys.exit(1)
ReplaceTemplate(args)
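# Example invocations (hedged: the subcommand name is inferred from this
# module's filename; host, group and template names are hypothetical):
#   zbxtool hosttpl --hosts web-01 -l
#   zbxtool hosttpl -g "Linux servers" -a "Template OS Linux"
#   zbxtool hosttpl --hosts web-01 -r "Old Template" --to "New Template"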
parser = argparse.ArgumentParser(description="(list|add|del) zabbix hosts templates")
parser.add_argument(
"--hosts",
nargs="+",
help="specific zabbix hosts"
)
parser.add_argument(
"-g",
"--groups",
nargs="+",
help="specific zabbix hostgroups"
)
parser.add_argument(
"-t",
"--to",
nargs="+",
help="specific templates names instead to"
)
opt_group = parser.add_mutually_exclusive_group(required=True)
opt_group.add_argument(
"-l",
"--list",
action="store_true",
help="list specific host templates"
)
opt_group.add_argument(
"-a",
"--add",
nargs="+",
help="add specific host templates"
)
opt_group.add_argument(
"-c",
"--clear",
nargs="+",
help="del specific host templates"
)
opt_group.add_argument(
"-u",
"--use",
nargs="+",
help="use specific host templates"
)
opt_group.add_argument(
"-r",
"--replace",
nargs="+",
help="replaced specific host templates"
)
parser.set_defaults(handler=main) | zbxtool-cfomp | /zbxtool_cfomp-0.11.12-py3-none-any.whl/lib/commands/hosttpl.py | hosttpl.py |
import argparse
import sys
import os
from functools import reduce
import logging
import copy
from ast import literal_eval
from lib.utils.zbxapis import ZabbixApiUpdate
from lib.utils.format import pretty_tbl, get_value, jmes_search
from lib.utils.wxapis import WxWorkApi
def show(zbx_users: list):
"""
    Print the users that have a WeWork (WeChat Work) alert media configured:
:param zbx_users:
:return:
"""
tbl = pretty_tbl(
title="Zabbix用户企业微信账号对照",
field_names=["Zabbix Userid", "Zabbix User Fullname", "Zabbix User Sendto"],
rows=[
[
zbx_user.get("username"),
zbx_user.get("fullname") if zbx_user.get("fullname") else "",
zbx_user.get("output_sendto") if zbx_user.get("output_sendto") else ""
]
for zbx_user in zbx_users
if zbx_users
]
)
tbl.align["Zabbix Userid"] = "l"
tbl.align["Zabbix User Fullname"] = "l"
tbl.align["Zabbix User Sendto"] = "l"
print(tbl)
class SyncWeworkMedia:
def __init__(self, zapi, corpid, agentid, secret, depart_name,
zbx_usrgrps, zbx_username, extra_media_type):
self._zapi = ZabbixApiUpdate(zapi)
self._corpid = corpid
self._agentid = agentid
self._secret = secret
self._zbx_usrgrps = zbx_usrgrps
self._depart_name = depart_name
self._zbx_username = zbx_username
self._extra_media_type = extra_media_type
def get_media_attr(self, media_type_name: str, attr: str):
"""
        Fetch the specified alert media type information:
:param media_type_name:
:param attr:
:return:
"""
media_type_id = jmes_search(
jmes_rexp=get_value(
section="JMES",
option="SEARCH_MEDIATYPE_ID",
raw=True
) % media_type_name,
data=self._zapi.get_medias(output=["name"])
)
return {media_type_id: attr}
@property
def media_attrs(self):
"""
        Fetch the specified attributes of the user alert media:
:return:
"""
media_attrs = {}
wework_media_type_ids = []
for item in literal_eval(get_value(section="ZABBIX", option="MediaAttrList")):
media_attr = self.get_media_attr(
media_type_name=item.get("media_type_name"),
attr=item.get("attr")
)
media_attrs.update(media_attr)
if item["kind"] == "wework" and \
item.get("media_type_name") == self._extra_media_type:
wework_media_type_ids.extend(list(media_attr.keys()))
return media_attrs, wework_media_type_ids[0] if wework_media_type_ids else ""
@property
def zbx_users(self):
"""
        Fetch the specified Zabbix user information:
:return:
"""
zbx_users = self._zapi.get_zbx_users(
output=["alias", "name", "surname"],
usrgrpids=jmes_search(
jmes_rexp=get_value(
section="JMES",
option="SEARCH_USERGROUP_IDS"
),
data=self._zapi.get_usr_grps(
filter_={"name": self._zbx_usrgrps},
output=["name"]
)
),
filter_={"username": self._zbx_username},
selectmedias=["mediatypeid", "sendto", "active", "severity", "period"],
selectmediatypes=["mediatypeid"]
)
for zbx_user in zbx_users:
medias = zbx_user.get("medias")
for media in medias:
mediatypeid = media.get("mediatypeid")
# {"1": "email", "5": "mobile", "12": "wework_id", "16": "wework_id"}
if mediatypeid in self.media_attrs[0]:
attr = self.media_attrs[0].get(mediatypeid)
send = media.get("sendto")
sendto = send if isinstance(send, list) else [send]
if zbx_user.get(attr):
zbx_user[attr] += sendto
else:
zbx_user[attr] = sendto
zbx_user["medias"] = medias
del zbx_user["mediatypes"]
return zbx_users
def match_wework_userid(self, zbx_user: dict):
"""
        1. Extract the user's mobile number, email, name and other details from the Zabbix alert media, keyed by media type;
        2. Match the user to a WeWork userid through several strategies;
        3. Matching by mobile number is preferred; if the user has no mobile number or it fails to match, fall back to the other strategies in order;
        4. For users whose WeWork userid is finally matched, create or update the alert media.
:param zbx_user:
:return:
"""
match_funcs = [
            # Match by mobile number
lambda z_user, w_user: w_user.get("mobile") in z_user.get("mobile", []),
            # Match by surname + name
lambda z_user, w_user: z_user.get("fullname") == w_user.get("name"),
            # Match by name + surname
lambda z_user, w_user: z_user.get("fullname_reverse") == w_user.get("name"),
            # Match by email
lambda z_user, w_user: w_user.get("email") in z_user.get("email", [])
]
wework_users = WxWorkApi(
corpid=self._corpid,
agentid=self._agentid,
secret=self._secret
).get_dep_users(self._depart_name)
for match_func in match_funcs:
result = [
user
for user in wework_users
if wework_users and match_func(zbx_user, user)
]
if result:
return result[0].get("userid")
def add_user_wework_media(self, zbx_user: dict, update=False, prefix=False):
"""
        Add a WeWork alert media to a Zabbix user.
        update: if the user already has a WeWork alert media whose userid differs
        from the one looked up here,
        False leaves it unchanged,
        True updates it to the looked-up userid.
"""
wework_userid = self.match_wework_userid(zbx_user)
if not wework_userid:
logging.info(
"\033[33m同步失败: Zabbix user '%s' 未找到对应的企业微信账号\033[0m",
zbx_user.get("username")
)
return
zbx_user_medias = zbx_user.get("medias")
zbx_user_medias_copy = copy.deepcopy(zbx_user.get("medias"))
sendto = f"{self._corpid}_{wework_userid}" if prefix else wework_userid
add_media = {
"mediatypeid": "",
"sendto": sendto,
"active": get_value(section="WEWORK", option="WEWORK_ACTIVE"),
"severity": str(
sum(literal_eval(get_value(section="WEWORK", option="WEWORK_SEVERITY")))
),
"period": get_value(section="WEWORK", option="WEWORK_PERIOD")
}
        # The Zabbix user already has a WeWork alert media
typeid = self.media_attrs[1]
wework_media = jmes_search(
jmes_rexp=get_value(
section="JMES",
option="SEARCH_WEWORK_MEDIA",
raw=True
) % typeid,
data=zbx_user_medias
)
# [{"mediatypeid": "", "sendto": "", "active": "", "severity": "", "period": ""}]
if wework_media and not jmes_search(
jmes_rexp=get_value(section="JMES", option="SEARCH_WEWORK_SENDTO", raw=True) % sendto,
data=wework_media
):
for media in wework_media:
sendto = media.get("sendto")
add_media.update({"mediatypeid": typeid})
zbx_user_medias.append(add_media)
            # The corp id and the WeWork userid are joined with "_"; since the
            # userid itself may contain "_", limit the split with maxsplit=1
wework_split = sendto.split("_", maxsplit=1)
            # When the user already has a WeWork media but belongs to another corp, it must be added again.
            # maxsplit=1 splits on the first "_" only, since WeWork user names may also contain "_"
if sendto and len(wework_split) == 2 and wework_split[0] != self._corpid and prefix:
add_media.update({"mediatypeid": typeid})
zbx_user_medias.append(add_media)
if update and sendto:
media.update(
{
"sendto": f"{wework_split[0]}_{wework_userid}" if
sendto and len(wework_split) == 2 else wework_userid
}
)
logging.info(
"\033[32m成功更新企业微信userid:Zabbix userid => '%s', "
"WeWork userid => '%s'\033[0m",
zbx_user.get("username"),
wework_userid
)
if not wework_media:
add_media.update({"mediatypeid": typeid})
zbx_user_medias.append(add_media)
        # De-duplicate the medias list before updating, to avoid adding duplicates
distinct_zbx_user_medias = []
if zbx_user_medias:
for media in zbx_user_medias:
if media not in distinct_zbx_user_medias:
distinct_zbx_user_medias.append(media)
if distinct_zbx_user_medias != zbx_user_medias_copy:
self._zapi.update_user(
{
"userid": zbx_user.get("userid"),
"medias": distinct_zbx_user_medias
}
)
logging.info(
"\033[32m同步成功: Zabbix user: '%s', WeWork userid: '%s'\033[0m",
zbx_user.get("username"),
wework_userid
)
return add_media.get("sendto")
def main(args):
"""
:param args:
:return:
"""
corpid = args.corpid
secret = args.secret
agentid = args.agentid
if args.env:
corpid = corpid if corpid else os.environ.get("WEWORK_CORPID")
secret = secret if secret else os.environ.get("WEWORK_SECRET")
agentid = agentid if agentid else os.environ.get("WEWORK_AGENTID")
if corpid and secret and agentid:
worker = SyncWeworkMedia(
zapi=args.zapi,
corpid=corpid,
agentid=agentid,
secret=secret,
depart_name=args.depart_name,
zbx_usrgrps=reduce(lambda x, y: x + y, args.usergroups) if args.usergroups else [],
zbx_username=args.username,
extra_media_type=args.media_type
)
zbx_users = worker.zbx_users
for user in zbx_users:
sendto = worker.add_user_wework_media(
zbx_user=user,
update=args.allow_update,
prefix=args.allow_prefix
)
user["output_sendto"] = sendto
show(zbx_users)
else:
parser.print_help()
logging.error("\033[31m缺乏必要参数:'corpid' or 'secret' or 'agentid'\033[0m")
sys.exit(1)
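# Example invocation (hedged: the subcommand name is inferred from the module;
# the ids are hypothetical, the corpid format follows the --allow-prefix help text):
#   zbxtool sync_wework_media -c ww438e13e211d83d51 -t <secret> -a 1000002 \
#       -d "运维部" -m "WeWork" --allow-update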
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--corpid", required=True, help="企业微信的企业ID")
parser.add_argument("-t", "--secret", required=True, help="企业微信内应用的Secret")
parser.add_argument("-a", "--agentid", required=True, help="企业微信内应用的ID")
parser.add_argument("-d", "--depart_name", required=True, help="指定企业微信中部门名称")
parser.add_argument("-e", "--env", action="store_true", help="从环境变量中读取参数")
parser.add_argument("-g", "--usergroups", nargs="+", action="append", help="指定更新的zabbix用户组")
parser.add_argument("-u", "--username", help="指定更新的zabbix用户")
parser.add_argument("-m", "--media_type", required=True, help="指定zabbix中企业微信的告警媒介")
parser.add_argument("--allow-update", action="store_true", help="当zabbix user已存在企业微信告警媒介, \
但sendto字段与获取的企业微信userid不一致, 是否允许更新")
parser.add_argument("--allow-prefix", action="store_true", help="是否加上企业微信的企业id作为前缀,\
如'ww438e13e211d83d51_ChenHuiPing'")
parser.set_defaults(handler=main) | zbxtool-cfomp | /zbxtool_cfomp-0.11.12-py3-none-any.whl/lib/commands/sync_wework_media.py | sync_wework_media.py |
import argparse
import logging
import sys
from lib.utils.ldapapis import Ldap
from lib.utils.format import get_value
from lib.utils.zbxapis import ZabbixApiCreate
class ZbxGrps:
def __init__(self, zapi, ldap):
self._zapi = ZabbixApiCreate(zapi)
self._ldap = ldap
@property
def zbx_hostgrps(self):
zbx_hostgrps = self._zapi.get_ht_grps(
output=["name", "groupid"],
filter_={"flags": 0},
monitored_hosts=True
)
return zbx_hostgrps
@property
def zbx_usergrps(self):
zbx_usergrps = self._zapi.get_usr_grps(
searchwildcardsenabled=True,
selectusers=["userid", "username", "name"],
output=["username", "name"]
)
return zbx_usergrps
def create_usrgrps_by_htgrps(self):
usrgrps = [
usergrp.get("name").replace(" admins", "")
for usergrp in self.zbx_usergrps
]
zbx_htgrps_without_usrgrp = list(
filter(
lambda grp: grp.get("name") not in usrgrps,
self.zbx_hostgrps
)
)
if zbx_htgrps_without_usrgrp:
for htgrp in zbx_htgrps_without_usrgrp:
self._zapi.create_usrgrp(
grp_name=htgrp.get("name") + " admins",
groupid=htgrp.get("groupid"),
# 3 - read-write access
permission=3
)
logging.info(
"\033[32m成功创建Zabbix用户组 '%s'\033[0m",
htgrp.get("name") + " admins"
)
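    # Naming convention illustrated (hypothetical group name): a host group "XYZ"
    # with no matching user group gets "XYZ admins" created, holding read-write
    # (permission=3) access to that host group.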
def create_ldap_usrgrps(self):
for zbx_grp in self.zbx_usergrps:
unique_members = []
            # If the Zabbix user exists in LDAP, add it to the member list
for user in zbx_grp.get("users"):
ldap_usr = self._ldap.search_user(
dn=get_value(section="LDAP", option="LDAP_USER_DN"),
filter_=f"(uid={user.get('username')})"
)
if ldap_usr:
unique_members.append(ldap_usr)
            # Special characters need to be escaped for the LDAP filter
zbx_grp_name = zbx_grp.get("name").replace("(", "\\28").replace(")", "\\29")
ldap_usrgrp = self._ldap.search_usergrp(
dn=get_value(section="LDAP", option="LDAP_USER_GROUP_DN"),
filter_=f"(cn={zbx_grp_name})"
)
if ldap_usrgrp:
self._ldap.update_member(ldap_usrgrp[0], unique_members)
logging.info("\033[32m成功更新LDAP用户组 '%s'\033[0m", ldap_usrgrp[0])
else:
ldap_cn = f'cn={zbx_grp_name},' \
f'{get_value(section="LDAP", option="LDAP_USER_GROUP_DN")}'
self._ldap.create_usergrp(dn=ldap_cn, member=unique_members)
logging.info("\033[32m成功创建LDAP用户组 '%s'\033[0m", ldap_cn)
def main(args):
ldap = Ldap(
host=args.ldap_server,
user=args.ldap_user,
passwd=args.ldap_password,
port=args.ldap_port
)
instance_ = ZbxGrps(args.zapi, ldap)
    # Create Zabbix user groups
if args.create_zbx_usrgrp:
instance_.create_usrgrps_by_htgrps()
    # Remove the Zabbix user groups from LDAP
    if args.clean:
        if not args.ldap_server or not args.ldap_user or not args.ldap_password:
            parser.print_help()
            logging.error("the argument --ldap-server/--ldap-user/--ldap-password is required")
            sys.exit(1)
        ldap.clean_usergrp(dn=get_value(section="LDAP", option="LDAP_USER_GROUP_DN"))
    # Update or create LDAP groups
    if args.create_ldap_group:
        if not args.ldap_server or not args.ldap_user or not args.ldap_password:
            parser.print_help()
            logging.error("the argument --ldap-server/--ldap-user/--ldap-password is required")
            sys.exit(1)
        instance_.create_ldap_usrgrps()
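# Example invocations (hedged: the subcommand name is inferred from the module;
# the LDAP endpoint and credentials are hypothetical):
#   zbxtool ldap_usergrp --create-zbx-usrgrp
#   zbxtool ldap_usergrp --create-ldap-group -s 10.0.0.10 -u "cn=admin,dc=example,dc=com" -p secret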
parser = argparse.ArgumentParser()
parser.add_argument("--create-ldap-group", action="store_true")
parser.add_argument("--create-zbx-usrgrp", action="store_true")
parser.add_argument("-c", "--clean", action="store_true")
parser.add_argument("-s", "--ldap-server", help="ldap server ip address")
parser.add_argument("-o", "--ldap-port", default=389, help="ldap server port")
parser.add_argument("-u", "--ldap-user", help="ldap bind user")
parser.add_argument("-p", "--ldap-password", help="ldap password")
parser.set_defaults(handler=main) | zbxtool-cfomp | /zbxtool_cfomp-0.11.12-py3-none-any.whl/lib/commands/ldap_usergrp.py | ldap_usergrp.py |
import argparse
import logging
from lib.utils.zbxapis import ZabbixApiUpdate, ZabbixApiCreate
from lib.utils.format import get_value
from lib.utils.zbxtags import ItemTags
class ZbxCalculatedItems:
def __init__(self, zapi):
self._zapi = zapi
def get_fs_size_items(self, hostid: str):
"""
获取带有 "vfs.fs.size" 关键字的全部监控项:
1. 监控项主要分为 "used"、"pused"、"total" 三类;
:param hostid:
:return:
"""
return self._zapi.get_items(
hostids=[hostid],
search={
"key_": get_value(
section="ZABBIX",
option="FS_SIZE_ITEM_KEY"
)
},
selectinterfaces=["ip"],
selecthosts=["host", "status", "tags"],
selecttags="extend"
)
@staticmethod
def gen_formula(mode: str, items: list, extra="pused"):
"""
生成 "Calculated" 类型的 item 的表达式:
1. 表达式由三部分构成 func(/IP ADDRESS/ITEM KEY),即函数、ip 和 item key;
:param mode:
:param items:
:param extra:
:return:
"""
formula = "+".join(
[
"last(/" + item.get("hosts")[0].get("host") + "/" + item.get("key_") + ")"
for item in items
if item.get("hosts")
if mode in item.get("key_") and extra not in item.get("key_")
]
)
return formula if formula else "0"
def update_calculated_item(api, get_items: list, mode: str, fs_size_items: list):
"""
如果已经存在 "Calculated" 类型的 item 则更新它:
1. 因为重复添加已经存在的 item 会报错,所以当 item 已经存在时则更新它或者不做操作;
:param api:
:param get_items:
:param mode:
:param fs_size_items:
:return:
"""
instance_ = ZbxCalculatedItems(api)
for item in get_items:
item_tags = ItemTags(item.get("tags")).added_item_tags(
tag_name="Application",
tag_value="Disk calculated"
)
if item.get("params") != instance_.gen_formula(mode, fs_size_items) or \
item.get("tags") != item_tags:
api.update_item(
{
"itemid": item.get("itemid"),
"params": instance_.gen_formula(mode, fs_size_items),
"tags": ItemTags(item.get("tags")).added_tags(
tag_name="Application",
tag_value="Disk calculated"
)
}
)
logging.info(
"\033[32m主机 '%s' 成功更新监控项: '(ItemID)%s' => '(ItemName)%s'\033[0m",
item.get("hosts")[0].get("host"),
item.get("itemid"),
item.get("name")
)
else:
logging.info(
"\033[33m主机 '%s' 监控项未发生改变:'(ItemID)%s' => '(ItemName)%s'\033[0m",
item.get("hosts")[0].get("host"),
item.get("itemid"),
item.get("name")
)
def create_calculated_item(api, host: dict, item_name: str,
item_key: str, mode: str):
"""
创建 "Calculated" 类型的 item:
1. Zabbix 没有直接获取总磁盘和已用磁盘空间的监控项,只有各挂载的文件系统的空间使用情况的监控项;
2. 因此在各挂载文件系统监控项的基础上创建一个汇总的 Calculated 监控项;
3. 涉及的监控项为 vfs.fs.size[fs,total] 和 vfs.fs.size[fs,used];
4. 创建的计算监控项 key 为 vfs.fs.totalsize 和 vfs.fs.usedsize。
:param api:
:param host:
:param item_name:
:param item_key:
:param mode:
:return:
"""
instance_ = ZbxCalculatedItems(api)
result = api.create_item(
delay=3600,
hostid=host.get("hostid"),
key_=item_key,
name=item_name,
type_=15,
value_type=3,
data_type=0,
units="B",
params=instance_.gen_formula(
mode,
instance_.get_fs_size_items(host.get("hostid"))
),
tags=[{"tag": "Application", "value": "Disk calculated"}]
)
logging.info(
"\033[32m主机 '%s' 成功创建监控项 '%s'\033[0m",
host.get("name"),
item_name
)
return result
def calculated_disk(api):
"""
    Create or update the "Calculated" items:
:return:
"""
instance_ = ZbxCalculatedItems(ZabbixApiUpdate(api))
hosts = ZabbixApiUpdate(api).get_hts(
output=["hostid", "name"],
filter_={"available": 1, "status": 0},
searchinventory={"os_short": ["Linux", "Windows"]},
searchbyany=True
)
for host in hosts:
fs_size_items = instance_.get_fs_size_items(host.get("hostid"))
total_disk_items = ZabbixApiUpdate(api).get_items(
hostids=host.get("hostid"),
filter_={"name": get_value(section="ZABBIX", option="TOTAL_ITEM_NAME")},
selecthosts=["host", "status", "tags"],
selecttags="extend"
)
if len(total_disk_items) == 0:
create_calculated_item(
host=host,
item_name=get_value(section="ZABBIX", option="TOTAL_ITEM_NAME"),
item_key=get_value(section="ZABBIX", option="TOTAL_ITEM_KEY"),
mode="total",
api=ZabbixApiCreate(api)
)
else:
update_calculated_item(
get_items=total_disk_items,
mode="total",
fs_size_items=fs_size_items,
api=ZabbixApiUpdate(api)
)
used_disk_items = ZabbixApiUpdate(api).get_items(
hostids=host.get("hostid"),
filter_={"name": get_value(section="ZABBIX", option="USED_ITEM_NAME")},
selecthosts=["host", "status", "tags"],
selecttags="extend"
)
if len(used_disk_items) == 0:
create_calculated_item(
host=host,
item_name=get_value(section="ZABBIX", option="USED_ITEM_NAME"),
item_key=get_value(section="ZABBIX", option="USED_ITEM_KEY"),
mode="used",
api=ZabbixApiCreate(api)
)
else:
update_calculated_item(
get_items=used_disk_items,
mode="used",
fs_size_items=fs_size_items,
api=ZabbixApiUpdate(api)
)
def main(args):
"""
    Create the total-disk-space and used-disk-space items on each host:
:param args:
:return:
"""
calculated_disk(api=args.zapi)
parser = argparse.ArgumentParser()
parser.set_defaults(handler=main) | zbxtool-cfomp | /zbxtool_cfomp-0.11.12-py3-none-any.whl/lib/commands/fs_calc.py | fs_calc.py |
import argparse
import logging
from lib.utils.format import pretty_tbl
def show(zbx_users: list):
tbl = pretty_tbl(
title="Zabbix用户告警媒介对照",
field_names=["Zabbix Userid", "Zabbix User Fullname", "Zabbix User Medias"],
rows=[
[
zbx_user.get("username"),
zbx_user.get("fullname") if zbx_user.get("fullname") else "",
"/".join(zbx_user.get("medias"))
]
for zbx_user in zbx_users
if zbx_users
]
)
tbl.align["Zabbix Userid"] = "l"
tbl.align["Zabbix User Fullname"] = "l"
tbl.align["Zabbix User Medias"] = "l"
print(tbl)
def delete_user_medias(zapi, medias: list):
    # Filter the media types directly via the Zabbix API, based on the user-supplied list
    media_types = zapi.mediatype.get(
        {
            "output": ["name"],
            "filter": {"name": medias}
        }
    )
    mediatype_ids = [media_type.get("mediatypeid") for media_type in media_types]
zbx_users = zapi.user.get(
{
"output": ["userid", "username"],
"selectMedias": ["mediatypeid", "sendto", "active", "severity", "period"],
"mediatypeids": mediatype_ids
}
)
for user in zbx_users:
zapi.user.update(
{
"userid": user.get("userid"),
"medias": [
media for media in user.get("medias")
if media.get("mediatypeid") not in mediatype_ids
],
}
)
logging.info(
"\033[32m成功更新Zabbix用户medias:Zabbix userid => '%s'\033[0m",
user.get("username")
)
def main(args):
zapi = args.zapi
delete_user_medias(
zapi=zapi,
medias=args.media
)
zbx_users = zapi.user.get(
{
"output": ["userid", "alias", "name", "surname"],
"selectMedias": ["mediatypeid"]
}
)
for user in zbx_users:
mediatype_ids = [
media.get("mediatypeid")
for media in user.get("medias") if user.get("medias")
]
if mediatype_ids:
mediatype_names = zapi.mediatype.get(
{
"output": ["name"],
"filter": {"mediatypeid": mediatype_ids}
}
)
user["medias"] = [name.get("name") for name in mediatype_names if mediatype_names]
show(zbx_users=zbx_users)
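# Example invocation (hedged: the subcommand name is inferred from the module;
# the media type names are hypothetical):
#   zbxtool delete_user_media -m "SMS" "Old Email"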
parser = argparse.ArgumentParser(
description="Delete the media types that user do not use"
)
parser.add_argument(
"-m",
"--media",
required=True,
type=str,
nargs="+",
help="user media type to delete"
)
parser.set_defaults(handler=main) | zbxtool-cfomp | /zbxtool_cfomp-0.11.12-py3-none-any.whl/lib/commands/delete_user_media.py | delete_user_media.py |
import argparse
import json
import os
import logging
import ldap3
from lib.utils.zbxapis import ZabbixApiUpdate
from lib.utils.format import get_value
from lib.utils.ldapapis import Ldap
def get_hostgrp_users(zapi):
"""
查询所有以 "admins" 结尾的用户组的用户:
1. 用户组和主机组有约定的对应关系,如主机组名为 "XYZ",则主机组对应的用户组为 "XYZ admins";
2. 由于 host inventory 中只能保存两个负责人信息(poc_1, poc_2),取用户组中的前两个用户。
:param zapi:
:return:
"""
grp_ldap_users = dict()
usrgrps = zapi.get_usr_grps(
selectusers=["userid", "name", "surname"],
output=["usrgrpid", "name"],
search={"name": "* admins"},
searchWildcardsEnabled=True
)
for usrgrp in usrgrps:
grp_name = usrgrp.get("name").rsplit(" ", maxsplit=1)[0]
grp_ldap_users[grp_name] = {"GroupName": grp_name}
for i in range(min(len(usrgrp.get("users")), 2)):
ldap_cn = f"{usrgrp.get('users')[i].get('name')} " \
f"{usrgrp.get('users')[i].get('surname')}"
grp_ldap_users[grp_name][f"poc_{i + 1}_dn"] = \
f"cn={ldap_cn},{get_value(section='LDAP', option='LDAP_USER_DN')}"
return grp_ldap_users
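# Illustrative return value of get_hostgrp_users (hypothetical names; the DN
# suffix comes from the LDAP_USER_DN config value):
#   {"XYZ": {"GroupName": "XYZ",
#            "poc_1_dn": "cn=Zhang San,<LDAP_USER_DN>",
#            "poc_2_dn": "cn=Li Si,<LDAP_USER_DN>"}}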
def main(args):
"""main function"""
contacts = dict()
zapi = ZabbixApiUpdate(args.zapi)
ldap = Ldap(
host=args.ldap_server,
port=args.ldap_port,
user=args.ldap_user,
passwd=args.ldap_password
)
if os.path.exists(args.contacts_file):
with open(file=args.contacts_file, mode="r", encoding="utf8") as f_obj:
for info in json.load(f_obj)["HostGroup"]:
contacts[info.get("GroupName")] = info
htgrp_users = get_hostgrp_users(zapi)
htgrp_users.update(contacts)
htgrps = zapi.get_ht_grps(
filter_={"name": list(htgrp_users.keys())},
output=["groupid", "name"],
selecthosts=["hostid"]
)
for htgrp in htgrps:
contact = htgrp_users.get(htgrp.get("name"), {})
inventory = dict()
for i in [1, 2]:
ldap_dn = f"poc_{i}_dn"
if ldap_dn in contact:
poc_info = ldap.search_user(
dn=contact.get(ldap_dn),
filter_="(objectClass=*)",
search_scope=ldap3.BASE,
results="attributes"
)
if poc_info:
inventory[f"poc_{i}_name"] = \
"".join(poc_info.get("sn", "") + poc_info.get("givenName", ""))
inventory[f"poc_{i}_email"] = ",".join(poc_info.get("mail", ""))
inventory[f"poc_{i}_phone_a"] = poc_info.get("telephoneNumber", [""])[0]
inventory[f"poc_{i}_phone_b"] = poc_info.get("telephoneNumber", [""])[-1]
inventory[f"poc_{i}_cell"] = ",".join(poc_info.get("mobile", ""))
inventory[f"poc_{i}_screen"] = ",".join(poc_info.get("uid", ""))
zapi.mass_update_host(
{
"hosts": htgrp.get("hosts"),
"inventory_mode": 1,
"inventory": inventory
}
)
logging.info("\033[32m更新POC信息成功,主机组 -> '%s'\033[0m", htgrp.get("name"))
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--contacts-file", required=True, help="HostGroup contacts file")
parser.add_argument("-l", "--ldap-server", required=True, help="ldap server ip address")
parser.add_argument("-o", "--ldap-port", default=389, help="ldap server port")
parser.add_argument("-u", "--ldap-user", required=True, help="ldap bind user")
parser.add_argument("-p", "--ldap-password", required=True, help="ldap password")
parser.set_defaults(handler=main) | zbxtool-cfomp | /zbxtool_cfomp-0.11.12-py3-none-any.whl/lib/commands/update_hostgrp_poc.py | update_hostgrp_poc.py |
import datetime as python_datetime
def is_aware(time):
return time.utcoffset() is not None
def get_current_timezone():
from django.utils.timezone import get_current_timezone
return get_current_timezone()
def now(tz=None):
"""
Just like django.utils.timezone.now(), except:
Takes a timezone as a param and defaults to non-utc
"""
import pytz
from django.conf import settings
if settings.USE_TZ:
tz = _get_tz(tz)
now_dt = python_datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
return localtime(now_dt, tz=tz)
else:
return python_datetime.datetime.now()
def get_next_weekday(weekday, tz=None):
tnow = now(tz)
weekdays = {
'monday': 0,
'tuesday': 1,
'wednesday': 2,
'thursday': 3,
'friday': 4,
'saturday': 5,
'sunday': 6
}
weekday_num = weekdays[weekday.lower()]
days_ahead = weekday_num - tnow.weekday()
if days_ahead <= 0: # Target day already happened this week
days_ahead += 7
return tnow + python_datetime.timedelta(days_ahead)
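# Example (assuming "now" falls on a Wednesday):
#   get_next_weekday('friday')    -> this week's Friday
#   get_next_weekday('wednesday') -> next week's Wednesday (never today)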
def localtime(value, tz=None):
"""
Converts an aware datetime.datetime to a given time zone, defaults to currently activated time zone
This method is taken almost directly from a later version of django.
WARNING: In some cases (like with time math) normalize can cause times to shift by an hour
when correcting the offset.
"""
tz = _get_tz(tz)
# If `value` is naive, astimezone() will raise a ValueError,
# so we don't need to perform a redundant check.
value = value.astimezone(tz)
if hasattr(tz, 'normalize'):
# This method is available for pytz time zones.
value = tz.normalize(value)
return value
def deactivate():
from django.utils.timezone import deactivate
deactivate()
def activate(value):
"""
Override of django.utils.timezone.activate, but will first check
if the value has a get_timezone method. If it does, the this method
will use the value returned by get_timezone
"""
import pytz
if hasattr(value, 'get_timezone'):
value = value.get_timezone()
if isinstance(value, str):
value = pytz.timezone(value)
assert isinstance(value, python_datetime.tzinfo), 'Value passed was not tzinfo, it was: %s' % type(value)
from django.utils.timezone import activate as timezone_activate
return timezone_activate(value)
def is_daylight_savings_time(value):
"""
Determines if the value is in daylight savings time.
Can either take an aware datetime or a timezone object
"""
new_datetime = _get_datetime_from_ambiguous_value(value)
return new_datetime.dst() != python_datetime.timedelta(0)
def get_timezone_name(value):
"""
Returns the current timezone name (PDT/PST).
"""
return _get_datetime_from_ambiguous_value(value).strftime('%Z')
def get_timezone_offset(value):
"""
Returns the current timezone offset (-0800).
"""
return _get_datetime_from_ambiguous_value(value).strftime('%z')
def timezone_abbrv_mappings():
"""
By default, dateutil doesn't parse at least `EDT` correctly.
Pass output of this function as `tzinfos` param to parse() if it isn't pickin up timezone correctly.
"""
from dateutil.tz import gettz
return {'EDT': gettz('America/New_York'),
'EST': gettz('America/New_York'),
'CDT': gettz('America/Chicago'),
'CST': gettz('America/Chicago'),
'MDT': gettz('America/Denver'),
'MST': gettz('America/Denver'),
'PDT': gettz('America/Los_Angeles'),
'PST': gettz('America/Los_Angeles')}
def _get_datetime_from_ambiguous_value(value):
if isinstance(value, python_datetime.datetime):
new_datetime = localtime(value, tz=value.tzinfo)
elif isinstance(value, python_datetime.tzinfo):
new_datetime = now(tz=value)
else:
raise Exception('value was not a timezone or a date, it was: %s' % type(value))
return new_datetime
def combine(date, time, tz=None):
"""
Like datetime.datetime.combine, but make it aware.
    Prefers the timezone that is passed in, followed by time.tzinfo, and then get_current_timezone
"""
from django.utils.timezone import make_aware
if tz is None:
tz = time.tzinfo
tz = _get_tz(tz)
combined = python_datetime.datetime.combine(date, time)
return combined if is_aware(combined) else make_aware(combined, tz)
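# Example sketch (hypothetical values): with the current time zone active,
# combine(python_datetime.date(2014, 7, 1), python_datetime.time(9, 30))
# returns an aware datetime for 2014-07-01 09:30 in that zone.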
def parse(date_string, **kwargs):
""" A wrapper around python-dateutil's parse function which ensures it always returns an aware datetime """
from dateutil.parser import parse as datetime_parser
from django.utils.timezone import make_aware
parsed = datetime_parser(date_string, **kwargs)
# Make aware
parsed = parsed if is_aware(parsed) else make_aware(parsed, _get_tz())
# Ensure that we have the correct offset, while also keeping what was passed in.
original = parsed
parsed = localtime(parsed, tz=parsed.tzinfo).replace(
year=original.year,
month=original.month,
day=original.day,
hour=original.hour
)
return parsed
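# Example sketch (hypothetical input): parse('2014-07-01 09:30') returns an
# aware datetime in the active time zone; pass tzinfos=timezone_abbrv_mappings()
# if abbreviations like 'EDT' need to resolve correctly.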
def datetime(year, month, day, hour=0, minute=0, second=0, microsecond=0, tzinfo=None):
"""
A wrapper around datetime.datetime(), but ensures that the returned datetime is always
timezone aware.
"""
from django.utils.timezone import is_naive, make_aware
tzinfo = _get_tz(tzinfo)
dt = python_datetime.datetime(year, month, day, hour, minute, second, microsecond, tzinfo)
if is_naive(dt):
dt = make_aware(dt, tzinfo)
dt = localtime(dt, tz=tzinfo) # Have to set the correct offset
# Setting the offset may have changed something else, like the hour, so replace
return dt.replace(
year=year, month=month, day=day, hour=hour, minute=minute, second=second, microsecond=microsecond)
def datetime_min():
""" Returns datetime.datetime.min, but timezone aware """
from django.utils.timezone import get_default_timezone
return python_datetime.datetime.min.replace(tzinfo=get_default_timezone())
def datetime_max():
""" Returns datetime.datetime.max, but timezone aware """
from django.utils.timezone import get_default_timezone
return python_datetime.datetime.max.replace(tzinfo=get_default_timezone())
def math(date, op, delta, keep_hour=False):
"""
Performs operator math on datetime and timezone objects.
This is needed when crossing daylight savings time thresholds to maintain
the correct offset.
WARNING FOR NON-UTC DATETIMES:
If the daylight savings time threshold is crossed, the hour could change from under you.
If this is not desired behaviour, pass in keep_hour=True.
For example, if you have 7/1/2014 at midnight and you add 180 days to it, and keep_hour=False,
it will return 12/27/2014 at 11 p.m. -- NOT 12/28/2014 at midnight like you might would expect.
This is caused by pytz.normalize method.
"""
converted = op(date, delta)
original = converted
converted = localtime(converted, tz=converted.tzinfo) # Need to localize to get the timezone offset correct
if keep_hour:
if is_daylight_savings_time(date) != is_daylight_savings_time(converted):
# Crossed the DST threshold
# The hour doesn't change if datetime +/- timedelta
# But does change when crossing DST and localizing
converted = converted.replace(
year=original.year,
month=original.month,
day=original.day,
hour=original.hour
)
return converted
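# Example mirroring the docstring's warning (assumes America/Los_Angeles is active):
#   import operator
#   start = datetime(2014, 7, 1)  # midnight PDT
#   math(start, operator.add, python_datetime.timedelta(days=180))
#       -> 2014-12-27 23:00 PST (hour shifted across the DST boundary)
#   math(start, operator.add, python_datetime.timedelta(days=180), keep_hour=True)
#       -> 2014-12-28 00:00 PST (hour preserved)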
def javascript_iso_format(date):
import pytz
date = localtime(date, tz=pytz.utc)
return date.strftime('%Y-%m-%dT%H:%M:%S') + 'Z'
def monthly_iter(start, end):
"""
Iterates on a monthly basis, and wraps around dateutil.rrule
Example:
In [1]: from zc_common import timezone
In [2]: for m in timezone.monthly_iter(timezone.datetime(2014, 10, 1), timezone.now()):
...: print m
...:
2014-10-01 00:00:00-07:00
2014-11-01 00:00:00-07:00
In [3]: for m in timezone.monthly_iter(timezone.datetime(2014, 10, 19), timezone.now()):
...: print m
...:
2014-10-19 00:00:00-07:00
In [4]: timezone.now()
Out[4]: datetime.datetime(
2014, 11, 18, 16, 36, 54, 994666, tzinfo=<DstTzInfo 'America/Los_Angeles' PST-1 day, 16:00:00 STD>)
"""
from dateutil import rrule
for date in rrule.rrule(rrule.MONTHLY, dtstart=start, until=end):
yield date
def weekly_iter(start, end, day=False):
"""
Iterates weekly, wrapper around rrule.
In [2]: for w in timezone.weekly_iter(timezone.datetime(2014, 7, 1), timezone.datetime(2014, 7, 31)):
...: print w
2014-07-01 00:00:00-07:00
2014-07-08 00:00:00-07:00
2014-07-15 00:00:00-07:00
2014-07-22 00:00:00-07:00
2014-07-29 00:00:00-07:00
"""
from dateutil import rrule
if day:
while start.isoweekday() != day:
start = start + python_datetime.timedelta(days=1)
for date in rrule.rrule(rrule.WEEKLY, dtstart=start, until=end):
yield date
def to_start_of_month(time):
original_month = time.month
original_year = time.year
    time = time.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
time = localtime(time, time.tzinfo)
# If the time crossed DST, the month and/or year may have changed.
return time.replace(year=original_year, month=original_month,
day=1, hour=0, minute=0, second=0, microsecond=0)
def to_end_of_month(time):
original_month = time.month
original_year = time.year
last_day = get_last_day_of_month(time)
    time = time.replace(day=last_day, hour=23, minute=59, second=59, microsecond=999999)
time = localtime(time, time.tzinfo)
# If the time crossed DST, the month and/or year may have changed.
return time.replace(year=original_year, month=original_month,
day=last_day, hour=23, minute=59, second=59, microsecond=999999)
def to_start_of_day(time):
return time.replace(hour=0, minute=0, second=0, microsecond=0)
def to_end_of_day(time):
return time.replace(hour=23, minute=59, second=59, microsecond=999999)
def to_start_of_week(time):
    # isoweekday() is 1 (Monday) through 7 (Sunday), so this always rewinds to Monday
    return time - python_datetime.timedelta(days=time.isoweekday() - 1)
def to_end_of_week(time):
    # isoweekday() is 1 (Monday) through 7 (Sunday), so this always advances to Sunday
    return time + python_datetime.timedelta(days=7 - time.isoweekday())
def get_last_day_of_month(time):
import calendar
return calendar.monthrange(time.year, time.month)[1]
def is_business_day(time, include_weekends=True):
"""
Determines if the current date is not a holiday.
By default this includes weekends.
"""
from django.conf import settings
return not ((include_weekends and time.date().weekday() in [5, 6]) or # saturday, sunday
time.date() in settings.ZEROCATER_HOLIDAYS)
def _get_tz(tz=None):
# Always get the current timezone, unless something is passed in
from django.utils.timezone import get_current_timezone
return tz if tz else get_current_timezone()
# http://aboutsimon.com/2013/06/05/datetime-hell-time-zone-aware-to-unix-timestamp/
def convert_to_timestamp(dt):
import calendar
import pytz
if is_aware(dt):
if dt.tzinfo != pytz.utc:
dt = dt.astimezone(pytz.utc)
return calendar.timegm(dt.timetuple())
else:
raise Exception('Can only convert aware datetimes to timestamps')
def convert_from_timestamp(timestamp):
import pytz
return python_datetime.datetime.utcfromtimestamp(timestamp).replace(tzinfo=pytz.utc) | zc-common | /zc_common-0.4.17-py3-none-any.whl/zc_common/timezone.py | timezone.py |
import re
from distutils.util import strtobool
from django.contrib.postgres.forms import SimpleArrayField
from django.contrib.postgres.fields import ArrayField
from django.db.models import BooleanField, FieldDoesNotExist, ForeignKey
from django.db.models.fields.related import ManyToManyField
from django import forms
from django.utils import six
# DjangoFilterBackend was moved to django-filter and deprecated/moved from DRF in version 3.6
try:
from rest_framework.filters import DjangoFilterBackend, Filter
from rest_framework import filterset
except ImportError:
from django_filters.rest_framework import DjangoFilterBackend, filterset
from django_filters.rest_framework.filters import Filter
from django_filters.filters import ModelChoiceFilter
# remote_model() was removed from django_filters in 2.0
try:
try:
from rest_framework.compat import get_related_model as remote_model
except ImportError:
from django_filters.compat import remote_model
except ImportError:
pass
def remote_queryset(field):
# remote_model() was removed from django_filters in 2.0
try:
model = remote_model(field)
    except Exception:
model = field.remote_field.model
limit_choices_to = field.get_limit_choices_to()
return model._base_manager.complex_filter(limit_choices_to)
class ArrayFilter(Filter):
field_class = SimpleArrayField
@property
def field(self):
# This property needs to be overriden because filters.Filter does not instantiate field_class with any
# args by default, and SimpleArrayField requires an argument indicating the type of each element in the array
self._field = self.field_class(forms.CharField(), required=False)
return self._field
class JSONAPIFilterSet(filterset.FilterSet):
class Meta:
strict = True
filter_overrides = {
ArrayField: {
'filter_class': ArrayFilter,
'extra': lambda f: {
'lookup_expr': 'contains',
}
},
# Overrides default definition in django_filters to allow us to use our own definition of
# `remote_queryset`, which looks up allowable values via `_base_manager` rather than `_default_manager`
ForeignKey: {
'filter_class': ModelChoiceFilter,
'extra': lambda f: {
'queryset': remote_queryset(f),
}
},
}
class JSONAPIFilterBackend(DjangoFilterBackend):
default_filter_set = JSONAPIFilterSet
# This method takes the filter query string (looks something like ?filter[xxx]=yyy) and parses into parameters
# that django_filters can interface with.
#
# Handles:
# ?filter[id]=1
# ?filter[id__in]=1,2,3
# ?filter[price__gte]=100
# ?filter[relatedobject__relatedobject]=1
# ?filter[relatedobject__relatedobject__in]=1,2,3
# ?filter[delivery_days__contains]=true # filtering on ArrayField
# ?filter[active]=1 # filtering on Boolean values of 1, 0, true or false
def _parse_filter_string(self, queryset, filter_class, filter_string, filter_value):
filter_string_parts = filter_string.split('__')
if len(filter_string_parts) > 1:
field_name = '__'.join(filter_string_parts[:-1])
else:
field_name = filter_string_parts[0]
# Translates the 'id' in ?filter[id]= into the primary key identifier, e.g. 'pk'
if field_name == 'id':
primary_key = queryset.model._meta.pk.name
field_name = primary_key
try:
is_many_to_many_field = isinstance(getattr(queryset.model, filter_string).field, ManyToManyField)
if is_many_to_many_field:
filter_value = filter_value.split(',')
except AttributeError:
pass
try:
field_filter = filter_class.get_filters().get(field_name, None)
is_array_filter = isinstance(field_filter, ArrayFilter)
if is_array_filter:
filter_value = filter_value.split(',')
except AttributeError:
pass
# Allow 'true' or 'false' as values for boolean fields
try:
if isinstance(queryset.model._meta.get_field(field_name), BooleanField):
filter_value = bool(strtobool(filter_value))
except FieldDoesNotExist:
pass
filterset_data = {
'field_name': field_name,
'field_name_with_lookup': filter_string,
'filter_value': filter_value
}
return filterset_data
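    # Illustrative parse (hypothetical model with a BooleanField "active"):
    # ?filter[active]=true yields
    #   {'field_name': 'active', 'field_name_with_lookup': 'active', 'filter_value': True}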
def filter_queryset(self, request, queryset, view):
filter_class = self.get_filter_class(view, queryset)
filters = []
for param, value in six.iteritems(request.query_params):
match = re.search(r'^filter\[(\w+)\]$', param)
if match:
filter_string = match.group(1)
parsed_filter_string = self._parse_filter_string(queryset, filter_class, filter_string, value)
filters.append(parsed_filter_string)
for filter_ in filters:
if filter_['field_name'] not in view.filter_fields.keys():
return queryset.none()
filterset_data = {filter_['field_name_with_lookup']: filter_['filter_value'] for filter_ in filters}
if filter_class:
return filter_class(filterset_data, queryset=queryset).qs
return queryset | zc-common | /zc_common-0.4.17-py3-none-any.whl/zc_common/remote_resource/filters.py | filters.py |
import copy
from collections import OrderedDict
import os
import ujson
import inflection
from django.db.models import Manager
from django.utils import six
from rest_framework import relations
from rest_framework.serializers import BaseSerializer, Serializer, ListSerializer
from rest_framework.settings import api_settings
from rest_framework_json_api import utils
from rest_framework_json_api import renderers
from zc_common.remote_resource.relations import RemoteResourceField
from zc_common.remote_resource import utils as zc_common_utils
from zc_events.exceptions import RequestTimeout
core_module_name = os.environ.get('DJANGO_SETTINGS_MODULE').split('.')[0]
core_module = __import__(core_module_name)
event_client = core_module.event_client
# `format_keys()` was replaced with `format_field_names()` from rest_framework_json_api in 3.0.0
def key_formatter():
try:
return zc_common_utils.format_keys
except AttributeError:
return utils.format_keys
class RemoteResourceIncludeError(Exception):
def __init__(self, field, data=None):
self.field = field
self.message = "There was an error including the field {}".format(field)
data['meta'] = {'include_field': field}
self.data = [data]
def __str__(self):
return self.message
class RemoteResourceIncludeTimeoutError(RemoteResourceIncludeError):
def __init__(self, field):
self.field = field
self.message = "Timeout error requesting remote resource {}".format(field)
self.data = [{
"status": "503",
"source": {
"pointer": "/data"
},
"meta": {
"include_field": field,
},
"detail": self.message
}]
class JSONRenderer(renderers.JSONRenderer):
"""
    This is a modification of the renderers in (v 2.2)
https://github.com/django-json-api/django-rest-framework-json-api
"""
@classmethod
def extract_attributes(cls, fields, resource):
"""
@amberylx 2020-01-10: Copied from djangorestframework-jsonapi v3.0.0 in order to override the call to
`utils.format_field_names(data)` to our own function of `format_keys()`, which is a copy of the library's
old (pre-v3.0) function.
"""
data = OrderedDict()
for field_name, field in iter(fields.items()):
# ID is always provided in the root of JSON API so remove it from attributes
if field_name == 'id':
continue
# don't output a key for write only fields
if fields[field_name].write_only:
continue
# Skip fields with relations
if isinstance(
field, (relations.RelatedField, relations.ManyRelatedField, BaseSerializer)
):
continue
# Skip read_only attribute fields when `resource` is an empty
# serializer. Prevents the "Raw Data" form of the browsable API
# from rendering `"foo": null` for read only fields
try:
resource[field_name]
except KeyError:
if fields[field_name].read_only:
continue
data.update({
field_name: resource.get(field_name)
})
return key_formatter()(data)
@classmethod
def extract_included(cls, request, fields, resource, resource_instance, included_resources):
# this function may be called with an empty record (example: Browsable Interface)
if not resource_instance:
return
included_data = list()
current_serializer = fields.serializer
context = current_serializer.context
included_serializers = utils.get_included_serializers(current_serializer)
included_resources = copy.copy(included_resources)
included_resources = [inflection.underscore(value) for value in included_resources]
for field_name, field in six.iteritems(fields):
# Skip URL field
if field_name == api_settings.URL_FIELD_NAME:
continue
# Skip fields without relations or serialized data
if not isinstance(field, (relations.RelatedField, relations.ManyRelatedField, BaseSerializer)):
continue
try:
included_resources.remove(field_name)
except ValueError:
# Skip fields not in requested included resources
# If no child field, directly continue with the next field
if field_name not in [node.split('.')[0] for node in included_resources]:
continue
try:
relation_instance = getattr(resource_instance, field_name)
except AttributeError:
try:
# For ManyRelatedFields if `related_name` is not set we need to access `foo_set` from `source`
relation_instance = getattr(resource_instance, field.child_relation.source)
except AttributeError:
if not hasattr(current_serializer, field.source):
continue
serializer_method = getattr(current_serializer, field.source)
relation_instance = serializer_method(resource_instance)
if isinstance(relation_instance, Manager):
relation_instance = relation_instance.all()
new_included_resources = [key.replace('%s.' % field_name, '', 1)
for key in included_resources
if field_name == key.split('.')[0]]
serializer_data = resource.get(field_name)
if isinstance(field, RemoteResourceField):
user_id = getattr(request.user, 'id', None)
roles = request.user.roles
pk = serializer_data.get('id')
include = ",".join(new_included_resources)
try:
remote_resource = event_client.get_remote_resource_data(
field_name, pk=pk, user_id=user_id,
include=include, page_size=1000, roles=roles)
body = ujson.loads(remote_resource['body'])
if 400 <= remote_resource['status'] < 600:
raise RemoteResourceIncludeError(field_name, body["errors"][0])
except RequestTimeout:
raise RemoteResourceIncludeTimeoutError(field_name)
included_data.append(body['data'])
if body.get('included'):
included_data.extend(body['included'])
# We continue here since RemoteResourceField inherits
# form ResourceRelatedField which is a RelatedField
continue
if isinstance(field, relations.ManyRelatedField):
serializer_class = included_serializers[field_name]
field = serializer_class(relation_instance, many=True, context=context)
serializer_data = field.data
if isinstance(field, relations.RelatedField):
if relation_instance is None:
continue
many = field._kwargs.get('child_relation', None) is not None
serializer_class = included_serializers[field_name]
field = serializer_class(relation_instance, many=many, context=context)
serializer_data = field.data
if isinstance(field, ListSerializer):
serializer = field.child
relation_type = utils.get_resource_type_from_serializer(serializer)
relation_queryset = list(relation_instance)
# Get the serializer fields
serializer_fields = utils.get_serializer_fields(serializer)
if serializer_data:
for position in range(len(serializer_data)):
serializer_resource = serializer_data[position]
nested_resource_instance = relation_queryset[position]
resource_type = (
relation_type or
utils.get_resource_type_from_instance(nested_resource_instance)
)
included_data.append(
cls.build_json_resource_obj(
serializer_fields, serializer_resource, nested_resource_instance, resource_type
)
)
included_data.extend(
cls.extract_included(
request, serializer_fields, serializer_resource,
nested_resource_instance, new_included_resources
)
)
if isinstance(field, Serializer):
relation_type = utils.get_resource_type_from_serializer(field)
# Get the serializer fields
serializer_fields = utils.get_serializer_fields(field)
if serializer_data:
included_data.append(
cls.build_json_resource_obj(
serializer_fields, serializer_data,
relation_instance, relation_type)
)
included_data.extend(
cls.extract_included(
request, serializer_fields, serializer_data,
relation_instance, new_included_resources
)
)
return key_formatter()(included_data)
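    # Note: the remote includes above are fetched synchronously through the event
    # client (get_remote_resource_data); a timeout surfaces to the client as the
    # 503-style JSON:API error built by RemoteResourceIncludeTimeoutError.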
def render(self, data, accepted_media_type=None, renderer_context=None):
view = renderer_context.get("view", None)
request = renderer_context.get("request", None)
# Get the resource name.
resource_name = utils.get_resource_name(renderer_context)
# If this is an error response, skip the rest.
if resource_name == 'errors':
return self.render_errors(data, accepted_media_type, renderer_context)
# if response.status_code is 204 then the data to be rendered must
# be None
response = renderer_context.get('response', None)
if response is not None and response.status_code == 204:
return super(renderers.JSONRenderer, self).render(
None, accepted_media_type, renderer_context
)
from rest_framework_json_api.views import RelationshipView
if isinstance(view, RelationshipView):
return self.render_relationship_view(data, accepted_media_type, renderer_context)
# If `resource_name` is set to None then render default as the dev
# wants to build the output format manually.
if resource_name is None or resource_name is False:
return super(renderers.JSONRenderer, self).render(
data, accepted_media_type, renderer_context
)
json_api_data = data
json_api_included = list()
# initialize json_api_meta with pagination meta or an empty dict
json_api_meta = data.get('meta', {}) if isinstance(data, dict) else {}
if data and 'results' in data:
serializer_data = data["results"]
else:
serializer_data = data
serializer = getattr(serializer_data, 'serializer', None)
included_resources = utils.get_included_resources(request, serializer)
if serializer is not None:
# Get the serializer fields
fields = utils.get_serializer_fields(serializer)
# Extract root meta for any type of serializer
json_api_meta.update(self.extract_root_meta(serializer, serializer_data))
try:
if getattr(serializer, 'many', False):
json_api_data = list()
for position in range(len(serializer_data)):
resource = serializer_data[position] # Get current resource
resource_instance = serializer.instance[position] # Get current instance
json_resource_obj = self.build_json_resource_obj(
fields, resource, resource_instance, resource_name)
meta = self.extract_meta(serializer, resource)
if meta:
json_resource_obj.update({'meta': key_formatter()(meta)})
json_api_data.append(json_resource_obj)
included = self.extract_included(request, fields, resource,
resource_instance, included_resources)
if included:
json_api_included.extend(included)
else:
resource_instance = serializer.instance
json_api_data = self.build_json_resource_obj(fields, serializer_data,
resource_instance, resource_name)
meta = self.extract_meta(serializer, serializer_data)
if meta:
json_api_data.update({'meta': key_formatter()(meta)})
included = self.extract_included(request, fields, serializer_data,
resource_instance, included_resources)
if included:
json_api_included.extend(included)
except RemoteResourceIncludeError as e:
            return self.render_errors(e.data, accepted_media_type, renderer_context)
# Make sure we render data in a specific order
render_data = OrderedDict()
if isinstance(data, dict) and data.get('links'):
render_data['links'] = data.get('links')
# format the api root link list
if view.__class__ and view.__class__.__name__ == 'APIRoot':
render_data['data'] = None
render_data['links'] = json_api_data
else:
render_data['data'] = json_api_data
if len(json_api_included) > 0:
# Iterate through compound documents to remove duplicates
seen = set()
unique_compound_documents = list()
for included_dict in json_api_included:
                type_tuple = (included_dict['type'], included_dict['id'])
if type_tuple not in seen:
seen.add(type_tuple)
unique_compound_documents.append(included_dict)
# Sort the items by type then by id
render_data['included'] = sorted(unique_compound_documents, key=lambda item: (item['type'], item['id']))
if json_api_meta:
render_data['meta'] = key_formatter()(json_api_meta)
return super(renderers.JSONRenderer, self).render(
render_data, accepted_media_type, renderer_context
        )

# ---- zc_common/remote_resource/renderers.py (zc-common) ----
from collections import OrderedDict
from django.db.models import OneToOneField
from django.db.models.fields import related
from rest_framework.relations import ManyRelatedField
from rest_framework.serializers import DecimalField
from rest_framework.settings import api_settings
from rest_framework.utils.field_mapping import ClassLookupDict
from rest_framework_json_api.metadata import JSONAPIMetadata
from rest_framework_json_api.relations import ResourceRelatedField
from rest_framework_json_api.utils import get_related_resource_type
from zc_common.remote_resource.relations import RemoteResourceField
from zc_common.remote_resource.models import GenericRemoteForeignKey, RemoteForeignKey
class RelationshipMetadata(JSONAPIMetadata):
relation_type_lookup = ClassLookupDict({
related.ManyToManyDescriptor: 'ManyToMany',
related.ReverseManyToOneDescriptor: 'OneToMany',
related.ForwardManyToOneDescriptor: 'ManyToOne',
related.ReverseOneToOneDescriptor: 'OneToOne',
OneToOneField: 'OneToOne',
RemoteForeignKey: 'ManyToOne',
GenericRemoteForeignKey: 'ManyToOne'
})
def get_serializer_info(self, serializer):
"""
@amberylx 2020-01-10: Copied from djangorestframework-jsonapi v2.2.0 in order to remove the call to
`format_value` on the keys of the metadata object.
"""
if hasattr(serializer, 'child'):
# If this is a `ListSerializer` then we want to examine the
# underlying child serializer instance instead.
serializer = serializer.child
# Remove the URL field if present
serializer.fields.pop(api_settings.URL_FIELD_NAME, None)
return OrderedDict([
(field_name, self.get_field_info(field))
for field_name, field in serializer.fields.items()
])
def get_field_info(self, field):
field_info = super(RelationshipMetadata, self).get_field_info(field)
if isinstance(field, (RemoteResourceField, ManyRelatedField, ResourceRelatedField)):
model_class = field.parent.Meta.model
model_field = getattr(model_class, field.source)
if hasattr(model_field, 'field') and isinstance(model_field.field, OneToOneField):
# ForwardManyToOneDescriptor is used for OneToOneField also, so we have to override
model_field = model_field.field
field_info['relationship_type'] = self.relation_type_lookup[model_field]
field_info['relationship_resource'] = get_related_resource_type(field)
if field_info['relationship_resource'] == 'RemoteResource':
field_info['relationship_resource'] = model_field.type
if isinstance(field, DecimalField):
field_info['decimal_places'] = getattr(field, 'decimal_places', 2)
        return field_info

# ---- zc_common/remote_resource/metadata.py (zc-common) ----
from django.db.models import CharField, TextField
from django.db.models import Model
from django.db.models.manager import Manager
from django.db.models.query import QuerySet
from rest_framework import viewsets
from rest_framework.exceptions import MethodNotAllowed
from rest_framework_json_api.views import RelationshipView as OldRelView
from zc_common.remote_resource.models import RemoteResource
from zc_common.remote_resource.serializers import ResourceIdentifierObjectSerializer
class ModelViewSet(viewsets.ModelViewSet):
"""
This class overwrites the ModelViewSet's list method, which handles
requests made to the collection's base endpoint (/collection), in
order to provide support for filtering via the filter[] query parameter.
Inheriting from this class, along with adding the filter backend, will properly
handle requests made to /collection as well as /collection?filter[name]=test.
It's also possible to filter by a collection of primary keys, for example:
/collection?filter[id__in]=1,2,3
Requests to filter on keys that do not exist will return an empty set.
"""
@property
def filter_fields(self):
queryset = self.get_queryset()
return_fields = {}
fields = queryset.model._meta.get_fields()
for field in fields:
# For backwards compatibility GenericForeignKey should not be
# included in the results.
if field.is_relation and field.many_to_one and field.related_model is None:
continue
# Relations to child proxy models should not be included.
if (field.model != queryset.model._meta.model and
field.model._meta.concrete_model == queryset.model._meta.concrete_model):
continue
name = field.attname if hasattr(field, 'attname') else field.name
if hasattr(field, 'primary_key') and field.primary_key:
return_fields['id'] = ['in', 'exact']
elif CharField in field.__class__.__mro__ or TextField in field.__class__.__mro__:
return_fields[name] = ['icontains', 'exact']
else:
return_fields[name] = ['exact']
return return_fields
def has_ids_query_params(self):
return hasattr(self.request, 'query_params') and 'filter[id__in]' in self.request.query_params
class RelationshipView(OldRelView):
serializer_class = ResourceIdentifierObjectSerializer
def patch(self, request, *args, **kwargs):
"""
Restricting PATCH requests made to the relationship view temporarily to
prevent the possibility of data corruption when PATCH requests are made
to to-many related resources. This override will not be necessary
once a fix is made upstream.
See:
https://github.com/django-json-api/django-rest-framework-json-api/issues/242
"""
raise MethodNotAllowed('PATCH')
def _instantiate_serializer(self, instance):
if isinstance(instance, RemoteResource):
return ResourceIdentifierObjectSerializer(instance=instance)
if isinstance(instance, Model) or instance is None:
return self.get_serializer(instance=instance)
else:
if isinstance(instance, (QuerySet, Manager)):
instance = instance.all()
            return self.get_serializer(instance=instance, many=True)

# ---- zc_common/remote_resource/views.py (zc-common) ----
import ujson
from rest_framework import parsers
from rest_framework.exceptions import ParseError
from rest_framework_json_api import utils, renderers, exceptions
from zc_common.remote_resource import utils as zc_common_utils
# `format_keys()` was replaced with `format_field_names()` from rest_framework_json_api in 3.0.0
def key_formatter():
try:
return zc_common_utils.format_keys
except AttributeError:
return utils.format_keys
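# Illustrative sketch (not part of the original module): key_formatter()
# returns a callable that re-cases dict keys. Assuming the default
# JSON_API_FORMAT_KEYS = 'camelize' setting, it behaves roughly like:
#
#     format_fn = key_formatter()
#     format_fn({'first_name': 'John'})                # -> {'firstName': 'John'}
#     format_fn({'firstName': 'John'}, 'underscore')   # -> {'first_name': 'John'}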
class JSONParser(parsers.JSONParser):
"""
A JSON API client will send a payload that looks like this:
{
"data": {
"type": "identities",
"id": 1,
"attributes": {
"first_name": "John",
"last_name": "Coltrane"
}
}
}
We extract the attributes so that DRF serializers can work as normal.
"""
media_type = 'application/vnd.api+json'
renderer_class = renderers.JSONRenderer
@staticmethod
def parse_attributes(data):
return key_formatter()(data.get('attributes'), 'underscore') if data.get('attributes') else dict()
@staticmethod
def parse_relationships(data):
relationships = (key_formatter()(data.get('relationships'), 'underscore')
if data.get('relationships') else dict())
# Parse the relationships
parsed_relationships = dict()
for field_name, field_data in relationships.items():
field_data = field_data.get('data')
if isinstance(field_data, dict) or field_data is None:
parsed_relationships[field_name] = field_data
elif isinstance(field_data, list):
parsed_relationships[field_name] = list(relation for relation in field_data)
return parsed_relationships
@staticmethod
def parse_metadata(result):
metadata = result.get('meta')
if metadata:
return {'_meta': metadata}
else:
return {}
def parse(self, stream, media_type=None, parser_context=None):
"""
Parses the incoming bytestream as JSON and returns the resulting data
"""
if hasattr(stream, 'raw_body'):
result = stream.raw_body
else:
# Handles requests created by Django's test client, which is missing the raw_body attribute set in
# the Django request-like object initialized by our zc_event event client
try:
result = ujson.loads(stream.body)
except ValueError:
result = {}
data = result.get('data')
if data:
from rest_framework_json_api.views import RelationshipView
if isinstance(parser_context['view'], RelationshipView):
                # Treat the data as a JSONAPI Resource Identifier Object rather than a regular Resource Object
if isinstance(data, list):
for resource_identifier_object in data:
if not (resource_identifier_object.get('id') and resource_identifier_object.get('type')):
raise ParseError(
'Received data contains one or more malformed JSONAPI Resource Identifier Object(s)'
)
elif not (data.get('id') and data.get('type')):
raise ParseError('Received data is not a valid JSONAPI Resource Identifier Object')
return data
request = parser_context.get('request')
# Check for inconsistencies
resource_name = utils.get_resource_name(parser_context)
view = parser_context.get('view')
if data.get('type') != resource_name and request.method in ('PUT', 'POST', 'PATCH'):
raise exceptions.Conflict(
"The resource object's type ({data_type}) is not the type "
"that constitute the collection represented by the endpoint ({resource_type}).".format(
data_type=data.get('type'),
resource_type=resource_name
)
)
# Construct the return data
parsed_data = {'id': data.get('id')}
parsed_data.update(self.parse_attributes(data))
parsed_data.update(self.parse_relationships(data))
parsed_data.update(self.parse_metadata(result))
return parsed_data
else:
            raise ParseError('Received document does not contain primary data')

# ---- zc_common/remote_resource/parsers.py (zc-common) ----
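# Illustrative sketch for the JSONParser above (the view/request wiring is
# hypothetical). Given the payload shown in the class docstring, parse()
# flattens the attributes and keeps the id:
#
#     parsed = JSONParser().parse(stream,
#                                 parser_context={'view': view, 'request': request})
#     # parsed == {'id': 1, 'first_name': 'John', 'last_name': 'Coltrane'}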
from collections import OrderedDict
import ujson
import six
from django.db.models.manager import BaseManager
from rest_framework_json_api.relations import ResourceRelatedField
from zc_common.remote_resource.models import RemoteResource
class RemoteResourceField(ResourceRelatedField):
def __init__(self, related_resource_path=None, **kwargs):
if 'model' not in kwargs:
kwargs['model'] = RemoteResource
if not kwargs.get('read_only', None):
# The queryset is required to be not None, but not used
# due to the overriding of the methods below.
kwargs['queryset'] = {}
if related_resource_path is None:
raise NameError('related_resource_path parameter must be provided')
self.related_resource_path = related_resource_path
super(RemoteResourceField, self).__init__(**kwargs)
def get_links(self, obj=None, lookup_field='pk'):
request = self.context.get('request', None)
view = self.context.get('view', None)
return_data = OrderedDict()
kwargs = {lookup_field: getattr(obj, lookup_field) if obj else view.kwargs[lookup_field]}
self_kwargs = kwargs.copy()
self_kwargs.update({'related_field': self.field_name if self.field_name else self.parent.field_name})
self_link = self.get_url('self', self.self_link_view_name, self_kwargs, request)
# Construct the related link using the passed related_resource_path
# self.source is the field name; getattr(obj, self.source) returns the
# RemoteResource object or RelatedManager in the case of a to-many relationship.
related_obj = getattr(obj, self.source)
if related_obj and related_obj.id:
if isinstance(related_obj, BaseManager):
list_of_ids = related_obj.values_list('pk', flat=True)
query_parameters = 'filter[id__in]={}'.format(','.join([str(pk) for pk in list_of_ids]))
related_path = self.related_resource_path.format(pk=query_parameters)
else:
related_path = self.related_resource_path.format(pk=related_obj.id)
related_link = request.build_absolute_uri(related_path)
else:
related_link = None
if self_link:
return_data.update({'self': self_link})
if related_link:
return_data.update({'related': related_link})
return return_data
def to_internal_value(self, data):
if isinstance(data, six.text_type):
try:
data = ujson.loads(data)
except ValueError:
self.fail('incorrect_type', data_type=type(data).__name__)
if not isinstance(data, dict):
self.fail('incorrect_type', data_type=type(data).__name__)
if 'type' not in data:
self.fail('missing_type')
if 'id' not in data:
self.fail('missing_id')
return RemoteResource(data['type'], data['id'])
def to_representation(self, value):
        return OrderedDict([('type', value.type), ('id', str(value.id))])

# ---- zc_common/remote_resource/relations.py (zc-common) ----
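# Illustrative sketch for the RemoteResourceField above: it round-trips
# between JSON API resource identifiers and RemoteResource objects
# (the type/id values are hypothetical).
#
#     field = RemoteResourceField(related_resource_path='/users/{pk}')
#     remote = field.to_internal_value({'type': 'User', 'id': '7'})
#     # remote.type == 'User'; remote.id == '7'
#     field.to_representation(remote)
#     # -> OrderedDict([('type', 'User'), ('id', '7')])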
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from rest_framework_json_api.utils import (
get_resource_type_from_model, get_resource_type_from_instance)
from zc_common.remote_resource.models import RemoteResource
class ResourceIdentifierObjectSerializer(serializers.BaseSerializer):
default_error_messages = {
'incorrect_model_type': _('Incorrect model type. Expected {model_type}, received {received_type}.'),
'does_not_exist': _('Invalid pk "{pk_value}" - object does not exist.'),
'incorrect_type': _('Incorrect type. Expected pk value, received {data_type}.'),
}
model_class = None
def __init__(self, *args, **kwargs):
self.model_class = kwargs.pop('model_class', self.model_class)
if 'instance' not in kwargs and not self.model_class:
raise RuntimeError('ResourceIdentifierObjectsSerializer must be initialized with a model class.')
super(ResourceIdentifierObjectSerializer, self).__init__(*args, **kwargs)
def to_representation(self, instance):
if isinstance(instance, RemoteResource):
return {'type': instance.type, 'id': instance.id}
return {
'type': get_resource_type_from_instance(instance),
'id': str(instance.pk)
}
def to_internal_value(self, data):
model_class = get_resource_type_from_model(self.model_class)
if model_class == "RemoteResource":
return RemoteResource(data['type'], data['id'])
if data['type'] != model_class:
self.fail('incorrect_model_type', model_type=self.model_class, received_type=data['type'])
pk = data['id']
try:
return self.model_class.objects.get(pk=pk)
except ObjectDoesNotExist:
self.fail('does_not_exist', pk_value=pk)
except (TypeError, ValueError):
            self.fail('incorrect_type', data_type=type(data['id']).__name__)
class IncludedDict(dict):
def get(self, *args):
return lambda *a: True
class RemoteResourceSerializer(object):
    included_serializers = IncludedDict()

# ---- zc_common/remote_resource/serializers.py (zc-common) ----
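# Illustrative sketch for ResourceIdentifierObjectSerializer above
# (the Order model is hypothetical):
#
#     ResourceIdentifierObjectSerializer(instance=RemoteResource('User', 7)).data
#     # -> {'type': 'User', 'id': '7'}
#
#     # For local models, pass model_class so to_internal_value can resolve the pk:
#     ResourceIdentifierObjectSerializer(model_class=Order).to_internal_value(
#         {'type': 'Order', 'id': '3'})   # -> Order instance with pk 3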
from __future__ import unicode_literals
from django.http import Http404
from rest_framework import HTTP_HEADER_ENCODING, exceptions
from rest_framework.settings import api_settings
from rest_framework.utils.mediatypes import (
_MediaType, media_type_matches, order_by_precedence
)
from rest_framework.negotiation import BaseContentNegotiation
class JsonAPIContentNegotiation(BaseContentNegotiation):
settings = api_settings
def select_parser(self, request, parsers):
"""
Given a list of parsers and a media type, return the appropriate
parser to handle the incoming request.
"""
for parser in parsers:
if media_type_matches(parser.media_type, request.content_type):
return parser
return None
def select_renderer(self, request, renderers, format_suffix=None):
"""
Given a request and a list of renderers, return a two-tuple of:
(renderer, media type).
"""
accepts = self.get_accept_list(request)
# Check the acceptable media types against each renderer,
# attempting more specific media types first
# NB. The inner loop here isn't as bad as it first looks :)
# Worst case is we're looping over len(accept_list) * len(self.renderers)
for media_type_set in order_by_precedence(accepts):
for renderer in renderers:
for media_type in media_type_set:
if media_type_matches(renderer.media_type, media_type):
# Return the most specific media type as accepted.
media_type_wrapper = _MediaType(media_type)
if (
_MediaType(renderer.media_type).precedence >
media_type_wrapper.precedence
):
# Eg client requests '*/*'
# Accepted media type is 'application/json'
full_media_type = ';'.join(
(renderer.media_type,) +
tuple('{0}={1}'.format(
key, value.decode(HTTP_HEADER_ENCODING))
for key, value in media_type_wrapper.params.items()))
return renderer, full_media_type
else:
# Eg client requests 'application/json; indent=8'
# Accepted media type is 'application/json; indent=8'
return renderer, media_type
raise exceptions.NotAcceptable(available_renderers=renderers)
def filter_renderers(self, renderers, format):
"""
If there is a '.json' style format suffix, filter the renderers
so that we only negotiation against those that accept that format.
"""
renderers = [renderer for renderer in renderers
if renderer.format == format]
if not renderers:
raise Http404
return renderers
def get_accept_list(self, request):
"""
Given the incoming request, return a tokenized list of media
type strings.
"""
header = request.META.get('HTTP_ACCEPT', '*/*')
        return [token.strip() for token in header.split(',')]

# ---- zc_common/remote_resource/negotiation.py (zc-common) ----
from collections import OrderedDict
from django.utils.six.moves.urllib import parse as urlparse
from rest_framework.pagination import PageNumberPagination as OldPagination
from rest_framework.views import Response
def remove_query_param(url, key):
"""
Given a URL and a key/val pair, remove an item in the query
parameters of the URL, and return the new URL.
    Forked from rest_framework.utils.urls; overwritten here because we
    need to pass keep_blank_values=True to urlparse.parse_qs() so that
    it doesn't remove the ?filter[id__in]= blank query parameter from
our links in the case of an empty remote to-many link.
"""
(scheme, netloc, path, query, fragment) = urlparse.urlsplit(url)
query_dict = urlparse.parse_qs(query, keep_blank_values=True)
query_dict.pop(key, None)
query = urlparse.urlencode(sorted(list(query_dict.items())), doseq=True)
return urlparse.urlunsplit((scheme, netloc, path, query, fragment))
def replace_query_param(url, key, val):
"""
Given a URL and a key/val pair, set or replace an item in the query
parameters of the URL, and return the new URL.
    Forked from rest_framework.utils.urls; overwritten here because we
need to pass keep_blank_values=True to urlparse.parse_qs() so that
it doesn't remove the ?filter[id__in]= blank query parameter from
our links in the case of an empty remote to-many link.
"""
(scheme, netloc, path, query, fragment) = urlparse.urlsplit(url)
query_dict = urlparse.parse_qs(query, keep_blank_values=True)
query_dict[key] = [val]
query = urlparse.urlencode(sorted(list(query_dict.items())), doseq=True)
return urlparse.urlunsplit((scheme, netloc, path, query, fragment))
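# Illustrative sketch: unlike the upstream DRF helpers, the two functions above
# preserve blank query values, which keeps empty remote to-many links intact.
#
#     replace_query_param('http://api.test/widgets/?q=', 'page', 2)
#     # -> 'http://api.test/widgets/?page=2&q='   (the blank q param survives)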
class PageNumberPagination(OldPagination):
"""
A json-api compatible pagination format
"""
page_size_query_param = 'page_size'
max_page_size = 1000
def build_link(self, index):
if not index:
return None
url = self.request and self.request.build_absolute_uri() or ''
return replace_query_param(url, 'page', index)
def get_paginated_response(self, data):
next_page = None
previous_page = None
if self.page.has_next():
next_page = self.page.next_page_number()
if self.page.has_previous():
previous_page = self.page.previous_page_number()
# hamedahmadi 05/02/2016 -- Adding this to include self link
self_url = remove_query_param(self.request.build_absolute_uri(), self.page_query_param)
return Response({
'results': data,
'meta': {
'pagination': OrderedDict([
('page', self.page.number),
('pages', self.page.paginator.num_pages),
('count', self.page.paginator.count),
])
},
'links': OrderedDict([
('self', self_url),
('first', self.build_link(1)),
('last', self.build_link(self.page.paginator.num_pages)),
('next', self.build_link(next_page)),
('prev', self.build_link(previous_page))
])
        })

# ---- zc_common/remote_resource/pagination.py (zc-common) ----
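# Illustrative sketch of the paginated payload produced above
# (URLs and counts are hypothetical):
#
#     {
#         "results": [...],
#         "meta": {"pagination": {"page": 2, "pages": 5, "count": 42}},
#         "links": {
#             "self": "http://api.test/widgets/",
#             "first": "http://api.test/widgets/?page=1",
#             "last": "http://api.test/widgets/?page=5",
#             "next": "http://api.test/widgets/?page=3",
#             "prev": "http://api.test/widgets/?page=1"
#         }
#     }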
from __future__ import unicode_literals
import six
from django.db import models
from django.db.models import signals
class RemoteResource(object):
def __init__(self, type_name, pk):
self.type = str(type_name) if type_name else None
self.id = str(pk) if pk else None
class RemoteForeignKey(models.CharField):
is_relation = True
many_to_many = False
many_to_one = True
one_to_many = False
one_to_one = False
related_model = None
remote_field = None
description = "A foreign key pointing to an external resource"
def __init__(self, type_name, *args, **kwargs):
if 'max_length' not in kwargs:
kwargs['max_length'] = 50
if 'db_index' not in kwargs:
kwargs['db_index'] = True
if 'db_column' not in kwargs:
kwargs['db_column'] = "%s_id" % type_name.lower()
self.type = type_name
super(RemoteForeignKey, self).__init__(*args, **kwargs)
def from_db_value(self, value, expression, connection, context):
return RemoteResource(self.type, value)
def to_python(self, value):
if isinstance(value, RemoteResource):
return value.id
        if isinstance(value, six.string_types):
return value
if value is None:
return value
raise ValueError("Can not convert value to a RemoteResource properly")
def deconstruct(self):
name, path, args, kwargs = super(RemoteForeignKey, self).deconstruct()
args = tuple([self.type] + list(args))
del kwargs['max_length']
return name, path, args, kwargs
def contribute_to_class(self, cls, name, **kwargs):
self.set_attributes_from_name(name)
self.name = name
self.model = cls
cls._meta.add_field(self)
setattr(cls, name, self)
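# Illustrative sketch (hypothetical model): a RemoteForeignKey stores only the
# external id, in a column named after the remote type.
#
#     class Order(models.Model):
#         user = RemoteForeignKey('User')   # stored in a 'user_id' varchar column
#
#     # Instances loaded from the database expose the relation as a
#     # RemoteResource via from_db_value:
#     order.user   # -> RemoteResource with .type == 'User' and .id == '<pk>'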
class GenericRemoteForeignKey(object):
"""
Provide a generic many-to-one relation through the ``resource_type`` and
``resource_id`` fields.
This class also doubles as an accessor to the related object (similar to
Django ForeignKeys) by adding itself as a model attribute.
"""
# Field flags
auto_created = False
concrete = False
editable = False
hidden = False
is_relation = True
many_to_many = False
many_to_one = True
one_to_many = False
one_to_one = False
related_model = None
remote_field = None
def __init__(self, resource_types=None, rt_field='resource_type', id_field='resource_id'):
if resource_types is None:
raise TypeError('resource_types cannot be None')
self.resource_types = resource_types
self.type = resource_types # For metadata class
self.rt_field = rt_field
self.id_field = id_field
self.editable = False
self.rel = None
self.column = None
def contribute_to_class(self, cls, name, **kwargs):
self.name = name
self.model = cls
self.cache_attr = "_%s_cache" % name
cls._meta.add_field(self, virtual=True)
# Only run pre-initialization field assignment on non-abstract models
if not cls._meta.abstract:
signals.pre_init.connect(self.instance_pre_init, sender=cls)
setattr(cls, name, self)
def is_cached(self, instance):
return hasattr(instance, self.cache_attr)
def instance_pre_init(self, signal, sender, args, kwargs, **_kwargs):
"""
Handle initializing an object with the generic FK instead of
content_type and object_id fields.
"""
if self.name in kwargs:
value = kwargs.pop(self.name)
if value is not None:
if not isinstance(value, RemoteResource):
raise ValueError(
'GenericRemoteForeignKey only accepts RemoteResource objects as values'
)
kwargs[self.rt_field] = value.type
kwargs[self.id_field] = value.id
else:
kwargs[self.rt_field] = None
kwargs[self.id_field] = None
def __get__(self, instance, instance_type=None):
if instance is None:
return self
try:
return getattr(instance, self.cache_attr)
except AttributeError:
resource_type = getattr(instance, self.rt_field)
resource_id = getattr(instance, self.id_field)
rel_obj = RemoteResource(resource_type, resource_id)
setattr(instance, self.cache_attr, rel_obj)
return rel_obj
def __set__(self, instance, value):
rt = None
pk = None
if value is not None:
if not isinstance(value, RemoteResource):
raise ValueError(
'GenericRemoteForeignKey only accepts RemoteResource objects as values'
)
if value.type not in self.resource_types:
raise ValueError(
'Value must be of type {}, got {}'.format(self.resource_types, value.type)
)
rt = value.type
pk = value.id
setattr(instance, self.rt_field, rt)
setattr(instance, self.id_field, pk)
        setattr(instance, self.cache_attr, value)

# ---- zc_common/remote_resource/models.py (zc-common) ----
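# Illustrative sketch for GenericRemoteForeignKey above (hypothetical model):
# the relation spans two columns and validates the resource type on assignment.
#
#     class Note(models.Model):
#         resource_type = models.CharField(max_length=50)
#         resource_id = models.CharField(max_length=50)
#         resource = GenericRemoteForeignKey(resource_types=['User', 'Order'])
#
#     note.resource = RemoteResource('User', 7)   # sets resource_type/resource_id
#     note.resource = RemoteResource('Menu', 1)   # raises ValueError (wrong type)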
import jwt
from django.utils.encoding import smart_text
from rest_framework import exceptions
from rest_framework.authentication import BaseAuthentication, get_authorization_header
from rest_framework_jwt.settings import api_settings
from rest_framework_jwt.utils import jwt_decode_handler
class User(object):
"""
A class that emulates Django's auth User, for use with microservices where
the actual User is unavailable. Surfaces via `request.user`.
"""
def __init__(self, **kwargs):
self.pk = kwargs.pop('pk', None) or kwargs.pop('id', None)
self.id = self.pk
self.roles = []
self.company_permissions = {}
for kwarg in kwargs:
setattr(self, kwarg, kwargs[kwarg])
def is_authenticated(self):
# Roles (i.e. anonymous, user, etc) are handled by permissions classes
return True
def get_roles(self):
"""
For testing purposes only. Emulates `get_roles` in
https://github.com/ZeroCater/mp-users/blob/master/users/models.py
"""
return self.roles
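# Illustrative sketch: in practice this User is constructed from a decoded JWT
# payload (the claims below are hypothetical):
#
#     user = User(id=42, roles=['staff'], company_permissions={'1': 'admin'})
#     user.pk                  # -> 42
#     user.is_authenticated()  # -> True; role checks live in permission classes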
class JWTAuthentication(BaseAuthentication):
"""
Clients should authenticate by passing the token key in the "Authorization"
HTTP header, prepended with the string specified in the setting
`JWT_AUTH_HEADER_PREFIX`. For example:
Authorization: JWT eyJhbGciOiAiSFMyNTYiLCAidHlwIj
"""
www_authenticate_realm = 'api'
@staticmethod
def get_jwt_value(request):
auth = get_authorization_header(request).split()
auth_header_prefix = api_settings.JWT_AUTH_HEADER_PREFIX.lower()
if not auth or smart_text(auth[0].lower()) != auth_header_prefix:
return None
if len(auth) == 1: # pragma: no cover
msg = 'Invalid Authorization header. No credentials provided.'
raise exceptions.AuthenticationFailed(msg)
elif len(auth) > 2: # pragma: no cover
msg = 'Invalid Authorization header. Credentials string should not contain spaces.'
raise exceptions.AuthenticationFailed(msg)
return auth[1]
def authenticate(self, request):
"""
Returns a two-tuple of `User` and token if a valid signature has been
        supplied using JWT-based authentication; raises NotAuthenticated otherwise.
"""
jwt_value = self.get_jwt_value(request)
if jwt_value is None:
raise exceptions.NotAuthenticated()
try:
payload = jwt_decode_handler(jwt_value)
except jwt.ExpiredSignature: # pragma: no cover
msg = 'Signature has expired.'
raise exceptions.AuthenticationFailed(msg)
except jwt.DecodeError: # pragma: no cover
msg = 'Error decoding signature.'
raise exceptions.AuthenticationFailed(msg)
except jwt.InvalidTokenError: # pragma: no cover
raise exceptions.AuthenticationFailed()
except Exception as ex:
            raise exceptions.AuthenticationFailed(str(ex))
user = User(**payload)
return user, jwt_value
def authenticate_header(self, request):
"""
Return a string to be used as the value of the `WWW-Authenticate`
header in a `401 Unauthenticated` response, or `None` if the
authentication scheme should return `403 Permission Denied` responses.
"""
        return '{0} realm="{1}"'.format(api_settings.JWT_AUTH_HEADER_PREFIX, self.www_authenticate_realm)

# ---- zc_common/jwt_auth/authentication.py (zc-common) ----
import logging
import uuid
import zlib
import ujson
from zc_events.exceptions import RequestTimeout, ServiceRequestException
from zc_events.request import wrap_resource_from_response
class Event(object):
def __init__(self, event_client, event_type, *args, **kwargs):
self.event_client = event_client
self.event_type = event_type
self.args = args
self.kwargs = kwargs
self._emit = False
self._wait = False
self._complete = False
        self._response = None
def emit(self):
event_type = self.event_type
args = self.args
kwargs = self.kwargs
return self.event_client.emit_microservice_event(event_type, *args, **kwargs)
def wait(self):
raise NotImplementedError("Base Event does not support this method")
def complete(self):
raise NotImplementedError("Base Event does not support this method")
class RequestEvent(Event):
def __init__(self, *args, **kwargs):
self.response_key = 'request-{}'.format(uuid.uuid4())
if kwargs.get('response_key'):
raise AttributeError("kwargs should not include reserved key 'response_key'")
kwargs['response_key'] = self.response_key
super(RequestEvent, self).__init__(*args, **kwargs)
def wait(self):
if self._wait:
            return self._response
result = self.event_client.wait_for_response(self.response_key)
if not result:
raise RequestTimeout
self._response = ujson.loads(zlib.decompress(result[1]).decode('utf-8'))
self._wait = True
return self._response
def complete(self):
if not self._wait:
self._response = self.wait()
if 400 <= self._response['status'] < 600:
raise ServiceRequestException(self._response['body'])
self._complete = True
return self._response
class ResourceRequestEvent(RequestEvent):
def complete(self):
super(ResourceRequestEvent, self).complete()
wrapped_resource = wrap_resource_from_response(self._response)
        return wrapped_resource

# ---- zc_events/event.py (zc-events) ----
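# Illustrative sketch of the RequestEvent lifecycle above (the event type and
# kwargs are hypothetical):
#
#     event = RequestEvent(event_client, 'user_request', method='GET', pk=7)
#     event.emit()                 # publish and return immediately
#     response = event.wait()      # block for the reply; RequestTimeout if none
#     response = event.complete()  # wait() plus ServiceRequestException on 4xx/5xx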
import six
from datetime import date
import time
from zc_events.aws import save_string_contents_to_s3, save_file_contents_to_s3
S3_BUCKET_NAME = 'zc-mp-email'
def generate_s3_folder_name(email_uuid):
email_date = date.today().isoformat()
email_timestamp = int(time.time())
return "{}/{}_{}".format(email_date, email_timestamp, email_uuid)
def generate_s3_content_key(s3_folder_name, content_type, content_name=''):
content_key = "{}/{}".format(s3_folder_name, content_type)
if content_name:
content_key += '_{}'.format(content_name)
return content_key
def generate_email_data(email_uuid, from_email=None, to=None, cc=None, bcc=None, reply_to=None, subject=None,
plaintext_body=None, html_body=None, headers=None, files=None, attachments=None,
user_id=None, resource_type=None, resource_id=None, unsubscribe_group=None,
is_transactional=False, **kwargs):
"""
files: A list of file paths
attachments: A list of tuples of the format (filename, content_type, content)
"""
s3_folder_name = generate_s3_folder_name(email_uuid)
to = to.split(',') if isinstance(to, six.string_types) else to
cc = cc.split(',') if isinstance(cc, six.string_types) else cc
bcc = bcc.split(',') if isinstance(bcc, six.string_types) else bcc
reply_to = reply_to.split(',') if isinstance(reply_to, six.string_types) else reply_to
for arg in (to, cc, bcc, reply_to):
if arg and not isinstance(arg, list):
msg = "Keyword arguments 'to', 'cc', 'bcc', and 'reply_to' should be of <type 'list'>"
raise TypeError(msg)
if not any([to, cc, bcc, reply_to]):
msg = "Keyword arguments 'to', 'cc', 'bcc', and 'reply_to' can't all be empty"
raise TypeError(msg)
html_body_key = None
if html_body:
html_body_key = generate_s3_content_key(s3_folder_name, 'html')
save_string_contents_to_s3(html_body, S3_BUCKET_NAME, html_body_key)
plaintext_body_key = None
if plaintext_body:
plaintext_body_key = generate_s3_content_key(s3_folder_name, 'plaintext')
save_string_contents_to_s3(plaintext_body, S3_BUCKET_NAME, plaintext_body_key)
attachments_keys = []
if attachments:
for filename, mimetype, attachment in attachments:
attachment_key = generate_s3_content_key(s3_folder_name, 'attachment',
content_name=filename)
save_string_contents_to_s3(attachment, S3_BUCKET_NAME, attachment_key)
attachments_keys.append(attachment_key)
if files:
for filepath in files:
filename = filepath.split('/')[-1]
attachment_key = generate_s3_content_key(s3_folder_name, 'attachment',
content_name=filename)
save_file_contents_to_s3(filepath, S3_BUCKET_NAME, attachment_key)
attachments_keys.append(attachment_key)
event_data = {
'from_email': from_email,
'to': to,
'cc': cc,
'bcc': bcc,
'reply_to': reply_to,
'subject': subject,
'plaintext_body_key': plaintext_body_key,
'html_body_key': html_body_key,
'attachments_keys': attachments_keys,
'headers': headers,
'user_id': user_id,
'resource_type': resource_type,
'resource_id': resource_id,
'task_id': str(email_uuid),
'is_transactional': is_transactional
}
if unsubscribe_group:
event_data['unsubscribe_group'] = unsubscribe_group
    return event_data

# ---- zc_events/email.py (zc-events) ----
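# Illustrative sketch for generate_email_data above (addresses are
# hypothetical): bodies and attachments are uploaded to S3 and only their
# keys travel in the event payload.
#
#     data = generate_email_data(
#         uuid.uuid4(),
#         from_email='[email protected]',
#         to=['[email protected]'],
#         subject='Hello',
#         html_body='<p>Hi</p>',
#     )
#     # data['html_body_key'] -> '<date>/<timestamp>_<uuid>/html'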
from __future__ import division
import logging
import math
import ujson
import uuid
from six.moves import urllib
import pika
import pika_pool
import redis
from zc_events.config import settings
from inflection import underscore
from zc_events.aws import save_string_contents_to_s3
from zc_events.django_request import structure_response, create_django_request_object
from zc_events.email import generate_email_data
from zc_events.event import ResourceRequestEvent
from zc_events.exceptions import EmitEventException, ImproperlyConfigured
from zc_events.utils import notification_event_payload
from zc_events.backends import RabbitMqFanoutBackend
SERVICE_ACTOR = 'service'
ANONYMOUS_ACTOR = 'anonymous'
SERVICE_ROLES = [SERVICE_ACTOR]
ANONYMOUS_ROLES = [ANONYMOUS_ACTOR]
logger = logging.getLogger('django')
_DEPRECATE_MESSAGE = "DEPRECATION WARNING: Use one of the HTTP verb methods instead"
def _deprecated():
logger.warning(_DEPRECATE_MESSAGE)
def _format_data_structure(data, headers, response_key=False):
return {
'data': data,
'id': str(uuid.uuid4()),
'headers': headers,
'response_key': str(uuid.uuid4()) if response_key else None
}
class MethodNotAllowed(Exception):
status_code = 405
default_detail = 'Method "{method}" not allowed.'
def __init__(self, method, detail=None):
if detail is not None:
self.detail = detail
else:
self.detail = self.default_detail.format(method=method)
def __str__(self):
return self.detail
class EventClient(object):
"""Used on the client side to send rpc-style events.
Non-deprecated methods are backend agnostic, and documented.
Note:
This is in a state of being upgraded, and is not totally backend agnostic yet.
In the next major release it will be backend agnostic, and will be removing many of the public methods,
but first we are providing the new methods in a back-wards compatible way.
"""
def __init__(self):
self.__backend = None
self._redis_client = None
self._pika_pool = None
self._notifications_exchange = None
self._events_exchange = None
@property
def _backend(self):
if not self.__backend:
self.__backend = RabbitMqFanoutBackend(
redis_client=self.redis_client,
pika_pool=self.pika_pool
)
return self.__backend
@property
def redis_client(self):
if not self._redis_client:
pool = redis.ConnectionPool().from_url(settings.EVENTS_REDIS_URL, db=0)
self._redis_client = redis.Redis(connection_pool=pool)
return self._redis_client
@property
def pika_pool(self):
if not self._pika_pool:
pika_params = pika.URLParameters(settings.BROKER_URL)
pika_params.socket_timeout = 5
self._pika_pool = pika_pool.QueuedPool(
create=lambda: pika.BlockingConnection(parameters=pika_params),
max_size=10,
max_overflow=10,
timeout=10,
recycle=3600,
stale=45,
)
return self._pika_pool
@property
def notifications_exchange(self):
if not self._notifications_exchange:
self._notifications_exchange = getattr(settings, 'NOTIFICATIONS_EXCHANGE', None)
return self._notifications_exchange
@property
def events_exchange(self):
if not self._events_exchange:
self._events_exchange = settings.EVENTS_EXCHANGE
return self._events_exchange
def _format_and_make_call(self, key, data, headers, response_key, method):
formatted_data = _format_data_structure(data, headers, response_key)
return getattr(self._backend, method)(key, formatted_data)
def call(self, key, data={}, headers={}):
"""Call a function in rpc-style a way and wait for the response.
This is a thin wrapper around `post`, and is provided to make code readable in situations
where the name `call` may make more sense.
Args:
key (str): The key used to lookup the function to be called.
data (dict, optional): The data to be sent to the remote function.
headers (dict, optional): Optional, http style, information to be sent to the remote function.
Returns:
Response: An object containing the response from the remove function.
"""
return self.post(key, data=data, headers=headers)
def call_no_wait(self, key, data={}, headers={}):
"""Call a function in rpc-style a way, without waiting for any response.
This is a thin wrapper around `post_no_wait`, and is provided to make code readable in situations
where the name `call_no_wait` may make more sense.
Args:
key (str): The key used to lookup the function to be called.
data (dict, optional): The data to be sent to the remote function.
headers (dict, optional): Optional, http style, information to be sent to the remote function.
Returns:
None
"""
return self.post_no_wait(key, data=data, headers=headers)
def get(self, key, data={}, headers={}):
"""Call a remote function in an analogous fashion to a GET http request, and wait for a response.
Args:
key (str): The key used to lookup the function to be called.
data (dict, optional): The data to be sent to the remote function.
headers (dict, optional): Optional, http style, information to be sent to the remote function.
Returns:
Response: An object containing the response from the remove function.
"""
return self._format_and_make_call(
key, data, headers, True, 'get'
)
def put(self, key, data={}, headers={}):
"""Call a remote function in an analogous fashion to a PUT http request, and wait for a response.
Args:
key (str): The key used to lookup the function to be called.
data (dict, optional): The data to be sent to the remote function.
headers (dict, optional): Optional, http style, information to be sent to the remote function.
Returns:
Response: An object containing the response from the remove function.
"""
return self._format_and_make_call(
key, data, headers, True, 'put'
)
def put_no_wait(self, key, data={}, headers={}):
"""Call a remote function in an analogous fashion to a PUT http request, without waiting for a response.
Args:
key (str): The key used to lookup the function to be called.
data (dict, optional): The data to be sent to the remote function.
headers (dict, optional): Optional, http style, information to be sent to the remote function.
Returns:
None
"""
return self._format_and_make_call(
key, data, headers, False, 'put_no_wait'
)
def post(self, key, data={}, headers={}):
"""Call a remote function in an analogous fashion to a POST http request, and wait for a response.
Args:
key (str): The key used to lookup the function to be called.
data (dict, optional): The data to be sent to the remote function.
headers (dict, optional): Optional, http style, information to be sent to the remote function.
Returns:
Response: An object containing the response from the remove function.
"""
return self._format_and_make_call(
key, data, headers, True, 'post'
)
def post_no_wait(self, key, data={}, headers={}):
"""Call a remote function in an analogous fashion to a POST http request, without waiting for a response.
Args:
key (str): The key used to lookup the function to be called.
data (dict, optional): The data to be sent to the remote function.
headers (dict, optional): Optional, http style, information to be sent to the remote function.
Returns:
None
"""
return self._format_and_make_call(
key, data, headers, False, 'post_no_wait'
)
def delete(self, key, data={}, headers={}):
"""Call a remote function in an analogous fashion to a DELETE http request, and wait for a response.
Args:
key (str): The key used to lookup the function to be called.
data (dict, optional): The data to be sent to the remote function.
headers (dict, optional): Optional, http style, information to be sent to the remote function.
Returns:
Response: An object containing the response from the remove function.
"""
return self._format_and_make_call(
key, data, headers, True, 'delete'
)
def delete_no_wait(self, key, data={}, headers={}):
"""Call a remote function in an analogous fashion to a DELETE http request, without waiting for a response.
Args:
key (str): The key used to lookup the function to be called.
data (dict, optional): The data to be sent to the remote function.
headers (dict, optional): Optional, http style, information to be sent to the remote function.
Returns:
None
"""
return self._format_and_make_call(
key, data, headers, False, 'delete_no_wait'
)
def emit_microservice_message(self, exchange, routing_key, event_type, priority=0, *args, **kwargs):
_deprecated()
task_id = str(uuid.uuid4())
keyword_args = {'task_id': task_id}
keyword_args.update(kwargs)
task = 'microservice.notification' if routing_key else 'microservice.event'
message = {
'task': task,
'id': task_id,
'args': [event_type] + list(args),
'kwargs': keyword_args
}
event_queue_name = '{}-events'.format(settings.SERVICE_NAME)
event_body = ujson.dumps(message)
logger.info('{}::EMIT: Emitting [{}:{}] event for object ({}:{}) and user {}'.format(
exchange.upper(), event_type, task_id, kwargs.get('resource_type'), kwargs.get('resource_id'),
kwargs.get('user_id')))
queue_arguments = {
'x-max-priority': 10
}
with self.pika_pool.acquire() as cxn:
cxn.channel.queue_declare(queue=event_queue_name, durable=True, arguments=queue_arguments)
response = cxn.channel.basic_publish(
exchange,
routing_key,
event_body,
pika.BasicProperties(
content_type='application/json',
content_encoding='utf-8',
priority=priority
)
)
if not response:
logger.info(
'''{}::EMIT_FAILURE: Failure emitting [{}:{}] event for object ({}:{}) and user {}'''.format(
exchange.upper(), event_type, task_id, kwargs.get('resource_type'),
kwargs.get('resource_id'), kwargs.get('user_id')))
raise EmitEventException("Message may have failed to deliver")
return response
def emit_microservice_event(self, event_type, *args, **kwargs):
_deprecated()
return self.emit_microservice_message(self.events_exchange, '', event_type, *args, **kwargs)
def emit_microservice_email_notification(self, event_type, *args, **kwargs):
return self.emit_microservice_message(
self.notifications_exchange, 'microservice.notification.email', event_type, *args, **kwargs)
def emit_microservice_text_notification(self, event_type, *args, **kwargs):
return self.emit_microservice_message(
self.notifications_exchange, 'microservice.notification.text', event_type, *args, **kwargs)
def wait_for_response(self, response_key):
response = self.redis_client.blpop(response_key, 60)
return response
def _get_handler_for_viewset(self, viewset, is_detail):
if is_detail:
methods = [
('get', 'retrieve'),
('put', 'update'),
('patch', 'partial_update'),
('delete', 'destroy'),
]
else:
methods = [
('get', 'list'),
('post', 'create'),
]
actions = {}
for method, action in methods:
if hasattr(viewset, action):
actions[method] = action
return viewset.as_view(actions)
def handle_request_event(self, event, view=None, viewset=None, relationship_viewset=None):
"""
Method to handle routing request event to appropriate view by constructing
a request object based on the parameters of the event.
"""
request = create_django_request_object(
roles=event.get('roles'),
query_string=event.get('query_string'),
method=event.get('method'),
user_id=event.get('user_id', None),
body=event.get('body', None),
http_host=event.get('http_host', None)
)
if not any([view, viewset, relationship_viewset]):
raise ImproperlyConfigured('handle_request_event must be passed either a view or viewset')
response_key = event.get('response_key')
pk = event.get('pk', None)
relationship = event.get('relationship', None)
related_resource = event.get('related_resource', None)
handler_kwargs = {}
if view:
handler = view.as_view()
if pk:
handler_kwargs['pk'] = pk
elif pk:
handler_kwargs['pk'] = pk
if relationship:
# Relationship views expect this kwarg as 'related_field'. See https://goo.gl/WW4ePd
handler_kwargs['related_field'] = relationship
handler = relationship_viewset.as_view()
elif related_resource:
handler = viewset.as_view({'get': related_resource})
handler_kwargs['related_resource'] = related_resource
else:
handler = self._get_handler_for_viewset(viewset, is_detail=True)
else:
handler = self._get_handler_for_viewset(viewset, is_detail=False)
result = handler(request, **handler_kwargs)
# Takes result and drops it into Redis with the key passed in the event
self.redis_client.rpush(response_key, structure_response(result.status_code, result.rendered_content))
self.redis_client.expire(response_key, 60)
def async_resource_request(self, resource_type, resource_id=None, user_id=None, query_string=None, method=None,
data=None, related_resource=None, roles=None, priority=5):
roles = roles or ANONYMOUS_ROLES
event = ResourceRequestEvent(
self,
'{}_request'.format(underscore(resource_type)),
method=method,
user_id=user_id,
roles=roles,
pk=resource_id,
query_string=query_string,
related_resource=related_resource,
body=data,
priority=priority
)
event.emit()
return event
def make_service_request(self, resource_type, resource_id=None, user_id=None, query_string=None, method=None,
data=None, related_resource=None):
roles = SERVICE_ROLES
event = self.async_resource_request(resource_type, resource_id=resource_id, user_id=user_id,
query_string=query_string, method=method,
data=data, related_resource=related_resource, roles=roles)
return event.wait()
def get_remote_resource_async(self, resource_type, pk=None, user_id=None, include=None, page_size=None,
related_resource=None, query_params=None, roles=None, priority=None):
"""
Function called by services to make a request to another service for a resource.
"""
query_string = None
params = query_params or {}
method = 'GET'
if pk and isinstance(pk, (list, set)):
params['filter[id__in]'] = ','.join([str(_) for _ in pk])
pk = None
if include:
params['include'] = include
if page_size:
params['page_size'] = page_size
if params:
query_string = urllib.parse.urlencode(params)
event = self.async_resource_request(resource_type, resource_id=pk, user_id=user_id,
query_string=query_string, method=method,
related_resource=related_resource, roles=roles, priority=priority)
return event
def get_remote_resource(self, resource_type, pk=None, user_id=None, include=None, page_size=None,
related_resource=None, query_params=None, roles=None):
event = self.get_remote_resource_async(resource_type, pk=pk, user_id=user_id, include=include,
page_size=page_size, related_resource=related_resource,
query_params=query_params, roles=roles)
wrapped_resource = event.complete()
return wrapped_resource
def get_remote_resource_data(self, resource_type, pk=None, user_id=None, include=None, page_size=None,
related_resource=None, query_params=None, roles=None):
priority = 9
event = self.get_remote_resource_async(resource_type, pk=pk, user_id=user_id, include=include,
page_size=page_size, related_resource=related_resource,
query_params=query_params, roles=roles, priority=priority)
data = event.wait()
return data
def send_email(self, *args, **kwargs):
email_uuid = uuid.uuid4()
to = kwargs.get('to')
from_email = kwargs.get('from_email')
attachments = kwargs.get('attachments')
files = kwargs.get('files')
if logger:
msg = '''MICROSERVICE_SEND_EMAIL: Upload email with UUID {}, to {}, from {},
with attachments {} and files {}'''
logger.info(msg.format(email_uuid, to, from_email, attachments, files))
event_data = generate_email_data(email_uuid, *args, **kwargs)
if logger:
logger.info('MICROSERVICE_SEND_EMAIL: Sent email with UUID {} and data {}'.format(
email_uuid, event_data
))
self.emit_microservice_email_notification('send_email', **event_data)
def emit_index_rebuild_event(self, event_name, resource_type, model, batch_size, serializer, queryset=None):
"""
A special helper method to emit events related to index_rebuilding.
Note: AWS_INDEXER_BUCKET_NAME must be present in your settings.
We loop over the table and each turn, we take `batch_size` objects and emit an event for them.
"""
if queryset is None:
queryset = model.objects.all()
objects_count = queryset.count()
total_events_count = int(math.ceil(objects_count / batch_size))
emitted_events_count = 0
while emitted_events_count < total_events_count:
start_index = emitted_events_count * batch_size
end_index = start_index + batch_size
data = []
for instance in queryset.order_by('id')[start_index:end_index]:
instance_data = serializer(instance)
data.append(instance_data)
stringified_data = ujson.dumps(data)
filename = save_string_contents_to_s3(stringified_data, settings.AWS_INDEXER_BUCKET_NAME)
payload = notification_event_payload(resource_type=resource_type, resource_id=None, user_id=None,
meta={'s3_key': filename})
self.emit_microservice_event(event_name, **payload)
            emitted_events_count += 1

# ---- zc_events/client.py (zc-events) ----
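# Illustrative sketch for EventClient above (the keys and payloads are
# hypothetical); the HTTP-verb methods are the backend-agnostic surface:
#
#     client = EventClient()
#     client.post_no_wait('send_welcome', data={'user_id': 42})   # fire and forget
#     response = client.get('add', data={'x': 1, 'y': 2})
#     if not response.has_errors:
#         print(response.data)   # -> 3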
from __future__ import (
absolute_import, print_function, division, unicode_literals
)
import inspect
import ujson as json_module
import re
import six
from six.moves import urllib
from collections import namedtuple
try:
    from collections.abc import Sequence, Sized  # Python 3
except ImportError:  # Python 2 fallback
    from collections import Sequence, Sized
from functools import update_wrapper
from inflection import camelize
from zc_events.exceptions import ServiceRequestException
Call = namedtuple('Call', ['resource_type', 'params', 'response'])
_wrapper_template = """\
def wrapper%(signature)s:
with responses:
return func%(funcargs)s
"""
class Response(object):
"""Represents a response from an event, inspired by the requests library.
When the client performs an action that generates a response, that response
is represented by this class.
Example:
```
r = client.get('add', data={'x': 1, 'y': 2})
assert r.has_errors is False
assert r.data == 3
assert r.errors == []
r = client.get('add', data={'x': 1})
assert r.has_errors is True
assert r.data is None
        assert r.errors == [{'type': 'KeyError', 'message': "'y'"}]
```
Attributes:
data (dict, object or None): The data returned by call, if no errors occur.
has_errors (bool): If an error was captured, it is set to True.
errors (list of dicts): Holds the errors that were thrown, if any.
"""
def __init__(self, response):
for key, value in list(response.items()):
setattr(self, key, value)
def get_wrapped(func, wrapper_template, evaldict):
# Preserve the argspec for the wrapped function so that testing
# tools such as pytest can continue to use their fixture injection.
args, a, kw, defaults = inspect.getargspec(func)
signature = inspect.formatargspec(args, a, kw, defaults)
is_bound_method = hasattr(func, '__self__')
if is_bound_method:
args = args[1:] # Omit 'self'
callargs = inspect.formatargspec(args, a, kw, None)
ctx = {'signature': signature, 'funcargs': callargs}
six.exec_(wrapper_template % ctx, evaldict)
wrapper = evaldict['wrapper']
update_wrapper(wrapper, func)
if is_bound_method:
wrapper = wrapper.__get__(func.__self__, type(func.__self__))
return wrapper
class CallList(Sequence, Sized):
def __init__(self):
self._calls = []
def __iter__(self):
return iter(self._calls)
def __len__(self):
return len(self._calls)
def __getitem__(self, idx):
return self._calls[idx]
def add(self, resource_type, params, response):
self._calls.append(Call(resource_type, params, response))
def reset(self):
self._calls = []
class EventRequestsMock(object):
DELETE = 'DELETE'
GET = 'GET'
HEAD = 'HEAD'
OPTIONS = 'OPTIONS'
PATCH = 'PATCH'
POST = 'POST'
PUT = 'PUT'
def __init__(self, assert_all_requests_are_fired=True):
self._calls = CallList()
self.reset()
self.assert_all_requests_are_fired = assert_all_requests_are_fired
def reset(self):
self._events = []
self._calls.reset()
def add(self, method, resource_type, pk=None, body='', match_querystring=False,
query_string=None, status=200, json=None, related_resource=None):
# if we were passed a `json` argument,
# override the body and content_type
if json is not None:
body = json_module.dumps(json)
# body must be bytes
if isinstance(body, six.text_type):
body = body.encode('utf-8')
self._events.append({
'resource_type': resource_type,
'pk': pk,
'method': method,
'body': body,
'query_string': query_string,
'match_querystring': match_querystring,
'status': status,
'related_resource': related_resource,
})
def add_callback(self, method, url, callback, match_querystring=False,
content_type='text/plain'):
self._events.append({
'url': url,
'method': method,
'callback': callback,
'content_type': content_type,
'match_querystring': match_querystring,
})
@property
def calls(self):
return self._calls
def __enter__(self):
self.start()
return self
def __exit__(self, type, value, traceback):
success = type is None
self.stop(allow_assert=success)
self.reset()
return success
def activate(self, func):
evaldict = {'responses': self, 'func': func}
return get_wrapped(func, _wrapper_template, evaldict)
def _find_match(self, resource_type, **kwargs):
for match in self._events:
if kwargs['method'] != match['method']:
continue
if resource_type != match['resource_type']:
continue
if not self._has_event_match(match, **kwargs):
continue
break
else:
return None
if self.assert_all_requests_are_fired:
# for each found match remove the url from the stack
self._events.remove(match)
return match
def _has_event_match(self, match, **kwargs):
pk = kwargs.get('id', None) or kwargs.get('pk', None)
if str(match.get('pk')) != str(pk):
return False
if match.get('query_string') and kwargs.get('query_string') and \
match['query_string'] != urllib.parse.unquote(kwargs['query_string']):
return False
if match.get('related_resource') != kwargs.get('related_resource'):
return False
return True
def _on_request(self, event, **kwargs):
resource_type = camelize(event.event_type.replace('_request', ''))
match = self._find_match(resource_type, **kwargs)
method = kwargs['method']
# TODO(dcramer): find the correct class for this
if match is None:
error_msg = 'Service unavailable: {0} {1}'.format(method, resource_type)
response = ServiceRequestException(error_msg)
self._calls.add(resource_type, kwargs, response)
raise response
if 'body' in match and isinstance(match['body'], Exception):
self._calls.add(resource_type, kwargs, match['body'])
raise match['body']
if 'body' in match:
status = match['status']
body = match['body']
response = {
'status': status,
'body': body
}
self._calls.add(resource_type, kwargs, response)
return response
def start(self):
try:
from unittest import mock
except ImportError:
import mock
def unbound_on_send(event, *a, **kwargs):
return self._on_request(event, *event.args, **event.kwargs)
self._patcher_1 = mock.patch('zc_events.client.EventClient.emit_microservice_event')
self._patcher_2 = mock.patch('zc_events.event.ResourceRequestEvent.wait',
unbound_on_send)
self._patcher_1.start()
self._patcher_2.start()
def stop(self, allow_assert=True):
self._patcher_1.stop()
self._patcher_2.stop()
if allow_assert and self.assert_all_requests_are_fired and self._events:
            raise AssertionError(
                'Not all requests have been executed {0!r}'.format(
                    [(event['method'], event.get('url', event.get('resource_type')))
                     for event in self._events]))
# expose default mock namespace
mock = _default_mock = EventRequestsMock(assert_all_requests_are_fired=False)
__all__ = []
for __attr in (a for a in dir(_default_mock) if not a.startswith('_')):
__all__.append(__attr)
    globals()[__attr] = getattr(_default_mock, __attr)

# ---- zc_events/responses.py (zc-events) ----
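# Illustrative sketch, mirroring the `responses` library this module is
# modeled on (the resource name and payload are hypothetical):
#
#     import zc_events.responses as responses
#
#     @responses.activate
#     def test_fetch_user():
#         responses.add(responses.GET, 'User', pk=7,
#                       json={'data': {'type': 'User', 'id': '7'}}, status=200)
#         result = client.make_service_request('User', resource_id=7, method='GET')
#         assert result['status'] == 200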
import sys
import uuid
import boto3
from botocore.exceptions import ClientError
from six import raise_from
from zc_events.config import settings
class S3IOException(Exception):
pass
def save_string_contents_to_s3(stringified_data, aws_bucket_name, content_key=None,
aws_access_key_id=None, aws_secret_access_key=None):
"""Save data (provided in string format) to S3 bucket and return s3 key."""
aws_access_key_id = aws_access_key_id or settings.AWS_ACCESS_KEY_ID
aws_secret_access_key = aws_secret_access_key or settings.AWS_SECRET_ACCESS_KEY
try:
if not content_key:
content_key = str(uuid.uuid4())
session = boto3.session.Session(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
)
s3 = session.resource('s3')
s3.Bucket(aws_bucket_name).put_object(Key=content_key, Body=stringified_data)
return content_key
except ClientError as error:
msg = 'Failed to save contents to S3. aws_bucket_name: {}, content_key: {}, ' \
'error: {}'.format(aws_bucket_name, content_key, error)
raise_from(S3IOException(msg), error)
def save_file_contents_to_s3(filepath, aws_bucket_name, content_key=None,
aws_access_key_id=None, aws_secret_access_key=None):
"""Upload a local file to S3 bucket and return S3 key."""
aws_access_key_id = aws_access_key_id or settings.AWS_ACCESS_KEY_ID
aws_secret_access_key = aws_secret_access_key or settings.AWS_SECRET_ACCESS_KEY
try:
if not content_key:
content_key = str(uuid.uuid4())
session = boto3.session.Session(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
)
s3 = session.resource('s3')
s3.Bucket(aws_bucket_name).upload_file(filepath, content_key)
return content_key
except ClientError as error:
msg = 'Failed to save contents to S3. filepath: {}, aws_bucket_name: {}, content_key: {}, ' \
'error: {}'.format(filepath, aws_bucket_name, content_key, error)
raise_from(S3IOException(msg), error)
def read_s3_file_as_string(aws_bucket_name, content_key, delete=False,
aws_access_key_id=None, aws_secret_access_key=None):
"""Get the contents of an S3 file as string and optionally delete the file from the bucket."""
aws_access_key_id = aws_access_key_id or settings.AWS_ACCESS_KEY_ID
aws_secret_access_key = aws_secret_access_key or settings.AWS_SECRET_ACCESS_KEY
try:
session = boto3.session.Session(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
)
s3 = session.resource('s3')
obj = s3.Object(aws_bucket_name, content_key).get()
        output = obj['Body'].read()
if delete:
obj.delete()
return output
except ClientError as error:
        msg = 'Failed to read contents from S3. aws_bucket_name: {}, content_key: {}, delete: {}, ' \
              'error: {}'.format(aws_bucket_name, content_key, delete, error)
raise_from(S3IOException(msg), error) | zc-events | /zc_events-0.3.8-py3-none-any.whl/zc_events/aws.py | aws.py |
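# A minimal round-trip sketch for the helpers above (illustrative only; the
# bucket name is hypothetical and credentials come from settings):
#
#     key = save_string_contents_to_s3('hello world', 'example-bucket')
#     assert read_s3_file_as_string('example-bucket', key, delete=True) == 'hello world'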
from inflection import underscore
import ujson
from zc_events.exceptions import RemoteResourceException
def _included_to_dict(included):
data = {}
if not included:
return data
for item in included:
data[(item['type'], item['id'])] = item
return data
def wrap_resource_from_response(response):
json_response = ujson.loads(response['body'])
if 'data' not in json_response:
        msg = 'Error retrieving resource. Response: {0}'.format(response)
raise RemoteResourceException(msg)
resource_data = json_response['data']
included_raw = json_response.get('included')
included_data = _included_to_dict(included_raw)
if isinstance(resource_data, list):
return RemoteResourceListWrapper(resource_data, included_data)
return RemoteResourceWrapper(resource_data, included_data)
class RemoteResourceWrapper(object):
def __init__(self, data, included=None):
result = self._get_from_include(included, data)
self.data = result if result else data
self.create_properties_from_data(included)
def __repr__(self):
return '<{0}: {1}>'.format(self.type, self.id)
def __str__(self):
return repr(self)
def _get_from_include(self, included, obj):
if included:
res = included.get((obj['type'], obj['id']))
return res
return None
def create_properties_from_data(self, included):
accepted_keys = ('id', 'type', 'self', 'related')
for key in list(self.data.keys()):
if key in accepted_keys:
setattr(self, key, self.data.get(key))
if 'attributes' in self.data:
attributes = self.data['attributes']
for key in list(attributes.keys()):
setattr(self, underscore(key), attributes[key])
if 'relationships' in self.data:
relationships = self.data['relationships']
for key in list(relationships.keys()):
if isinstance(relationships[key]['data'], list):
setattr(self, underscore(key), RemoteResourceListWrapper(relationships[key]['data'], included))
else:
got = None
if included:
got = self._get_from_include(included, relationships[key]['data'])
if got:
setattr(self, underscore(key), RemoteResourceWrapper(got, included))
else:
setattr(self, underscore(key), RemoteResourceWrapper(relationships[key]['data'], included))
if 'links' in relationships[key]:
setattr(getattr(self, underscore(key)), 'links',
RemoteResourceWrapper(relationships[key]['links'], None))
class RemoteResourceListWrapper(list):
def __init__(self, seq, included=None):
super(RemoteResourceListWrapper, self).__init__()
self.data = seq
self.add_items_from_data(included)
def add_items_from_data(self, included):
[self.append(RemoteResourceWrapper(x, included)) for x in self.data]
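# Illustrative use of the wrappers above: a JSONAPI payload becomes an object
# whose camelCase attribute names are exposed underscored. The payload below
# is hypothetical.
def _example_wrap_resource():
    payload = {'data': {'type': 'User', 'id': '1',
                        'attributes': {'firstName': 'Ada'}}}
    resource = wrap_resource_from_response({'body': ujson.dumps(payload)})
    assert resource.id == '1'
    assert resource.type == 'User'
    assert resource.first_name == 'Ada'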
class Request(object):
"""Represents a request to the method handling the call.
Attributes:
data (dict or None): The data being transmitted by the client.
response_key (str or None): The response key used to make sure the client receives the response.
id (str): An automatically generated unique request id.
"""
def __init__(self, request):
for key, value in list(request.items()):
setattr(self, key, value) | zc-events | /zc_events-0.3.8-py3-none-any.whl/zc_events/request.py | request.py |
import uuid
import logging
from zc_events.email import generate_email_data
logger = logging.getLogger(__name__)
def send(event_client, from_email=None, to=None, cc=None, bcc=None, reply_to=None, subject=None,
plaintext_body=None, html_body=None, headers=None, files=None, attachments=None,
user_id=None, resource_type=None, resource_id=None, unsubscribe_group=None,
is_transactional=False, wait_for_response=False):
"""Send an email
Args:
        event_client - an instantiated event client used for message passing.
Kwargs:
to - a list of email addresses to send to
        from_email - a string that will be used in the "from" field
cc - a list of email addresses which will be copied on the communication
bcc - a list of email addresses that will be blind copied on the communication
reply_to - TBD
subject - The subject line of the email
plaintext_body - a plaintext email body
html_body - a html email body
user_id - TBD
headers - TBD
unsubscribe_group - TBD
attachments - TBD
files - TBD
is_transactional - bool to tell the email server if this is a transactional email (default False)
wait_for_response - bool to wait for a response or not (default False)
"""
email_uuid = uuid.uuid4()
msg = '''MICROSERVICE_SEND_EMAIL: Upload email with UUID {}, to {}, from {},
with attachments {} and files {}'''
logger.info(msg.format(email_uuid, to, from_email, attachments, files))
event_data = generate_email_data(email_uuid,
from_email=from_email, to=to, cc=cc, bcc=bcc, reply_to=reply_to, subject=subject,
plaintext_body=plaintext_body, html_body=html_body, headers=headers, files=files,
attachments=attachments, user_id=user_id, resource_type=resource_type,
resource_id=resource_id, unsubscribe_group=unsubscribe_group,
is_transactional=is_transactional)
if wait_for_response:
func = event_client.post
else:
func = event_client.post_no_wait
returned = func('send_email', event_data)
logger.info('MICROSERVICE_SEND_EMAIL: Sent email with UUID {} and data {}'.format(
email_uuid, event_data
))
return email_uuid, event_data, returned | zc-events | /zc_events-0.3.8-py3-none-any.whl/zc_events/contrib/email.py | email.py |
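# Illustrative call (the client and addresses are hypothetical; see the
# docstring above for the full set of keyword arguments):
#
#     email_uuid, event_data, response = send(
#         event_client, from_email='[email protected]', to=['[email protected]'],
#         subject='Hello', plaintext_body='Hi there', wait_for_response=False)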
import logging
import ujson as json
import pika
import pika_pool as pika_pool_lib
import redis
import traceback
from zc_events.config import settings
from zc_events.responses import Response
from zc_events.exceptions import EmitEventException, RequestTimeout
from zc_events.backends.rabbitmqredis.common import format_exception_response
from zc_events.backends.rabbitmqredis.server import respond
_DEFAULT_ROUTING_KEY = ''
_LOW_PRIORITY = 0
_HIGH_PRIORITY = 9
logger = logging.getLogger(__name__)
def _get_raw_response(redis_client, response_key):
try:
key_and_response = redis_client.blpop(response_key, 30)
if key_and_response is None:
raise RequestTimeout(detail='Timed out waiting for redis response')
response = key_and_response[1] # Index [0] is the response_key
logger.debug('zc_events got response response_key={response_key} response={response}'.format(
response_key=response_key,
response=response
))
except Exception as e:
msg = str(e)
ex_type = e.__class__.__name__
trace = traceback.format_exc()
logger.exception(
'zc_events exception waiting for response response_key={response_key} '
'exception={ex} message={msg} trace={trace}'.format(
response_key=response_key,
ex=ex_type,
msg=msg,
trace=trace
)
)
response = json.dumps(format_exception_response(ex_type, msg, trace))
return response
def _get_response(redis_client, response_key):
response = _get_raw_response(redis_client, response_key)
json_response = json.loads(response)
return Response(json_response)
def _place_on_queue(pika_pool, events_exchange, routing_key, priority, event_body):
event_queue_name = '{}-events'.format(settings.SERVICE_NAME)
queue_arguments = {
'x-max-priority': 10
}
response = None
logger.debug(
'zc_events placing on queue with the following '
'events_exchange={events_exchange} routing_key={routing_key} '
'event_body={event_body} priority={priority}'.format(
events_exchange=events_exchange, routing_key=routing_key, event_body=event_body, priority=priority
)
)
with pika_pool.acquire() as cxn:
cxn.channel.queue_declare(queue=event_queue_name, durable=True, arguments=queue_arguments)
response = cxn.channel.basic_publish(
events_exchange,
routing_key,
event_body,
pika.BasicProperties(
content_type='application/json',
content_encoding='utf-8',
priority=priority
)
)
if not response:
raise EmitEventException("Message may have failed to deliver")
return response
def _format_data(data, method, key):
data['_backend'] = {
'type': 'rabbitmqfanout',
'method': method,
'key': key
}
return {
'task': 'microservice.event',
'id': data['id'],
'args': [key, data],
'response_key': data['response_key']
}
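# For example (illustrative values), _format_data({'id': '42',
# 'response_key': 'rk-42'}, 'GET', 'user') produces:
#
#     {'task': 'microservice.event',
#      'id': '42',
#      'args': ['user', {'id': '42', 'response_key': 'rk-42',
#                        '_backend': {'type': 'rabbitmqfanout',
#                                     'method': 'GET', 'key': 'user'}}],
#      'response_key': 'rk-42'}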
class RabbitMqFanoutBackend(object):
"""A backend implementation using Rabbitmq fanout strategy and redis for responses.
The intent for this backend is to use a Rabbitmq fanout strategy, with redis for responding quickly.
It is also required that the consumers of the events on Rabbitmq be using celery 3 or 4.
Note:
It is not intended to be used directly by developers, but instead set and instantiated
through the RPC_BACKEND setting.
All public methods are backend implementations for the corresponding methods on EventClient,
except for the respond method.
Args:
        redis_client (redis connection, optional): If this option is not provided, a redis connection
            is created using the EVENTS_REDIS_URL in your settings, using db 0.
        pika_pool (pika_pool.QueuedPool, optional): The connection pool used by rabbitmq. If it is not
            provided, a connection is established using the BROKER_URL from settings.
"""
def __init__(self, redis_client=None, pika_pool=None):
self._redis_client = redis_client
self._pika_pool = pika_pool
self.__events_exchange = None
@property
def _redis_client(self):
if not self.__redis_client:
            pool = redis.ConnectionPool.from_url(settings.EVENTS_REDIS_URL, db=0)
self.__redis_client = redis.Redis(connection_pool=pool)
return self.__redis_client
@_redis_client.setter
def _redis_client(self, value):
self.__redis_client = value
@property
def _pika_pool(self):
if not self.__pika_pool:
pika_params = pika.URLParameters(settings.BROKER_URL)
pika_params.socket_timeout = 5
self.__pika_pool = pika_pool_lib.QueuedPool(
create=lambda: pika.BlockingConnection(parameters=pika_params),
max_size=10,
max_overflow=10,
timeout=10,
recycle=3600,
stale=45,
)
return self.__pika_pool
@_pika_pool.setter
def _pika_pool(self, value):
self.__pika_pool = value
@property
def _events_exchange(self):
if not self.__events_exchange:
self.__events_exchange = settings.EVENTS_EXCHANGE
return self.__events_exchange
def call(self, key, data):
return self.post(key, data)
def call_no_wait(self, key, data):
return self.post_no_wait(key, data)
def get(self, key, data):
data = _format_data(data, 'GET', key)
return self._enqueue_with_waiting(data)
def put(self, key, data):
data = _format_data(data, 'PUT', key)
return self._enqueue_with_waiting(data)
def put_no_wait(self, key, data):
data = _format_data(data, 'PUT', key)
return self._enqueue_without_waiting(data)
def post(self, key, data):
data = _format_data(data, 'POST', key)
return self._enqueue_with_waiting(data)
def post_no_wait(self, key, data):
data = _format_data(data, 'POST', key)
return self._enqueue_without_waiting(data)
def delete(self, key, data):
data = _format_data(data, 'DELETE', key)
return self._enqueue_with_waiting(data)
def delete_no_wait(self, key, data):
data = _format_data(data, 'DELETE', key)
return self._enqueue_without_waiting(data)
def _enqueue_without_waiting(self, data):
_place_on_queue(
self._pika_pool,
self._events_exchange,
_DEFAULT_ROUTING_KEY,
_LOW_PRIORITY,
json.dumps(data)
)
def _enqueue_with_waiting(self, data):
_place_on_queue(
self._pika_pool,
self._events_exchange,
_DEFAULT_ROUTING_KEY,
_HIGH_PRIORITY,
json.dumps(data)
)
return _get_response(self._redis_client, data['response_key'])
def respond(self, response_key, data):
"""
Respond to a request with the results.
Args:
response_key (str or None): If the response key is empty no response is done.
data (dict): The fully formatted response to be sent to the client and passed to the Response object.
Returns
bool: True if responded, False if it did not.
"""
logger.debug('zc_events responding with response_key={response_key} data={data}'.format(
response_key=response_key, data=data
))
if response_key:
respond(self._redis_client, response_key, data)
return True
return False | zc-events | /zc_events-0.3.8-py3-none-any.whl/zc_events/backends/rabbitmqredis/client.py | client.py |
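# Illustrative client-side usage (settings-driven construction; the key and
# payload here are hypothetical):
#
#     backend = RabbitMqFanoutBackend()
#     response = backend.get('user', {'id': 'abc', 'response_key': 'rk-abc',
#                                     'headers': {'pk': '7'}})
#     # `response` is a zc_events.responses.Response built from the JSON the
#     # serving process pushed to redis under 'rk-abc'.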
import ujson as json
def is_compatible(func):
"""Determine if the function is a compatible view or viewset
Args:
func: the function in question.
Returns:
bool: If the function is compatible return True
"""
return hasattr(func, 'view_class') or hasattr(func, 'as_view')
def handle(func, data, relationship_viewset=None):
"""Handles dispatching the event to the view/viewset.
The `data` arg is the most important because it determines a lot of what goes on. The only
key that is required is the method key. Here are all keys:
'data': The normal response body type of data, and ends up in response.body.
'response_key': The key used to respond with.
'_backend': {
'method': A GET/POST/DELETE http verb
}
'headers': {
'pk': The primary key, used to determine if it is a detail route or not
'roles': The role assigned in the JWT in the request header
'user_id': The user making the request, part of the JWT
'http_host': unknown usage
'query_string': unknown usage
'relationship': unknown usage
'related_resource': unknown usage
}
Note:
        This is set up with a JSONAPI, DRF and JWT configuration in mind. It is not
expected to work with regular django class based views.
Args:
func: The view/viewset to dispatch to.
data: A dictionary like object holding the keys documented above.
relationship_viewset: Optional viewset for relations.
"""
prepared_data = _prepare_data(data)
django_request = _create_request(prepared_data)
handler, handler_kwargs = _make_request_handler(prepared_data, func, relationship_viewset)
django_response = handler(django_request, **handler_kwargs)
return _serialize_response(django_response)
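# Illustrative dispatch (the viewset and payload are hypothetical):
#
#     result = handle(UserViewSet, {
#         'data': None,
#         'response_key': 'rk-1',
#         '_backend': {'method': 'GET'},
#         'headers': {'pk': '7', 'roles': ['staff'], 'user_id': '1'},
#     })
#     # -> {'data': {...}, 'has_errors': False, 'errors': []}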
def _prepare_data(data):
body = data.get('data')
method = data.get('_backend').get('method')
headers = data.get('headers', {})
roles = headers.get('roles')
user_id = headers.get('user_id')
http_host = headers.get('http_host')
query_string = headers.get('query_string')
response_key = data.get('response_key')
pk = headers.get('pk')
relationship = headers.get('relationship')
related_resource = headers.get('related_resource')
return {
'roles': roles,
'user_id': user_id,
'body': body,
'method': method,
'http_host': http_host,
'query_string': query_string,
'response_key': response_key,
'pk': pk,
'relationship': relationship,
'related_resource': related_resource
}
def _create_request(event):
from zc_events.django_request import create_django_request_object
return create_django_request_object(
roles=event.get('roles'),
query_string=event.get('query_string'),
method=event.get('method'),
user_id=event.get('user_id', None),
body=event.get('body', None),
http_host=event.get('http_host', None)
)
def _make_request_handler(event, func, relationship_viewset):
# use ViewSetMixin because that is where the magic happens...
# https://github.com/encode/django-rest-framework/blob/73203e6b5920dcbe78e3309b7bf2803eb56db536/rest_framework/viewsets.py#L35
from rest_framework.viewsets import ViewSetMixin
if issubclass(func, ViewSetMixin):
viewset = func
view = None
else:
viewset = None
view = func
    if not any([view, viewset, relationship_viewset]):
        from django.core.exceptions import ImproperlyConfigured
        raise ImproperlyConfigured('handle_request_event must be passed either a view or viewset')
pk = event.get('pk', None)
relationship = event.get('relationship', None)
related_resource = event.get('related_resource', None)
handler_kwargs = {}
if view:
handler = view.as_view()
elif pk:
handler_kwargs['pk'] = pk
if relationship:
# Relationship views expect this kwarg as 'related_field'. See https://goo.gl/WW4ePd
handler_kwargs['related_field'] = relationship
handler = relationship_viewset.as_view()
elif related_resource:
handler = viewset.as_view({'get': related_resource})
handler_kwargs['related_resource'] = related_resource
else:
handler = _get_handler_for_viewset(viewset, is_detail=True)
else:
handler = _get_handler_for_viewset(viewset, is_detail=False)
return handler, handler_kwargs
def _get_handler_for_viewset(viewset, is_detail):
if is_detail:
methods = [
('get', 'retrieve'),
('put', 'update'),
('patch', 'partial_update'),
('delete', 'destroy'),
]
else:
methods = [
('get', 'list'),
('post', 'create'),
]
actions = {}
for method, action in methods:
if hasattr(viewset, action):
actions[method] = action
return viewset.as_view(actions)
def _serialize_response(response):
serialized = {
'data': None,
'has_errors': False,
'errors': []
}
if response.status_code >= 400:
serialized['has_errors'] = True
serialized['errors'] = response.data
else:
serialized['data'] = json.loads(response.rendered_content)
return serialized | zc-events | /zc_events-0.3.8-py3-none-any.whl/zc_events/backends/rabbitmqredis/viewset_handler.py | viewset_handler.py |
import logging
import ujson as json
import traceback
import six
from zc_events.config import settings
from zc_events.request import Request
from zc_events.exceptions import RequestTimeout
from zc_events.backends.rabbitmqredis.common import format_exception_response
from zc_events.backends.rabbitmqredis import viewset_handler
if six.PY2:
from collections import Mapping
else:
from collections.abc import Mapping
logger = logging.getLogger(__name__)
def respond(redis_client, response_key, data):
"""Low level responder for redis. It is intended to be used
by the backend_client.respond method and not directly by the end user.
Args:
redis_client: A redis client
response_key: The response key in which push the response to.
data: A dictionary like object to put into the redis response (json.dumps first)
"""
result = redis_client.rpush(response_key, json.dumps(data))
redis_client.expire(response_key, 60)
return result
def _handle_regular_func(func, data):
request = Request(data)
return {
'data': func(request),
'has_errors': False,
'errors': []
}
def _get_job_info(name):
val = settings.JOB_MAPPING.get(name)
if isinstance(val, Mapping):
return val.get('func'), val.get('relationship_viewset')
return val, None
def dispatch_task(name, data):
"""Dispatch the task for processing on the server side.
Example:
        @app.task(name='microservice.event')
def listener(event_name, data):
from zc_events.backends import dispatch_task
return dispatch_task(event_name, data)
Note:
        This function relies upon `JOB_MAPPING` being defined in your settings,
        which is a dict with the key corresponding to a name and the value being
        a function which will accept a `Request` parameter. If no name is found,
nothing happens with the request.
This function also needs the `RPC_BACKEND` setting to be instantiated
so it can use the `respond` method.
Args:
name (str): The name of the function to be called.
data (dict or None): The data to be used to populate the Request object.
"""
logger.info('zc_events received name={name} data={data}'.format(name=name, data=data))
func, relationship_viewset = _get_job_info(name)
if not func:
logger.info('zc_events did not find name={name}'.format(name=name))
return (False, False)
else:
try:
if viewset_handler.is_compatible(func):
response = viewset_handler.handle(
func,
data,
relationship_viewset=relationship_viewset
)
else:
response = _handle_regular_func(func, data)
except Exception as e:
msg = str(e)
ex_type = e.__class__.__name__
trace = traceback.format_exc()
logger.exception(
'zc_events dispatched func threw an exception: name={name} data={data} '
'exception={ex} message={msg} trace={trace}'.format(
name=name,
data=data,
ex=ex_type,
msg=msg,
trace=trace
)
)
response = format_exception_response(ex_type, msg, trace)
backend = settings.RPC_BACKEND
logger.info('zc_events finished name={name} data={data} response={response}'.format(
name=name, data=data, response=response))
return (backend.respond(data.get('response_key'), response), True) | zc-events | /zc_events-0.3.8-py3-none-any.whl/zc_events/backends/rabbitmqredis/server.py | server.py |
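# Illustrative JOB_MAPPING configuration consumed by _get_job_info above
# (all names are hypothetical):
#
#     JOB_MAPPING = {
#         'send_email': handle_send_email,        # plain function, gets a Request
#         'user': {'func': UserViewSet,           # DRF viewset
#                  'relationship_viewset': UserRelationshipViewSet},
#     }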
ZooKeeper Python bindings
=========================
This is a self-contained distribution of the ZooKeeper Python
bindings. It should build on any unix-like system by just running the
``setup.py`` script or using an install tool like pip, easy_install or
buildout. (Windows patches to the setup script are welcome. :)
For more information **except** building instructions, see the file
ORIGINAL-README included in the source distribution.
If you have setuptools or distribute in your python path, then you can
use the ``setup.py test`` command to run the tests. Note, however,
that the tests require that a testing ZooKeeper server be running on
port 22182 of the local machine.
You can find the source code of this distribution at
https://github.com/python-zk/zc-zookeeper-static
Changelog
=========
3.4.4 (2012-09-25)
------------------
Based on Zookeeper 3.4.4.
- Include patch https://issues.apache.org/jira/browse/ZOOKEEPER-1398:
zkpython corrupts session passwords that contain nulls.
3.4.3-5 (2012-08-23)
--------------------
Based on Zookeeper 3.4.3.
- Include patch https://issues.apache.org/jira/browse/ZOOKEEPER-1398:
zkpython corrupts session passwords that contain nulls.
3.4.3-4 (2012-08-16)
--------------------
Based on Zookeeper 3.4.3.
- Include patch https://issues.apache.org/jira/browse/ZOOKEEPER-1339:
C client didn't build with `--enable-debug`.
3.4.3-3 (2012-06-06)
--------------------
Based on Zookeeper 3.4.3.
- Include patch https://issues.apache.org/jira/browse/ZOOKEEPER-1318:
In Python binding, get_children (and get and exists, and probably others)
with expired session doesn't raise exception properly.
- Include patch https://issues.apache.org/jira/browse/ZOOKEEPER-1431:
zkpython: async calls leak memory
3.4.3 (2012-04-20)
------------------
Based on Zookeeper 3.4.3.
3.3.5 (2012-03-24)
------------------
Based on Zookeeper 3.3.5.
| zc-zookeeper-static | /zc-zookeeper-static-3.4.4.zip/zc-zookeeper-static-3.4.4/README.rst | README.rst |
Early version of ZooKeeper bindings for Python. All functions are imported as methods into the zookeeper module.
Please do not rely on APIs staying constant in the short term. The handling of exceptions and failure modes is one area that is subject to change.
DEPENDENCIES:
-------------
This has only been tested against SVN (i.e. 3.2.0 in development) but should work against 3.1.1.
You will need the Python development headers installed to build the module - on many package-management systems, these can be found in python-devel.
Python >= 2.6 is required. We have tested against 2.6. We have not tested against 3.x.
BUILD AND INSTALL:
-------------------
To install, make sure that the C client has been built and that the libraries are installed in /usr/local/lib (or change this directory in setup.py). Then run:
ant install
from zookeeper/src/contrib/zkpython/.
To test, run ant test from the same directory.
You can compile the module without installing by running
ant compile
In order to use the module, zookeeper.so must be in your PYTHONPATH or in one of the directories referenced by sys.path. Running ant install should make sure that this is the case, but if you only run ant compile you probably need to add build/contrib/zkpython/* to PYTHONPATH to find the module. The C client libraries must be in a system library path, or LD_LIBRARY_PATH or DYLD_LIBRARY_PATH (Mac OS) for the module to work correctly, otherwise you will see a library not found error when trying to import the module.
NAMING CONVENTIONS:
--------------------
All methods that are named zoo_fn_name in the C library have been implemented as zookeeper.fn_name. The exception is that any function taking a watch function argument is named without the 'w' prefix (for example, zoo_wexists becomes zookeeper.exists). The variants of these functions without the watch argument (i.e. zoo_exists) have not been implemented, on the understanding that they are superseded by the zoo_w* API.
Enums and integer constants that begin ZOO_int_name are named as zookeeper.int_name.
PARAMETER CHANGES:
------------------
Zookeeper handles are represented as integers to avoid marshalling the entire structure for every call. Therefore they are opaque from Python.
Any parameter that is used to provide arguments to callback methods is not exposed in the API. Python provides better mechanisms for providing a closure to be called in the future.
Every callback gets passed the handle of the ZooKeeper instance used to register the callback.
DATA TYPES:
-----------
ACL_vectors are lists of dictionaries. Stat structures are dictionaries. String_vectors are lists of strings.
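For example (illustrative; assumes a server running on localhost:2181):

    import zookeeper
    handle = zookeeper.init("localhost:2181")
    children = zookeeper.get_children(handle, "/")  # string_vector -> list of strings
    (data, stat) = zookeeper.get(handle, "/zookeeper")
    print stat["version"]                           # stat structure -> dictionary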
EXCEPTIONS AND ERROR HANDLING:
------------------------------
Currently synchronous calls indicate failure by throwing an exception (note that this includes the synchronous calls to set up asynchronous completion callbacks!). Success is returned as an integer.
Callbacks signify failure by having the integer response code passed in.
WHAT'S NEW IN 0.4:
------------------
More test coverage.
Better reference counting, fixing at least two serious bugs.
Out-of-range zhandles are now checked, fixing a potential security hole.
Docstrings! Editing and cleanup required, but most of the text is there.
zookeeper.set_watcher is now implemented correctly.
zookeeper.client_id is now implemented correctly. zookeeper.init now respects the client_id parameter.
get_context and set_context have been removed from the API. The context mechanism is used by PyZK to store the callables that are dispatched by C-side watchers. Messing with this from Python-side causes bugs very quickly. You should wrap all desired context up in a callable and then use zookeeper.set_watcher to attach it to the global watcher.
Many methods now have optional parameters (usually if you can specify a watch, it's optional). The only time genuinely optional parameters are still mandatory is when a required parameter comes after them. Currently we still respect the ZK C client parameter ordering. For example, you can simply connect with zookeeper.init("host:port") and ignore the other three parameters.
WHAT'S NEW IN 0.3:
------------------
Some tests in zkpython/test. More to follow!
A variety of bugfixes.
Changed the way methods return results - all responses are integers now, for the client to convert to a string if it needs.
WHAT'S NEW IN 0.2:
------------------
The asynchronous API is now implemented (see zookeeper.a*).
Most enums defined in zookeeper.h are now added as constants to the module.
_set2 and a few other edge API calls have been implemented. The module is now nearly 100% feature complete!
A reference count error was tracked down and killed. More probably lurk in there!
WHAT'S NOT DONE / KNOWN ISSUES / FUTURE WORK:
---------------------------------------------
1. There may well be more memory leaks / reference count issues; however I am more confident that common paths are relatively safe.
2. There probably needs to be a more Pythonic Python-side wrapper for these functions (e.g. a zookeeper object, the ability to iterate through a tree of zk nodes)
3. Docstrings need a cleanup.
4. The way exceptions and error codes are returned needs looking at. Currently synchronous calls throw exceptions on everything but ZOK return, but asynchronous completions are simply passed the error code. Async. functions should never throw an exception on the C-side as they are practically impossible to catch. For the sync. functions, exceptions seem more reasonable, but some cases are certainly not exceptional.
Bug reports / comments very welcome!
Henry Robinson [email protected] | zc-zookeeper-static | /zc-zookeeper-static-3.4.4.zip/zc-zookeeper-static-3.4.4/ORIGINAL-README | ORIGINAL-README |
import cPickle
import logging
import os
import subprocess
import sys
import time
from ZODB.FileStorage.format import FileStorageFormatter, CorruptedDataError
from ZODB.utils import p64, u64, z64
from ZODB.FileStorage.format import TRANS_HDR_LEN
import ZODB.FileStorage
import ZODB.FileStorage.fspack
import ZODB.blob
import ZODB.fsIndex
import ZODB.TimeStamp
GIG = 1<<30
def Packer(sleep=0, transform=None, untransform=None):
def packer(storage, referencesf, stop, gc):
return FileStoragePacker(storage, stop, sleep, transform, untransform
).pack()
return packer
packer = Packer(0)
packer1 = Packer(1)
packer2 = Packer(2)
packer4 = Packer(4)
packer8 = Packer(8)
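# The module-level packers above are meant to be named in a FileStorage
# configuration (illustrative; the exact syntax depends on your ZODB setup):
#
#     <filestorage>
#         path Data.fs
#         packer zc.FileStorage:packer1
#     </filestorage>
#
# A larger suffix means a longer sleep between copy steps: each step sleeps
# for (elapsed step time * factor), trading pack speed for lower I/O load.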
class FileStoragePacker(FileStorageFormatter):
def __init__(self, storage, stop,
sleep=0, transform=None, untransform=None):
self.storage = storage
self._name = path = storage._file.name
self.sleep = sleep
self.transform_option = transform
self.untransform_option = untransform
# We open our own handle on the storage so that much of pack can
# proceed in parallel. It's important to close this file at every
# return point, else on Windows the caller won't be able to rename
# or remove the storage file.
self._file = open(path, "rb")
self._stop = stop
self.locked = 0
# The packer needs to acquire the parent's commit lock
# during the copying stage, so the two sets of lock acquire
# and release methods are passed to the constructor.
self._lock_acquire = storage._lock_acquire
self._lock_release = storage._lock_release
self._commit_lock_acquire = storage._commit_lock_acquire
self._commit_lock_release = storage._commit_lock_release
self._lock_acquire()
try:
storage._file.seek(0, 2)
self.file_end = storage._file.tell()
finally:
self._lock_release()
self.ltid = z64
def pack(self):
script = self._name+'.packscript'
open(script, 'w').write(pack_script_template % dict(
path = self._name,
stop = self._stop,
size = self.file_end,
syspath = sys.path,
blob_dir = self.storage.blob_dir,
sleep = self.sleep,
transform = self.transform_option,
untransform = self.untransform_option,
))
for name in 'error', 'log':
name = self._name+'.pack'+name
if os.path.exists(name):
os.remove(name)
proc = subprocess.Popen(
(sys.executable, script),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
close_fds=True,
)
proc.stdin.close()
out = proc.stdout.read()
if proc.wait():
if os.path.exists(self._name+'.packerror'):
v = cPickle.Unpickler(open(self._name+'.packerror', 'rb')
).load()
os.remove(self._name+'.packerror')
raise v
raise RuntimeError('The Pack subprocess failed\n'
+'-'*60+out+'-'*60+'\n')
packindex_path = self._name+".packindex"
if not os.path.exists(packindex_path):
return # already packed or pack didn't benefit
index, opos = cPickle.Unpickler(open(packindex_path, 'rb')).load()
os.remove(packindex_path)
os.remove(self._name+".packscript")
output = open(self._name + ".pack", "r+b")
output.seek(0, 2)
assert output.tell() == opos
self.copyRest(self.file_end, output, index)
# OK, we've copied everything. Now we need to wrap things up.
pos = output.tell()
output.close()
return pos, index
def copyRest(self, input_pos, output, index):
# Copy data records written since packing started.
self._commit_lock_acquire()
self.locked = 1
# Re-open the file in unbuffered mode.
# The main thread may write new transactions to the file,
# which creates the possibility that we will read a status
# 'c' transaction into the pack thread's stdio buffer even
# though we're acquiring the commit lock. Transactions
# can still be in progress throughout much of packing, and
# are written to the same physical file but via a distinct
# Python file object. The code used to leave off the
# trailing 0 argument, and then on every platform except
# native Windows it was observed that we could read stale
# data from the tail end of the file.
self._file = open(self._name, "rb", 0)
try:
try:
while 1:
# The call below will raise CorruptedDataError at EOF.
input_pos = self._copyNewTrans(
input_pos, output, index,
self._commit_lock_acquire, self._commit_lock_release)
except CorruptedDataError, err:
# The last call to copyOne() will raise
# CorruptedDataError, because it will attempt to read past
# the end of the file. Double-check that the exception
# occurred for this reason.
self._file.seek(0, 2)
endpos = self._file.tell()
if endpos != err.pos:
raise
finally:
self._file.close()
transform = None
def _copyNewTrans(self, input_pos, output, index,
acquire=None, release=None):
tindex = {}
copier = PackCopier(output, index, tindex)
th = self._read_txn_header(input_pos)
if release is not None:
release()
transform = self.transform
start_time = time.time()
output_tpos = output.tell()
copier.setTxnPos(output_tpos)
output.write(th.asString())
tend = input_pos + th.tlen
input_pos += th.headerlen()
while input_pos < tend:
h = self._read_data_header(input_pos)
prev_txn = None
if h.plen:
data = self._file.read(h.plen)
else:
# If a current record has a backpointer, fetch
# refs and data from the backpointer. We need
# to write the data in the new record.
data = self.fetchBackpointer(h.oid, h.back)
if h.back:
prev_txn = self.getTxnFromData(h.oid, h.back)
if data and (transform is not None):
data = transform(data)
copier.copy(h.oid, h.tid, data, prev_txn,
output_tpos, output.tell())
input_pos += h.recordlen()
output_pos = output.tell()
tlen = p64(output_pos - output_tpos)
output.write(tlen)
output_pos += 8
if tlen != th.tlen:
# Update the transaction length
output.seek(output_tpos + 8)
output.write(tlen)
output.seek(output_pos)
index.update(tindex)
tindex.clear()
time.sleep((time.time()-start_time)*self.sleep)
if acquire is not None:
acquire()
return input_pos + 8
def fetchBackpointer(self, oid, back):
if back == 0:
return None
data, tid = self._loadBackTxn(oid, back, 0)
return data
class PackCopier(ZODB.FileStorage.fspack.PackCopier):
def _txn_find(self, tid, stop_at_pack):
# _pos always points just past the last transaction
pos = self._pos
while pos > 4:
self._file.seek(pos - 8)
pos = pos - u64(self._file.read(8)) - 8
self._file.seek(pos)
h = self._file.read(TRANS_HDR_LEN)
_tid = h[:8]
if _tid == tid:
return pos
if stop_at_pack:
if h[16] == 'p':
break
return None
pack_script_template = """
import sys, logging
sys.path[:] = %(syspath)r
import cPickle
import zc.FileStorage
logging.getLogger().setLevel(logging.INFO)
handler = logging.FileHandler(%(path)r+'.packlog')
handler.setFormatter(logging.Formatter(
'%%(asctime)s %%(name)s %%(levelname)s %%(message)s'))
logging.getLogger().addHandler(handler)
try:
packer = zc.FileStorage.PackProcess(%(path)r, %(stop)r, %(size)r,
%(blob_dir)r, %(sleep)s,
%(transform)r, %(untransform)r)
packer.pack()
except Exception, v:
logging.exception('packing')
try:
v = cPickle.dumps(v)
except Exception:
pass
else:
open(%(path)r+'.packerror', 'w').write(v)
raise
"""
class PackProcess(FileStoragePacker):
def __init__(self, path, stop, current_size, blob_dir,
sleep, transform, untransform):
self._name = path
# We open our own handle on the storage so that much of pack can
# proceed in parallel. It's important to close this file at every
# return point, else on Windows the caller won't be able to rename
# or remove the storage file.
if blob_dir:
self.pack_blobs = True
self.blob_removed = open(os.path.join(blob_dir, '.removed'), 'w')
else:
self.pack_blobs = False
self._file = open(path, "rb")
self._name = path
self._stop = stop
self.locked = 0
self.file_end = current_size
self.ltid = z64
self._freecache = _freefunc(self._file)
self.sleep = sleep
if isinstance(transform, str):
transform = getglobal(transform)
self.transform = transform
if isinstance(untransform, str):
untransform = getglobal(untransform)
self.untransform = untransform
logging.info('packing to %s, sleep %s',
ZODB.TimeStamp.TimeStamp(self._stop),
self.sleep)
def _read_txn_header(self, pos, tid=None):
self._freecache(pos)
return FileStoragePacker._read_txn_header(self, pos, tid)
def pack(self):
packed, index, packpos = self.buildPackIndex(self._stop, self.file_end)
logging.info('initial scan %s objects at %s', len(index), packpos)
if packed:
# nothing to do
logging.info('done, nothing to do')
self._file.close()
return
logging.info('copy to pack time')
output = open(self._name + ".pack", "w+b")
self._freeoutputcache = _freefunc(output)
index, new_pos = self.copyToPacktime(packpos, index, output)
if new_pos == packpos:
# pack didn't free any data. there's no point in continuing.
self._file.close()
output.close()
os.remove(self._name + ".pack")
logging.info('done, no decrease')
return
logging.info('copy from pack time')
self._freecache = self._freeoutputcache = lambda pos: None
self.copyFromPacktime(packpos, self.file_end, output, index)
# Save the index so the parent process can use it as a starting point.
f = open(self._name + ".packindex", 'wb')
cPickle.Pickler(f, 1).dump((index, output.tell()))
f.close()
output.flush()
os.fsync(output.fileno())
output.close()
self._file.close()
logging.info('packscript done')
def buildPackIndex(self, stop, file_end):
index = ZODB.fsIndex.fsIndex()
pos = 4L
packed = True
log_pos = pos
while pos < file_end:
start_time = time.time()
th = self._read_txn_header(pos)
if th.tid > stop:
break
self.checkTxn(th, pos)
if th.status != "p":
packed = False
tpos = pos
end = pos + th.tlen
pos += th.headerlen()
while pos < end:
dh = self._read_data_header(pos)
self.checkData(th, tpos, dh, pos)
if dh.plen or dh.back:
index[dh.oid] = pos
else:
# deleted
if dh.oid in index:
del index[dh.oid]
pos += dh.recordlen()
tlen = self._read_num(pos)
if tlen != th.tlen:
self.fail(pos, "redundant transaction length does not "
"match initial transaction length: %d != %d",
tlen, th.tlen)
pos += 8
if pos - log_pos > GIG:
logging.info("read %s" % pos)
log_pos = pos
time.sleep((time.time()-start_time)*self.sleep)
return packed, index, pos
def copyToPacktime(self, packpos, index, output):
pos = new_pos = self._metadata_size
self._file.seek(0)
output.write(self._file.read(self._metadata_size))
new_index = ZODB.fsIndex.fsIndex()
pack_blobs = self.pack_blobs
transform = self.transform
untransform = self.untransform
if untransform is None:
is_blob_record = ZODB.blob.is_blob_record
else:
_is_blob_record = ZODB.blob.is_blob_record
def is_blob_record(data):
return _is_blob_record(untransform(data))
log_pos = pos
while pos < packpos:
start_time = time.time()
th = self._read_txn_header(pos)
new_tpos = 0L
tend = pos + th.tlen
pos += th.headerlen()
while pos < tend:
h = self._read_data_header(pos)
if index.get(h.oid) != pos:
pos += h.recordlen()
if pack_blobs:
if h.plen:
data = self._file.read(h.plen)
else:
data = self.fetchDataViaBackpointer(h.oid, h.back)
if data and is_blob_record(data):
# We need to remove the blob record. Maybe we
# need to remove oid.
# But first, we need to make sure the
# record we're looking at isn't a dup of
# the current record. There's a bug in ZEO
# blob support that causes duplicate data
# records.
rpos = index.get(h.oid)
is_dup = (rpos and
self._read_data_header(rpos).tid == h.tid)
if not is_dup:
# Note that we delete the revision.
# If rpos was None, then we could
# remove the oid. What if somehow,
# another blob update happened after
# the deletion. This shouldn't happen,
# but we can leave it to the cleanup
# code to take care of removing the
# directory for us.
self.blob_removed.write(
(h.oid+h.tid).encode('hex')+'\n')
continue
pos += h.recordlen()
# If we are going to copy any data, we need to copy
# the transaction header. Note that we will need to
# patch up the transaction length when we are done.
if not new_tpos:
th.status = "p"
new_tpos = output.tell()
output.write(th.asString())
if h.plen:
data = self._file.read(h.plen)
else:
# If a current record has a backpointer, fetch
# refs and data from the backpointer. We need
# to write the data in the new record.
data = self.fetchBackpointer(h.oid, h.back) or ''
if transform is not None:
data = self.transform(data)
h.prev = 0
h.back = 0
h.plen = len(data)
h.tloc = new_tpos
new_index[h.oid] = output.tell()
output.write(h.asString())
output.write(data)
if not data:
# Packed records never have backpointers (?).
# If there is no data, write a z64 backpointer.
# This is a George Bailey event.
output.write(z64)
if new_tpos:
new_pos = output.tell()
tlen = p64(new_pos - new_tpos)
output.write(tlen)
new_pos += 8
if tlen != th.tlen:
# Update the transaction length
output.seek(new_tpos + 8)
output.write(tlen)
output.seek(new_pos)
self._freeoutputcache(new_pos)
pos += 8
if pos - log_pos > GIG:
logging.info("read %s" % pos)
log_pos = pos
time.sleep((time.time()-start_time)*self.sleep)
return new_index, new_pos
def fetchDataViaBackpointer(self, oid, back):
"""Return the data for oid via backpointer back
If `back` is 0 or ultimately resolves to 0, return None.
In this case, the transaction undoes the object
creation.
"""
if back == 0:
return None
data, tid = self._loadBackTxn(oid, back, 0)
return data
def copyFromPacktime(self, pos, file_end, output, index):
log_pos = pos
while pos < file_end:
start_time = time.time()
pos = self._copyNewTrans(pos, output, index)
self._freeoutputcache(output.tell())
if pos - log_pos > GIG:
logging.info("read %s" % pos)
log_pos = pos
time.sleep((time.time()-start_time)*self.sleep)
return pos
def getglobal(s):
module, expr = s.split(':', 1)
return eval(expr, __import__(module, {}, {}, ['*']).__dict__)
def _freefunc(f):
# Return an posix_fadvise-based cache freeer.
try:
import _zc_FileStorage_posix_fadvise
except ImportError:
return lambda pos: None
fd = f.fileno()
last = [0]
def _free(pos):
if pos == 4:
last[0] = 0
elif (pos - last[0]) < 50000000:
return
last[0] = pos
_zc_FileStorage_posix_fadvise.advise(
fd, 0, last[0]-10000,
_zc_FileStorage_posix_fadvise.POSIX_FADV_DONTNEED)
return _free | zc.FileStorage | /zc.FileStorage-1.2.0.tar.gz/zc.FileStorage-1.2.0/src/zc/FileStorage/__init__.py | __init__.py |
import zc.ajaxform.application
import zc.ajaxform.interfaces
import zope.app.form.browser.interfaces
import zope.app.form.interfaces
import zope.cachedescriptors.property
import zope.component
import zope.formlib.form
import zope.interface
import zope.publisher.interfaces.browser
import zope.security.checker
class FormType(type):
def __get__(self, inst, class_):
if inst is None:
return self
return self(inst)
_FormBase = FormType('_FormBase', (object, ), {})
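# Because of FormType.__get__, a Form subclass used as a class attribute acts
# as a descriptor: accessing it on an application *instance* returns a form
# bound to that instance rather than the class itself. Illustrative use (the
# application class is hypothetical):
#
#     class MyApp(zc.ajaxform.application.Application):
#         class MyForm(Form):
#             form_fields = zope.formlib.form.Fields(ISomeSchema)
#
#     # MyApp(context, request).MyForm  ->  MyForm(app_instance)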
class Form(_FormBase):
zope.interface.implements(
zope.publisher.interfaces.browser.IBrowserPublisher)
__Security_checker__ = zope.security.checker.NamesChecker((
'__call__', 'browserDefault', 'publishTraverse'))
def __init__(self, app, request=None):
self.app = app
if request is None:
request = app.request
self.request = request
self.context = app.context
@zope.cachedescriptors.property.Lazy
def prefix(self):
return self.base_href.replace('/', '.')
@zope.cachedescriptors.property.Lazy
def base_href(self):
base_href = getattr(self.app, 'base_href', None)
if base_href is not None:
base_href += '/'
else:
base_href = ''
return base_href+self.__class__.__name__
def get_definition(self):
widgets = zope.formlib.form.setUpWidgets(
self.form_fields, self.prefix, self.context, self.request,
ignore_request=True)
for widget in widgets:
# Make sure that we have the right type of widget.
assert hasattr(widget, 'js_config'), (
'Could not find a js widget for %r' % widget.name)
l_fields = []
if hasattr(self, 'leftFields'):
l_fields = self.leftFields
return dict(
widgets=[widget.js_config() for widget in widgets],
actions=[dict(label=action.label,
url="%s/%s" % (self.base_href,
action.__name__.split('.')[-1]),
name=action.__name__)
for action in self.actions],
left_fields=dict((widget.id, widget.id in l_fields
) for widget in widgets),
prefix=self.prefix
)
def __call__(self):
"""Return rendered js widget configs
"""
return zc.ajaxform.application.result(
dict(definition=self.get_definition()))
def publishTraverse(self, request, name):
result = getattr(self, name, None)
if isinstance(result, zope.formlib.form.Action):
return Action(self, result)
raise zope.publisher.interfaces.NotFound(self, name, request)
def browserDefault(self, request):
return self, ()
def getObjectData(self, ob, extra=()):
widgets = zope.formlib.form.setUpWidgets(
self.form_fields, self.prefix, self.context, self.request,
ignore_request=True)
result = {}
for widget in widgets:
if widget.id in extra:
result[widget.id] = extra[widget.id]
else:
v = widget.formValue(widget.context.get(ob))
if v is not None:
result[widget.id] = v
return result
class Action(object):
zope.interface.implementsOnly(
zope.publisher.interfaces.browser.IBrowserPublisher)
def __init__(self, form, action):
self.form = form
self.action = action
def __call__(self):
widgets = zope.formlib.form.setUpWidgets(
self.form.form_fields,
self.form.prefix,
self.form.context,
self.form.request,
ignore_request=True)
data = {}
field_errors = {}
for input, widget in widgets.__iter_input_and_widget__():
if (input and
zope.app.form.interfaces.IInputWidget.providedBy(widget)
):
if (not widget.hasInput()) and not widget.required:
continue
name = widget.name
if name.startswith(self.form.prefix+'.'):
name = name[len(self.form.prefix)+1:]
try:
data[name] = widget.getInputValue()
except zope.app.form.interfaces.InputErrors, error:
if not isinstance(error, basestring):
view = zope.component.getMultiAdapter(
(error, self.form.request),
zope.app.form.browser.interfaces.
IWidgetInputErrorView,
)
error = view.snippet()
error = error[error.find('>')+1:error.find('<',2)]
field_errors[widget.id] = error
if field_errors:
return zc.ajaxform.application.result(dict(errors=field_errors))
# XXX invariants and action conditions
# XXX action validator and failure handlers
return zc.ajaxform.application.result(self.action.success(data))
def browserDefault(self, request):
return self, () | zc.ajaxform | /zc.ajaxform-0.7.0.tar.gz/zc.ajaxform-0.7.0/src/zc/ajaxform/form.py | form.py |
import zc.ajaxform.application
import zc.ajaxform.interfaces
import zc.ajaxform.widgets
import zc.ajaxform.form
import zc.sourcefactory.basic
import zope.component
import zope.interface
import zope.formlib
import zope.schema
class IAddress(zope.interface.Interface):
street = zope.schema.TextLine(
title = u"Street",
description = u"The street",
)
city = zope.schema.TextLine(
title = u"City",
description = u"The city",
)
awesomeness = zope.schema.Int(
title = u"Awesomeness",
description = u"The awesomeness on a scale of 1 to 10",
min = 1,
max = 10,
)
class Pets(zc.sourcefactory.basic.BasicSourceFactory):
def getValues(self):
return (u'Dog', u'Cat', u'Fish')
class Pet(zope.schema.TextLine):
"""A textline representing a pet.
This is just a textline, but we also have a source of common pets that
the user can choose from.
"""
class IPerson(zope.interface.Interface):
first_name = zope.schema.TextLine(
title = u"First name",
description = u"Given name.",
default= u'Happy'
)
last_name = zope.schema.TextLine(
title = u"Last name",
description = u"Family name.",
default= u'Camper'
)
favorite_color = zope.schema.TextLine(
title = u"Favorite color",
required = False,
default= u'Blue'
)
age = zope.schema.Int(
title = u"Age",
description = u"Age in years",
min = 0,
max = 200,
default= 23
)
happy = zope.schema.Bool(
title = u"Happy",
description = u"Are they happy?",
default= True
)
pet = Pet(
title=u'Pet',
description=u'This person\'s best friend.',
required=False,
)
temperment = zope.schema.Choice(
title = u"Temperment",
description = u"What is the person like?",
values = ['Nice', 'Mean', 'Ornery', 'Right Neighborly'],
default = u'Right Neighborly'
)
weight = zope.schema.Decimal(
title = u"Weight",
description = u"Weight in lbs?"
)
description = zope.schema.Text(
title = u"Description",
description = u"What do they look like?",
default = u'10ft tall\nRazor sharp scales.'
)
secret = zope.schema.TextLine(
title = u"Secret Key",
description = u"Don't tell anybody",
default = u'5ecret sauce'
)
siblings = zope.schema.Int(
title = u"Siblings",
description = u"Number of siblings",
min = 0,
max = 8,
default = 1
)
addresses = zope.schema.List(
title = u'Addresses',
description = u"All my wonderful homes",
value_type = zope.schema.Object(schema=IAddress),
default= [{'street':'123 fake street',
'city': 'fakeville',
'awesomeness': '9'},
{'street':'345 false street',
'city': 'falsetown',
'awesomeness': '9001'}
]
)
other = zope.schema.Text(
title = u"Other",
description = u"Any other notes",
default = u"I've got a magic toenail"
)
class Person:
zope.interface.implements(IPerson)
def __init__(self, first_name, last_name, favorite_color, age, happy,
pet, temperment, weight, description, secret, siblings,
addresses, other):
self.first_name = first_name
self.last_name = last_name
self.favorite_color = favorite_color
self.age = age
self.happy = happy
self.pet = pet
self.temperment = temperment
self.weight = weight
self.description = description
self.secret = secret
self.siblings = siblings
self.addresses = addresses
self.other = other
class FormExample(zc.ajaxform.application.Application):
resource_library_name = None
class ExampleForm(zc.ajaxform.form.Form):
leftFields = ('first_name', 'last_name', 'age', 'other')
form_fields = zope.formlib.form.Fields(IPerson)
form_fields['secret'].custom_widget = zc.ajaxform.widgets.Hidden
form_fields['siblings'].custom_widget = zc.ajaxform.widgets.NumberSpinner
@zope.formlib.form.action("Register")
def register(self, action, data):
person = Person(**data)
return dict(
data = data,
self_class_name = self.__class__.__name__,
self_app_class_name = self.app.__class__.__name__,
self_context_class_name = self.context.__class__.__name__
)
class PetWidget(zc.ajaxform.widgets.ComboBox):
zope.component.adapts(
Pet,
zc.ajaxform.interfaces.IAjaxRequest)
zope.interface.implements(
zc.ajaxform.interfaces.IInputWidget)
def __init__(self, context, request):
super(PetWidget, self).__init__(context, Pets(), request) | zc.ajaxform | /zc.ajaxform-0.7.0.tar.gz/zc.ajaxform-0.7.0/src/zc/ajaxform/form_example.py | form_example.py |
import logging
import simplejson
import zc.ajaxform.interfaces
import zc.resourcelibrary
import zope.app.exception.browser.unauthorized
import zope.app.pagetemplate
import zope.cachedescriptors.property
import zope.component
import zope.exceptions.interfaces
import zope.interface
import zope.publisher.browser
import zope.publisher.interfaces.browser
import zope.security.checker
import zope.security.proxy
import zope.traversing.interfaces
def result(data):
if not data:
data = {}
return simplejson.dumps(data)
class _method(object):
zope.interface.implements(
zope.publisher.interfaces.browser.IBrowserPublisher)
__Security_checker__ = zope.security.checker.NamesChecker(
('__call__', 'browserDefault')
)
def __init__(self, inst, func):
self.im_self = inst
self.im_func = func
def __call__(self, *a, **k):
return self.im_func(self.im_self, *a, **k)
def browserDefault(self, request):
return self, ()
class _jsonmethod(_method):
def __call__(self, *a, **k):
return result(self.im_func(self.im_self, *a, **k))
class page(object):
_method_class = _method
def __init__(self, func):
self.func = func
def __get__(self, inst, cls):
if inst is None:
return self
return self._method_class(inst, self.func)
class jsonpage(page):
_method_class = _jsonmethod
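# Illustrative use of the decorators above (the application is hypothetical):
# @page publishes a method as a browser view; @jsonpage additionally
# serializes the return value with `result`.
#
#     class StatusApp(Application):
#         resource_library_name = None
#
#         @jsonpage
#         def status(self):
#             return dict(ok=True)   # served as '{"ok": true}'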
class AttributeTraversable(object):
zope.interface.implements(
zope.publisher.interfaces.browser.IBrowserPublisher)
def publishTraverse(self, request, name):
name = name.replace('.', '_')
result = getattr(self, name, None)
if zope.publisher.interfaces.browser.IBrowserPublisher.providedBy(
result):
zope.interface.directlyProvides(
request,
zc.ajaxform.interfaces.IAjaxRequest,
zope.interface.directlyProvidedBy(request),
)
return result
raise zope.publisher.interfaces.NotFound(self, name, request)
@zope.cachedescriptors.property.Lazy
def __parent__(self):
return self.context
class PublicTraversable(object):
__Security_checker__ = zope.security.checker.NamesChecker((
'browserDefault', 'publishTraverse'))
class Trusted(object):
def __init__(self, context, *a, **kw):
context = zope.security.proxy.removeSecurityProxy(context)
super(Trusted, self).__init__(context, *a, **kw)
class Application(AttributeTraversable):
zope.component.adapts(
zope.traversing.interfaces.IContainmentRoot,
zope.publisher.interfaces.browser.IBrowserRequest,
)
def __init__(self, context, request):
self.context = context
self.request = request
def browserDefault(self, request):
return self, ('index.html', )
def template(self):
return '<html><head></head></html>'
@page
def index_html(self):
try:
library = self.resource_library_name
except AttributeError:
raise AttributeError(
"No resource_library_name attribute is defined.\n"
"This attribute is required to specify the name of a\n"
"library to use (need). It may be set to None to avoid\n"
"requiring a resource library."
)
if library is not None:
zc.resourcelibrary.need(library)
return self.template()
class SubApplication(AttributeTraversable):
def __init__(self, context, request, base_href=None):
self.context = context
self.request = request
if base_href is not None:
self.base_href = base_href
class traverser(object):
def __init__(self, func, inst=None):
self.func = func
self.inst = inst
def __get__(self, inst, cls):
if inst is None:
return self
return traverser(self.func, inst)
zope.interface.implements(
zope.publisher.interfaces.browser.IBrowserPublisher)
__Security_checker__ = zope.security.checker.NamesChecker((
'publishTraverse', ))
def publishTraverse(self, request, name):
return self.func(self.inst, request, name)
def __call__(self, *args, **kw):
if self.inst is None:
return self.func(*args, **kw)
else:
return self.func(self.inst, *args, **kw)
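# Illustrative traverser use (hypothetical): expose dynamically named
# sub-objects, so that /.../documents/<name> resolves through the decorated
# method.
#
#     class MyApp(SubApplication):
#         @traverser
#         def documents(self, request, name):
#             return Document(self.context, name)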
class UserError:
zope.interface.implements(
zope.publisher.interfaces.browser.IBrowserPublisher)
zope.component.adapts(zope.exceptions.interfaces.IUserError,
zc.ajaxform.interfaces.IAjaxRequest)
def __init__(self, context, request):
self.context = context
self.request = request
def __call__(self):
return simplejson.dumps(dict(
error = str(self.context),
))
class ExceptionView(UserError):
zope.component.adapts(Exception,
zc.ajaxform.interfaces.IAjaxRequest)
def __call__(self):
self.request.response.setStatus(500)
logger = logging.getLogger(__name__)
logger.exception(
'SysError created by zc.ajaxform'
)
return simplejson.dumps(dict(
error = "%s: %s" % (self.context.__class__.__name__, self.context),
)) | zc.ajaxform | /zc.ajaxform-0.7.0.tar.gz/zc.ajaxform-0.7.0/src/zc/ajaxform/application.py | application.py |
import decimal
import pytz
import rwproperty
import zc.ajaxform.interfaces
import zc.form.interfaces
import zc.sourcefactory.basic
import zope.app.form
import zope.app.form.browser.interfaces
import zope.app.form.browser.widget
import zope.app.form.interfaces
import zope.cachedescriptors.property
import zope.component
import zope.html.field
import zope.interface
import zope.schema.interfaces
class Base(zope.app.form.InputWidget):
zope.interface.implements(zc.ajaxform.interfaces.IInputWidget)
widget_constructor = None
def __init__(self, context, request):
self.custom_type = None
self.id = context.__name__
super(Base, self).__init__(context, request)
def js_config(self, **kw):
config = dict(
fieldLabel = self.label,
fieldHint = self.hint,
name = self.id,
id = self.id,
required = self.required,
**kw)
display_options = zope.component.queryAdapter(
self, zc.ajaxform.interfaces.IDisplayOptions)
if display_options:
config['display_options'] = display_options
if not self.widget_constructor:
raise ValueError(
'widget_constructor not defined.')
config['widget_constructor'] = self.widget_constructor
if self._renderedValueSet():
value = self.formValue(self._data)
if value is not None:
config['value'] = value
return config
def formValue(self, v):
if v == self.context.missing_value:
return None
return unicode(v)
def value(self, raw):
return self._toValue(raw)
def _toValue(self, v): # for backward compat for a while
return v
def hasInput(self):
return self.id in self.request.form
def _is_missing(self, raw):
return False
def _get_raw(self):
return self.request.form[self.id]
def getInputValue(self):
if not self.hasInput():
raise zope.app.form.interfaces.MissingInputError(
self.id, self.label, self.label+': Missing Input')
raw = self._get_raw()
if self._is_missing(raw):
if self.required:
raise zope.app.form.interfaces.MissingInputError(
self.id, self.label, self.label+': Missing Input')
else:
return self.context.missing_value
value = self.value(raw)
# value must be valid per the field constraints
try:
self.context.validate(value)
except zope.schema.interfaces.ValidationError, v:
raise zope.app.form.interfaces.WidgetInputError(
self.context.__name__, self.label, v)
return value
@zope.cachedescriptors.property.Lazy
def required(self):
return self.context.required
class BasicDisplay(zope.app.form.browser.widget.DisplayWidget):
zope.component.adapts(
zope.schema.interfaces.ITextLine,
zc.ajaxform.interfaces.IAjaxRequest,
)
widget_constructor = 'zc.ajaxform.widgets.BasicDisplay'
def __init__(self, context, request):
self.id = context.__name__
super(BasicDisplay, self).__init__(context, request)
def formValue(self, v):
if v == self.context.missing_value:
return None
return unicode(v)
def js_config(self, **kw):
# XXX needs tests.
config = dict(
fieldLabel = self.label,
fieldHint = self.hint,
name = self.id,
id = self.id,
required = self.required,
**kw)
if not self.widget_constructor:
raise ValueError(
'widget_constructor not defined.')
display_options = zope.component.queryAdapter(
self, zc.ajaxform.interfaces.IDisplayOptions)
if display_options:
config['display_options'] = display_options
config['widget_constructor'] = self.widget_constructor
if self._renderedValueSet():
value = self._data
if value is not None:
config['value'] = value
return config
class RichTextDisplay(BasicDisplay):
zope.component.adapts(
zope.schema.interfaces.IText,
zc.ajaxform.interfaces.IAjaxRequest,
)
widget_constructor = 'zc.ajaxform.widgets.RichTextDisplay'
class InputBool(Base):
zope.component.adapts(
zope.schema.interfaces.IBool,
zc.ajaxform.interfaces.IAjaxRequest,
)
widget_constructor = 'zope.schema.Bool'
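    # An unchecked checkbox omits its key from the form entirely, so
    # treat input as always present and map anything but 'on' to False.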
def hasInput(self):
return True
def getInputValue(self):
return self.request.form.get(self.id, '') == 'on'
def formValue(self, v):
if v == self.context.missing_value:
return None
return bool(v)
class InputChoiceIterable(Base):
zope.component.adapts(
zope.schema.interfaces.IChoice,
zope.schema.interfaces.IIterableSource,
zc.ajaxform.interfaces.IAjaxRequest,
)
widget_constructor = 'zope.schema.Choice'
def __init__(self, context, source, request):
Base.__init__(self, context, request)
self.source = source
@rwproperty.getproperty
def terms(self):
return zope.component.getMultiAdapter(
(self.source, self.request),
zope.app.form.browser.interfaces.ITerms)
def _is_missing(self, raw):
return not raw
def js_config(self, **kw):
result = Base.js_config(self, **kw)
result['values'] = [
[term.token, term.title]
for term in (self.terms.getTerm(v) for v in self.source)
]
if self.required:
result['allowBlank'] = False
return result
def formValue(self, v):
if v == self.context.missing_value:
return None
return self.terms.getTerm(v).token
def value(self, v):
return self.terms.getValue(v)
class InputChoiceTokenized(InputChoiceIterable):
zope.component.adapts(
zope.schema.interfaces.IChoice,
zope.schema.interfaces.IVocabularyTokenized,
zc.ajaxform.interfaces.IAjaxRequest,
)
def js_config(self, **kw):
result = Base.js_config(self, **kw)
result['hiddenName'] = result['name']+'.value'
result['values'] = [
[term.token, term.title or unicode(term.value)]
for term in self.source
]
if self.required:
result['allowBlank'] = False
return result
def formValue(self, v):
if v == self.context.missing_value:
return None
return self.source.getTerm(v).token
def value(self, v):
return self.source.getTermByToken(v).value
class InputTimeZone(InputChoiceTokenized):
zope.component.adapts(
zope.schema.interfaces.IChoice,
zc.form.interfaces.AvailableTimeZones,
zc.ajaxform.interfaces.IAjaxRequest
)
_timezones = sorted([(tzname, pytz.timezone(tzname))
for tzname in pytz.all_timezones])
def __init__(self, context, source, request):
source = zope.schema.vocabulary.SimpleVocabulary.fromItems(
self._timezones)
InputChoiceIterable.__init__(self, context, source, request)
class InputInt(Base):
zope.component.adapts(
zope.schema.interfaces.IInt,
zc.ajaxform.interfaces.IAjaxRequest,
)
widget_constructor = 'zope.schema.Int'
def js_config(self, **kw):
config = Base.js_config(self, **kw)
if self.required:
config['allowBlank'] = False
if self.context.min is not None:
config['field_min'] = self.context.min
if self.context.max is not None:
config['field_max'] = self.context.max
return config
def _is_missing(self, raw):
return not raw
def value(self, v):
try:
return int(v)
        except (ValueError, TypeError):
raise zope.app.form.interfaces.ConversionError(
u"Invalid integer: %r" % v
)
class NumberSpinner(InputInt):
zope.component.adapts(
zc.ajaxform.interfaces.INumberSpinner,
zc.ajaxform.interfaces.IAjaxRequest,
)
widget_constructor = 'zc.ajaxform.widgets.NumberSpinner'
class InputDecimal(Base):
zope.component.adapts(
zope.schema.interfaces.IDecimal,
zc.ajaxform.interfaces.IAjaxRequest)
widget_constructor = 'zope.schema.Decimal'
def js_config(self, **kw):
result = Base.js_config(self, **kw)
if self.required:
result['allowBlank'] = False
return result
def _is_missing(self, raw):
return not raw
def _toForm(self, v):
return str(v)
def value(self, v):
try:
return decimal.Decimal(v)
except decimal.InvalidOperation:
raise zope.app.form.interfaces.ConversionError(
u"Invalid decimal: %r" % v)
def getInputValue(self):
v = super(InputDecimal, self).getInputValue()
return str(v)
class InputTextLine(Base):
zope.component.adapts(
zope.schema.interfaces.ITextLine,
zc.ajaxform.interfaces.IAjaxRequest,
)
widget_constructor = 'zope.schema.TextLine'
def _is_missing(self, raw):
return (not raw) and self.required
def js_config(self, **kw):
config = Base.js_config(self, **kw)
if self.context.min_length is not None:
config['minLength'] = self.context.min_length
if self.context.min_length > 0 and self.required:
config['allowBlank'] = False
if self.context.max_length is not None:
config['maxLength'] = self.context.max_length
return config
class InputPassword(InputTextLine):
zope.component.adapts(
zope.schema.interfaces.IPassword,
zc.ajaxform.interfaces.IAjaxRequest,
)
widget_constructor = 'zope.schema.Password'
class InputText(InputTextLine):
zope.component.adapts(
zope.schema.interfaces.IText,
zc.ajaxform.interfaces.IAjaxRequest,
)
widget_constructor = 'zope.schema.Text'
class InputRichText(InputText):
zope.component.adapts(
zope.html.field.IHtmlFragmentField,
zc.ajaxform.interfaces.IAjaxRequest,
)
widget_constructor = 'zc.ajaxform.widgets.RichText'
class ComboBox(InputChoiceIterable, InputTextLine):
widget_constructor = 'zc.ajaxform.widgets.ComboBox'
def __init__(self, context, source, request):
InputChoiceIterable.__init__(self, context, source, request)
js_config = InputChoiceIterable.js_config
def formValue(self, v):
if not v:
return None
else:
return v
def value(self, v):
return InputTextLine.value(self, v)
class Hidden(Base):
widget_constructor = 'zc.ajaxform.widgets.Hidden'
class RecordList(Base):
zope.component.adapts(
zope.schema.interfaces.IList,
zc.ajaxform.interfaces.IAjaxRequest,
)
widget_constructor = 'zope.schema.List'
@property
def record_schema(self):
try:
record_schema = zope.formlib.form.FormFields(
self.context.value_type.schema)
except TypeError:
record_schema = self.context.value_type.schema
return record_schema
def setUpRecordWidgets(self):
record_widgets = zope.formlib.form.setUpWidgets(
self.record_schema, self.id, self.context.context,
self.request,
ignore_request=True)
return record_widgets
def js_config(self, **kw):
record_widgets = self.setUpRecordWidgets()
config = Base.js_config(self, **kw)
for widget in record_widgets:
assert hasattr(widget, 'js_config'), (
'Could not find a js widget for %r' % widget.name)
config['record_schema'] = dict(
widgets=[widget.js_config() for widget in record_widgets],
readonly = self.context.readonly
)
return config
def hasInput(self):
return True
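    # Each record arrives flattened in the form as keys of the shape
    # ``<list id>.<field id>.<record index>`` (e.g. ``items.name.0``);
    # the helpers below reassemble those keys into a list of dicts.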
def _get_record_components(self, idx):
record_widgets = self.setUpRecordWidgets()
str_num = '.%d' % idx
prefix = '%s.' % self.id
# ensure all widget components are present
# if not, all records have been accumulated
component_vals = [
wid for wid in record_widgets
if ('%s%s%s' % (prefix, wid.id, str_num)) in self.request.form]
if not component_vals:
return None
record = {}
for wid in record_widgets:
wid.id = '%s%s%s' % (prefix, wid.id, str_num)
value = wid.getInputValue()
name = wid.id[len(prefix):len(wid.id)-len(str_num)]
record[name] = value
return record
def _get_raw(self):
        form_len = len(self.request.form)
i = 0
raw = {}
while True:
record = self._get_record_components(i)
if record is None:
break
raw[i] = record
i += 1
if i >= form_len:
break
if not raw and self.required:
raise zope.app.form.interfaces.MissingInputError(
self.id, self.label, self.label+': Missing Input')
return [raw_data for raw_data in raw.values()]
def validate(self, value):
try:
keys = [field.__name__ for field in self.record_schema]
for item in value:
for key in item:
if key not in keys:
raise zope.schema.interfaces.ValidationError(
'%s is not part of the recordschema' % (key))
except TypeError:
raise zope.schema.interfaces.ValidationError(
'%s is not a properly formatted value for a list field' % (
value))
def getInputValue(self):
if not self.hasInput():
raise zope.app.form.interfaces.MissingInputError(
self.id, self.label, self.label+': Missing Input')
raw = self._get_raw()
value = self.value(raw)
# value must be valid per the field constraints
try:
self.validate(value)
except zope.schema.interfaces.ValidationError, v:
raise zope.app.form.interfaces.WidgetInputError(
self.context.__name__, self.label, v)
return value
def formValue(self, value):
record_widgets = self.setUpRecordWidgets()
new_value = []
if value:
for item in value:
new_value.append(
dict([(widget.id, widget.formValue(item.get(widget.id)))
for widget in record_widgets]))
return new_value | zc.ajaxform | /zc.ajaxform-0.7.0.tar.gz/zc.ajaxform-0.7.0/src/zc/ajaxform/widgets.py | widgets.py |
===========
What is it?
===========
The ``zc.async`` package provides **an easy-to-use Python tool that schedules
work persistently and reliably across multiple processes and machines.**
For instance...
- *Web apps*: maybe your web application lets users request the creation of a
large PDF, or some other expensive task.
- *Postponed work*: maybe you have a job that needs to be done at a certain time,
not right now.
- *Parallel processing*: maybe you have a long-running problem that can be made
to complete faster by splitting it up into discrete parts, each performed in
parallel, across multiple machines.
- *Serial processing*: maybe you want to decompose and serialize a job.
High-level features include the following:
- easy to use;
- flexible configuration, changeable dynamically in production;
- reliable;
- supports high availability;
- good debugging tools;
- well-tested; and
- friendly to testing.
While developed as part of the Zope project, zc.async can be used stand-alone.
=================
How does it work?
=================
The system uses the Zope Object Database (ZODB), a transactional, pickle-based
Python object database, for communication and coordination among participating
processes.
zc.async participants can each run in their own process, or share a process
(run in threads) with other code.
The Twisted framework supplies some code (failures and reactor implementations,
primarily) and some concepts to the package.
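
For example, scheduling work from application code amounts to putting a
job in a persistent queue. This is a minimal sketch; ``conn`` and
``send_message`` stand in for your own ZODB connection and callable::

    import transaction
    import zc.async.interfaces
    import zc.async.job

    queue = zc.async.interfaces.IQueue(conn.root())
    queue.put(zc.async.job.Job(send_message, 'hello, world'))
    transaction.commit()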
======================
Where can I read more?
======================
Quickstarts and in-depth documentation are available in the package and in
the `new and exciting on-line documentation`_.
.. _`new and exciting on-line documentation`: http://packages.python.org/zc.async/1.5.0/
| zc.async | /zc.async-1.5.4.zip/zc.async-1.5.4/README.txt | README.txt |
.. _two-database-set-up:
-------------------
Two Database Set Up
-------------------
Although it is a bit more trouble to set up, large-scale production
deployments will probably prefer this approach over the shared
single-database setup described above.
For our zope.conf, we need only one stanza in addition to the one seen above::
<zodb async>
<filestorage>
create true
path REPLACE_THIS_WITH_PATH_TO_STORAGE
</filestorage>
</zodb>
(You would replace "REPLACE_THIS_WITH_PATH_TO_STORAGE" with the path to the
storage file.)
As before, you will probably prefer to use ZEO rather than FileStorage in
production.
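
A corresponding ZEO client stanza would look something like this (a
sketch; the server address and storage name are assumptions for your
deployment)::

  <zodb async>
    <zeoclient>
      server localhost:8100
      storage async
    </zeoclient>
  </zodb>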
The zdaemon.conf instructions are the same: set the ZC_ASYNC_UUID environment
variable properly in the zdaemon.conf file.
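
In zdaemon.conf that is a one-line environment setting (the path here
is an assumption)::

  <environment>
    ZC_ASYNC_UUID /var/run/myapp/uuid.txt
  </environment>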
For our site.zcml, the only difference is that we use the
multidb_dispatcher_policy.zcml file rather than the
basic_dispatcher_policy.zcml file.
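
The relevant include, shown in full context in the footnote below, is::

  <include package="zc.async" file="multidb_dispatcher_policy.zcml" />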
If you want to change the policy, change "multidb_dispatcher_policy.zcml"
to "dispatcher.zcml" in the include above and register your own
replacements for the policy bits found in
"multidb_dispatcher_policy.zcml". Most of that policy comes from code in
subscribers.py, which can be adjusted easily.
If we process the files described above and wait for a poll, we have a
working setup [#process_multi]_.
>>> import zc.async.dispatcher
>>> dispatcher = zc.async.dispatcher.get()
>>> import pprint
>>> pprint.pprint(get_poll(dispatcher, 0))
{'': {'main': {'active jobs': [],
'error': None,
'len': 0,
'new jobs': [],
'size': 3}}}
>>> bool(dispatcher.activated)
True
As before, we can ask for a job to be performed, and get the result.
>>> conn = db.open()
>>> root = conn.root()
>>> import zc.async.interfaces
>>> queue = zc.async.interfaces.IQueue(root)
>>> import operator
>>> import zc.async.job
>>> job = queue.put(zc.async.job.Job(operator.mul, 21, 2))
>>> import transaction
>>> transaction.commit()
>>> wait_for_result(job)
42
Hopefully zc.async will be an easy-to-configure, easy-to-use, and useful tool
for you! Good luck! [#shutdown]_
.. rubric:: Footnotes
.. [#process_multi]
>>> import errno, os, random, socket, tempfile
>>> dir = tempfile.mkdtemp()
>>> site_zcml_file = os.path.join(dir, 'site.zcml')
>>> s = socket.socket()
>>> for i in range(20):
... monitor_port = random.randint(20000, 49151)
... try:
... s.bind(('127.0.0.1', monitor_port))
... except socket.error, e:
... if e.args[0] == errno.EADDRINUSE:
... pass
... else:
... raise
... else:
... s.close()
... break
... else:
... assert False, 'could not find available port'
... monitor_port = None
...
>>> zope_conf = """
... site-definition %(site_zcml_file)s
...
... <zodb main>
... <filestorage>
... create true
... path %(main_storage_path)s
... </filestorage>
... </zodb>
...
... <zodb async>
... <filestorage>
... create true
... path %(async_storage_path)s
... </filestorage>
... </zodb>
...
... <product-config zc.z3monitor>
... port %(monitor_port)s
... </product-config>
...
... <logger>
... level debug
... name zc.async
... propagate no
...
... <logfile>
... path %(async_event_log)s
... </logfile>
... </logger>
...
... <logger>
... level debug
... name zc.async.trace
... propagate no
...
... <logfile>
... path %(async_trace_log)s
... </logfile>
... </logger>
...
... <eventlog>
... <logfile>
... formatter zope.exceptions.log.Formatter
... path STDOUT
... </logfile>
... <logfile>
... formatter zope.exceptions.log.Formatter
... path %(event_log)s
... </logfile>
... </eventlog>
... """ % {'site_zcml_file': site_zcml_file,
... 'main_storage_path': os.path.join(dir, 'main.fs'),
... 'async_storage_path': os.path.join(dir, 'async.fs'),
... 'monitor_port': monitor_port,
... 'event_log': os.path.join(dir, 'z3.log'),
... 'async_event_log': os.path.join(dir, 'async.log'),
... 'async_trace_log': os.path.join(dir, 'async_trace.log'),}
...
>>> os.environ['ZC_ASYNC_UUID'] = os.path.join(dir, 'uuid.txt')
>>> site_zcml = """
... <configure xmlns='http://namespaces.zope.org/zope'
... xmlns:meta="http://namespaces.zope.org/meta"
... >
... <include package="zope.component" file="meta.zcml" />
... <include package="zope.component" />
... <include package="zc.z3monitor" />
... <include package="zc.async" file="multidb_dispatcher_policy.zcml" />
...
... <!-- this is usually handled in Zope applications by the
... zope.app.keyreference.persistent.connectionOfPersistent adapter -->
... <adapter factory="zc.twist.connection" />
... </configure>
... """
>>> zope_conf_file = os.path.join(dir, 'zope.conf')
>>> f = open(zope_conf_file, 'w')
>>> f.write(zope_conf)
>>> f.close()
>>> f = open(site_zcml_file, 'w')
>>> f.write(site_zcml)
>>> f.close()
>>> import zdaemon.zdoptions
>>> import zope.app.appsetup
>>> options = zdaemon.zdoptions.ZDOptions()
>>> options.schemadir = os.path.join(
... os.path.dirname(os.path.abspath(zope.app.appsetup.__file__)),
... 'schema')
>>> options.realize(['-C', zope_conf_file])
>>> config = options.configroot
>>> import zope.app.appsetup.product
>>> zope.app.appsetup.product.setProductConfigurations(
... config.product_config)
>>> ignore = zope.app.appsetup.config(config.site_definition)
>>> import zope.app.appsetup.appsetup
>>> db = zope.app.appsetup.appsetup.multi_database(config.databases)[0][0]
>>> import zope.event
>>> import zc.async.interfaces
>>> zope.event.notify(zc.async.interfaces.DatabaseOpened(db))
>>> from zc.async.testing import get_poll, wait_for_result
.. [#shutdown]
>>> import zc.async.dispatcher
>>> dispatcher = zc.async.dispatcher.get()
>>> dispatcher.reactor.callFromThread(dispatcher.reactor.stop)
>>> dispatcher.thread.join(3)
>>> db.close()
>>> db.databases['async'].close()
>>> import shutil
>>> shutil.rmtree(dir)
| zc.async | /zc.async-1.5.4.zip/zc.async-1.5.4/src/zc/async/README_3b.txt | README_3b.txt |