# stdlib
import base64
import logging
import os
from datetime import datetime
from math import ceil
# Bunch
from bunch import bunchify
# cryptography
from cryptography.fernet import Fernet, InvalidToken
# Python 2/3 compatibility
from builtins import bytes
# Zato
from zato.common.crypto.const import well_known_data, zato_stdin_prefix
from zato.common.ext.configobj_ import ConfigObj
from zato.common.json_internal import loads
# ################################################################################################################################
logger = logging.getLogger(__name__)
# ################################################################################################################################
class SecretKeyError(Exception):
pass
# ################################################################################################################################
class CryptoManager(object):
""" Used for encryption and decryption of secrets.
"""
def __init__(self, repo_dir=None, secret_key=None, stdin_data=None, well_known_data=None):
# We always get it on input rather than reading it directly because our caller
# may want to provide it to subprocesses, in which case reading it in this process
# would consume it and the other process would not be able to access it.
self.stdin_data = stdin_data
# In case we have a repository directory on input, look up the secret keys and well known data here ..
if not secret_key:
if repo_dir:
secret_key, well_known_data = self.get_config(repo_dir)
# .. no matter if given on input or through repo_dir, we can set up crypto keys now.
self.set_config(secret_key, well_known_data)
# Callers will be able to register their hashing scheme which will end up in this dict by name
self.hash_scheme = {}
# ################################################################################################################################
def add_hash_scheme(self, name, rounds, salt_size):
""" Adds a new named PBKDF2 hashing scheme, i.e. a set of named variables and a hashing object.
"""
# passlib
from passlib import hash as passlib_hash
self.hash_scheme[name] = passlib_hash.pbkdf2_sha512.using(rounds=rounds, salt_size=salt_size)
# ################################################################################################################################
def get_config(self, repo_dir):
raise NotImplementedError('Must be implemented by subclasses')
# ################################################################################################################################
def _find_secret_key(self, secret_key):
""" It's possible that what is in config files is not a secret key directly, but information where to find it,
e.g. in environment variables or stdin. This method looks it up in such cases.
"""
secret_key = secret_key.decode('utf8') if isinstance(secret_key, bytes) else secret_key
# Environment variables
if secret_key.startswith('$'):
try:
env_key = secret_key[1:].upper()
value = os.environ[env_key]
except KeyError:
raise SecretKeyError('Environment variable not found `{}`'.format(env_key))
# Read from stdin
elif secret_key.startswith(zato_stdin_prefix):
value = self.stdin_data
if not value:
raise SecretKeyError('No data provided on stdin')
elif not secret_key:
raise SecretKeyError('Secret key is missing')
# Use the value as it is
else:
value = secret_key
# Fernet keys always require encoding
value = value if isinstance(value, bytes) else value.encode('utf8')
# Create a transient key just to confirm that what we found was syntactically correct.
# Note that we use our own invalid backend, which Fernet will not use for anything,
# but we need to provide it to make sure Fernet.__init__ does not import its default backend.
try:
Fernet(value, backend='invalid')
except Exception as e:
raise SecretKeyError(e.args)
else:
return value
# ################################################################################################################################
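# A usage sketch - an assumption, not part of the original module - showing the indirection
# that _find_secret_key resolves: a value starting with '$' is upper-cased and looked up
# in the environment. The variable name below is illustrative:
#
#   os.environ['ZATO_SECRET_KEY'] = Fernet.generate_key().decode('utf8')
#   cm = CryptoManager.from_secret_key('$zato_secret_key')
# ################################################################################################################################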
def set_config(self, secret_key, well_known_data):
""" Sets crypto attributes and, to be double sure that they are correct,
decrypts well known data to itself in order to confirm that keys are valid / expected.
"""
key = self._find_secret_key(secret_key)
self.secret_key = Fernet(key)
self.well_known_data = well_known_data if well_known_data else None
if self.well_known_data:
self.check_consistency()
# ################################################################################################################################
def check_consistency(self):
""" Used as a consistency check to confirm that a given component's key can decrypt well-known data.
"""
try:
decrypted = self.decrypt(self.well_known_data)
except InvalidToken:
raise SecretKeyError('Invalid key, could not decrypt well-known data')
else:
if decrypted != well_known_data:
raise SecretKeyError('Expected `{}` to equal `{}`'.format(decrypted, well_known_data))
# ################################################################################################################################
@staticmethod
def generate_key():
""" Creates a new random string for Fernet keys.
"""
return Fernet.generate_key()
# ################################################################################################################################
@staticmethod
def generate_secret(bits=256):
""" Generates a secret string of bits size.
"""
return base64.urlsafe_b64encode(os.urandom(int(bits / 8)))
# ################################################################################################################################
@staticmethod
def generate_password(bits=192, to_str=False):
""" Generates a string strong enough to be a password (default: 192 bits)
"""
# type: (int, bool) -> str
value = CryptoManager.generate_secret(bits)
return value.decode('utf8') if to_str else value
# ################################################################################################################################
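# Illustrative output only - generate_secret returns URL-safe base64 of os.urandom bytes,
# so a 192-bit password is 24 random bytes, i.e. a 32-character token:
#
#   CryptoManager.generate_password()            # e.g. b'...' (32 characters, bytes)
#   CryptoManager.generate_password(to_str=True) # the same, decoded to str
# ################################################################################################################################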
@classmethod
def from_repo_dir(cls, secret_key, repo_dir, stdin_data):
""" Creates a new CryptoManager instance from a path to configuration file(s).
"""
return cls(secret_key=secret_key, repo_dir=repo_dir, stdin_data=stdin_data)
# ################################################################################################################################
@classmethod
def from_secret_key(cls, secret_key, well_known_data=None, stdin_data=None):
""" Creates a new CryptoManager instance from an already existing secret key.
"""
return cls(secret_key=secret_key, well_known_data=well_known_data, stdin_data=stdin_data)
# ################################################################################################################################
def encrypt(self, data):
""" Encrypts incoming data, which must be a string.
"""
if not isinstance(data, bytes):
data = data.encode('utf8')
return self.secret_key.encrypt(data)
# ################################################################################################################################
def decrypt(self, encrypted):
""" Returns input data in a clear-text, decrypted, form.
"""
if not isinstance(encrypted, bytes):
encrypted = encrypted.encode('utf8')
return self.secret_key.decrypt(encrypted).decode('utf8')
# ################################################################################################################################
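# Round-trip sketch - an assumption, not part of the original module:
#
#   cm = CryptoManager.from_secret_key(CryptoManager.generate_key())
#   token = cm.encrypt('my-secret')         # bytes, a Fernet token
#   assert cm.decrypt(token) == 'my-secret' # decrypt returns a str
# ################################################################################################################################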
def hash_secret(self, data, name='zato.default'):
""" Hashes input secret using a named configured (e.g. PBKDF2-SHA512, 100k rounds, salt 32 bytes).
"""
return self.hash_scheme[name].hash(data)
# ################################################################################################################################
def verify_hash(self, given, expected, name='zato.default'):
return self.hash_scheme[name].verify(given, expected)
# ################################################################################################################################
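# Hashing sketch - an assumption, not part of the original module; the scheme name,
# rounds and salt size below are illustrative:
#
#   cm.add_hash_scheme('zato.default', rounds=100000, salt_size=32)
#   hashed = cm.hash_secret('my-password')
#   assert cm.verify_hash('my-password', hashed)
# ################################################################################################################################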
@staticmethod
def get_hash_rounds(goal, header_func=None, progress_func=None, footer_func=None):
return HashParamsComputer(goal, header_func, progress_func, footer_func).get_info()
# ################################################################################################################################
def get_config_entry(self, entry):
raise NotImplementedError('May be implemented by subclasses')
# ################################################################################################################################
class WebAdminCryptoManager(CryptoManager):
""" CryptoManager for web-admin instances.
"""
def get_config(self, repo_dir):
conf_path = os.path.join(repo_dir, 'web-admin.conf')
conf = bunchify(loads(open(conf_path).read()))
return conf['zato_secret_key'], conf['well_known_data']
# ################################################################################################################################
class SchedulerCryptoManager(CryptoManager):
""" CryptoManager for schedulers.
"""
def get_config(self, repo_dir):
conf_path = os.path.join(repo_dir, 'scheduler.conf')
conf = bunchify(ConfigObj(conf_path, use_zato=False))
return conf.secret_keys.key1, conf.crypto.well_known_data
# ################################################################################################################################
class ServerCryptoManager(CryptoManager):
""" CryptoManager for servers.
"""
def get_config(self, repo_dir):
conf_path = os.path.join(repo_dir, 'secrets.conf')
conf = bunchify(ConfigObj(conf_path, use_zato=False))
return conf.secret_keys.key1, conf.zato.well_known_data
# ################################################################################################################################
class HashParamsComputer(object):
""" Computes parameters for hashing purposes, e.g. number of rounds in PBKDF2.
"""
def __init__(self, goal, header_func=None, progress_func=None, footer_func=None, scheme='pbkdf2_sha512', loops=10,
iters_per_loop=10, salt_size=64, rounds_per_iter=25000):
# passlib
from passlib import hash as passlib_hash
self.goal = goal
self.header_func = header_func
self.progress_func = progress_func
self.footer_func = footer_func
self.scheme = scheme
self.loops = loops
self.iters_per_loop = iters_per_loop
self.iters = self.loops * self.iters_per_loop
self.salt_size = salt_size
self.rounds_per_iter = rounds_per_iter
self.report_per_cent = 5.0
self.report_once_in = self.iters * self.report_per_cent / 100.0
self.hash_scheme = getattr(passlib_hash, scheme).using(salt_size=salt_size, rounds=rounds_per_iter)
self.cpu_info = self.get_cpu_info()
self._round_down_to_nearest = 1000
self._round_up_to_nearest = 5000
# ################################################################################################################################
def get_cpu_info(self):
""" Returns metadata about current CPU the computation is executed on.
"""
# py-cpuinfo
from cpuinfo import get_cpu_info
cpu_info = get_cpu_info()
return {
'brand': cpu_info['brand'],
'hz_actual': cpu_info['hz_actual']
}
# ################################################################################################################################
def get_info(self, _utcnow=datetime.utcnow):
if self.header_func:
self.header_func(self.cpu_info, self.goal)
all_results = []
current_iter = 0
current_loop = 0
# We run several iterations to account for sudden and unexpected CPU usage spikes;
# outliers stemming from such spikes will be rejected.
while current_loop < self.loops:
current_loop += 1
current_loop_iter = 0
current_loop_result = []
while current_loop_iter < self.iters_per_loop:
current_iter += 1
current_loop_iter += 1
start = _utcnow()
self.hash_scheme.hash(well_known_data)
current_loop_result.append((_utcnow() - start).total_seconds())
if self.progress_func:
if current_iter % self.report_once_in == 0:
per_cent = int((current_iter / self.iters) * 100)
self.progress_func(per_cent)
all_results.append(sum(current_loop_result) / len(current_loop_result))
# On average, that many seconds were needed to create a hash with self.rounds_per_iter rounds ..
sec_needed = min(all_results)
# .. we now need to extrapolate it to get the desired self.goal seconds.
rounds_per_second = int(self.rounds_per_iter / sec_needed)
rounds_per_second = self.round_down(rounds_per_second)
rounds = int(rounds_per_second * self.goal)
rounds = self.round_up(rounds)
rounds_per_second_str = '{:,d}'.format(rounds_per_second)
rounds_str = '{:,d}'.format(rounds).rjust(len(rounds_per_second_str))
if self.footer_func:
self.footer_func(rounds_per_second_str, rounds_str)
return {
'rounds_per_second': int(rounds_per_second),
'rounds_per_second_str': rounds_per_second_str.strip(),
'rounds': int(rounds),
'rounds_str': rounds_str.strip(),
'cpu_info': self.cpu_info,
'algorithm': 'PBKDF2-SHA512',
'salt_size': self.salt_size,
}
# ################################################################################################################################
def round_down(self, value):
return int(round(value / self._round_down_to_nearest) * self._round_down_to_nearest)
# ################################################################################################################################
def round_up(self, value):
return int(ceil(value / self._round_up_to_nearest) * self._round_up_to_nearest)
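# ################################################################################################################################
# Worked example of the extrapolation above (numbers illustrative): if the fastest loop
# averaged 0.05 sec per hash at 25,000 rounds, then rounds_per_second is 500,000, rounded
# down to the nearest 1,000. For a goal of 0.2 sec per hash this gives 500,000 * 0.2 =
# 100,000 rounds, rounded up to the nearest 5,000.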
# ################################################################################################################################
# Source: zato-common 3.2.1 - src/zato/common/crypto/api.py
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
import errno
import logging
import os
import stat
import tempfile
from datetime import datetime, timedelta
from fcntl import fcntl
from io import StringIO
from traceback import format_exc
from uuid import uuid4
# gevent
from gevent import sleep
# pyrapidjson
from rapidjson import loads
# Python 2/3 compatibility
from builtins import bytes
# Zato
from zato.common.api import IPC
from zato.common.ipc.publisher import Publisher
from zato.common.ipc.subscriber import Subscriber
from zato.common.util.api import spawn_greenlet
from zato.common.util.file_system import fs_safe_name
# ################################################################################################################################
logger = logging.getLogger(__name__)
# ################################################################################################################################
fifo_create_mode = stat.S_IRUSR | stat.S_IWUSR
fifo_ignore_err = errno.EAGAIN, errno.EWOULDBLOCK
# On Linux, this is F_LINUX_SPECIFIC_BASE (1024) + 7
_F_SETPIPE_SZ = 1031
# ################################################################################################################################
class IPCAPI(object):
""" API through which IPC is performed.
"""
def __init__(self, name=None, on_message_callback=None, pid=None):
self.name = name
self.on_message_callback = on_message_callback
self.pid = pid
self.pid_publishers = {} # Target PID -> Publisher object connected to that target PID's subscriber socket
self.subscriber = None
# ################################################################################################################################
@staticmethod
def get_endpoint_name(cluster_name, server_name, target_pid):
return fs_safe_name('{}-{}-{}'.format(cluster_name, server_name, target_pid))
# ################################################################################################################################
def run(self):
self.subscriber = Subscriber(self.on_message_callback, self.name, self.pid)
spawn_greenlet(self.subscriber.serve_forever)
# ################################################################################################################################
def close(self):
if self.subscriber:
self.subscriber.close()
for publisher in self.pid_publishers.values():
publisher.close()
# ################################################################################################################################
def publish(self, payload):
self.publisher.publish(payload)
# ################################################################################################################################
def _get_pid_publisher(self, cluster_name, server_name, target_pid):
# We do not have a publisher connected to that PID, so we need to create it ..
if target_pid not in self.pid_publishers:
# Create a publisher and sleep for a moment until it connects to the other socket
publisher = Publisher(self.get_endpoint_name(cluster_name, server_name, target_pid), self.pid)
# We can tolerate it because it happens only the very first time our PID invokes target_pid
sleep(0.1)
# We can now store it for later use
self.pid_publishers[target_pid] = publisher
# At this point we are sure we have a publisher for target PID
return self.pid_publishers[target_pid]
# ################################################################################################################################
def _get_response(self, fifo, buffer_size, read_size=21, fifo_ignore_err=fifo_ignore_err, empty=('', b'', None)):
try:
buff = StringIO()
data = object() # Use a sentinel because '' or None are expected from os.read
# The most common use-case for IPC are publish/subscribe messages and the most
# common response is this: 'zs;{"r": {"r": null}}'
# which is 21 bytes.
while data not in empty:
data = os.read(fifo, read_size)
buff.write(data.decode('utf8') if isinstance(data, bytes) else data)
response = buff.getvalue()
status = response[:IPC.STATUS.LENGTH]
response = response[IPC.STATUS.LENGTH+1:] # Add 1 to account for the separator
is_success = status == IPC.STATUS.SUCCESS
if is_success:
response = loads(response) if response else ''
buff.close()
return is_success, response
except OSError as e:
if e.errno not in fifo_ignore_err:
raise
# ################################################################################################################################
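# Example of the FIFO wire format parsed above (assuming IPC.STATUS.LENGTH == 2, a
# success status of 'zs' and a one-character separator, as in the sample response):
#
#   'zs;{"r": {"r": null}}'
#    ^^ status, compared against IPC.STATUS.SUCCESS
#      ^ separator, skipped via IPC.STATUS.LENGTH + 1
#       the rest is the JSON payload passed to loads(...)
# ################################################################################################################################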
def invoke_by_pid(self, service, payload, cluster_name, server_name, target_pid,
fifo_response_buffer_size, timeout=90, is_async=False, skip_response_elem=False):
""" Invokes a service through IPC, synchronously or in background. If target_pid is an exact PID then this one worker
process will be invoked if it exists at all.
"""
# Create a FIFO pipe to receive replies to come through
fifo_path = os.path.join(tempfile.tempdir, 'zato-ipc-fifo-{}'.format(uuid4().hex))
os.mkfifo(fifo_path, fifo_create_mode)
try:
publisher = self._get_pid_publisher(cluster_name, server_name, target_pid)
publisher.publish(payload, service, target_pid, reply_to_fifo=fifo_path)
# Async = we do not need to wait for any response
if is_async:
return
is_success, response = False, None
try:
# Open the pipe for reading ..
fifo_fd = os.open(fifo_path, os.O_RDONLY | os.O_NONBLOCK)
fcntl(fifo_fd, _F_SETPIPE_SZ, 1000000)
# .. and wait for response ..
now = datetime.utcnow()
until = now + timedelta(seconds=timeout)
while now < until:
sleep(0.05)
is_success, response = self._get_response(fifo_fd, fifo_response_buffer_size)
if response:
break
else:
now = datetime.utcnow()
except Exception:
logger.warn('Exception in IPC FIFO, e:`%s`', format_exc())
finally:
os.close(fifo_fd)
return is_success, response
except Exception:
logger.warn(format_exc())
finally:
os.remove(fifo_path)
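# ################################################################################################################################
# Usage sketch - an assumption, not part of the original module; 'handle_message' is a
# hypothetical callback and all names and sizes below are illustrative:
#
#   api = IPCAPI(name='server1', on_message_callback=handle_message, pid=os.getpid())
#   api.run()
#   is_success, response = api.invoke_by_pid('my.service', 'payload', 'cluster1', 'server1',
#       target_pid=4567, fifo_response_buffer_size=250000)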
# ################################################################################################################################
# Source: zato-common 3.2.1 - src/zato/common/ipc/api.py
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
import os
from datetime import datetime
from tempfile import gettempdir
# ZeroMQ
import zmq.green as zmq
# Zato
from zato.common.api import DATA_FORMAT, NO_DEFAULT_VALUE
from zato.common.util.api import get_logger_for_class, make_repr, new_cid, spawn_greenlet
# ################################################################################################################################
class Request(object):
def __init__(self, publisher_tag, publisher_pid, payload='', request_id=None):
self.publisher_tag = publisher_tag
self.publisher_pid = publisher_pid
self.action = NO_DEFAULT_VALUE
self.service = ''
self._payload = payload
self.payload_type = type(payload).__name__
self.data_format = DATA_FORMAT.DICT
self.request_id = request_id or 'ipc.{}'.format(new_cid())
self.target_pid = None
self.reply_to_tag = ''
self.reply_to_fifo = ''
self.in_reply_to = ''
self.creation_time_utc = datetime.utcnow()
@property
def payload(self):
return self._payload
@payload.setter
def payload(self, value):
self._payload = value
self.payload_type = type(self._payload)
def __repr__(self):
return make_repr(self)
# ################################################################################################################################
class IPCBase(object):
""" Base class for core IPC objects.
"""
def __init__(self, name, pid):
self.name = name
self.pid = pid
self.ctx = zmq.Context()
spawn_greenlet(self.set_up_sockets)
self.keep_running = True
self.logger = get_logger_for_class(self.__class__)
self.log_connected()
def __repr__(self):
return make_repr(self)
def set_up_sockets(self):
raise NotImplementedError('Needs to be implemented in subclasses')
def log_connected(self):
raise NotImplementedError('Needs to be implemented in subclasses')
def close(self):
raise NotImplementedError('Needs to be implemented in subclasses')
# ################################################################################################################################
class IPCEndpoint(IPCBase):
""" A participant in IPC conversations, i.e. either publisher or subscriber.
"""
socket_method = None
socket_type = None
def __init__(self, name, pid):
self.address = self.get_address(name)
super(IPCEndpoint, self).__init__(name, pid)
def get_address(self, address):
return 'ipc://{}'.format(os.path.join(gettempdir(), 'zato-ipc-{}'.format(address)))
def set_up_sockets(self):
self.socket = self.ctx.socket(getattr(zmq, self.socket_type.upper()))
self.socket.setsockopt(zmq.LINGER, 0)
getattr(self.socket, self.socket_method)(self.address)
def log_connected(self):
self.logger.info('Established %s/%s to %s (self.pid: %s)', self.socket_type, self.socket_method, self.address, self.pid)
def close(self):
self.keep_running = False
self.socket.close()
self.ctx.term()
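# ################################################################################################################################
# For instance (illustrative, assuming a /tmp temporary directory), an endpoint named
# 'cluster1-server1-4567' binds or connects - depending on the subclass's socket_method -
# to the address 'ipc:///tmp/zato-ipc-cluster1-server1-4567'.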
# ################################################################################################################################
# Source: zato-common 3.2.1 - src/zato/common/ipc/__init__.py
# ################################################################################################################################
# ################################################################################################################################
class FileTransferChannel(object):
def __init__(self):
self._config_attrs = []
self.id = None # type: int
self.name = None # type: str
self.is_active = None # type: bool
self.is_hot_deploy = None # type: bool
self.source_type = None # type: str
self.pickup_from = '' # type: str
self.parse_with = '' # type: str
self.ftp_source_id = None # type: int
self.line_by_line = None # type: bool
self.file_patterns = '' # type: str
self.service_list = None # type: list
self.topic_list = None # type: list
self.outconn_rest_list = None # type: list
self.read_on_pickup = None # type: bool
self.sftp_source_id = None # type: int
self.parse_on_pickup = None # type: bool
self.ftp_source_name = '' # type: str
self.sftp_source_name = '' # type: str
self.service_list_json = None # type: str
self.topic_list_json = None # type: str
self.outconn_rest_list_json = None # type: str
self.scheduler_job_id = None # type: int
self.move_processed_to = '' # type: str
self.delete_after_pickup = None # type: bool
# ################################################################################################################################
def to_dict(self):
out = {}
for name in self._config_attrs:
value = getattr(self, name)
out[name] = value
return out
# ################################################################################################################################
@staticmethod
def from_dict(config):
# type: (dict) -> FileTransferChannel
out = FileTransferChannel()
for k, v in config.items():
out._config_attrs.append(k)
setattr(out, k, v)
return out
# ################################################################################################################################
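# A round-trip sketch (an assumption, values illustrative) - from_dict records each key
# it sets in _config_attrs, so to_dict returns exactly the attributes given on input:
#
#   channel = FileTransferChannel.from_dict({'id': 1, 'name': 'my.channel', 'is_active': True})
#   assert channel.to_dict() == {'id': 1, 'name': 'my.channel', 'is_active': True}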
# ################################################################################################################################
# Source: zato-common 3.2.1 - src/zato/common/model/file_transfer.py
from __future__ import absolute_import, division, print_function, unicode_literals
# ################################################################################################################################
# ################################################################################################################################
class BaseException(Exception):
pass
class AddressNotAllowed(BaseException):
pass
class RateLimitReached(BaseException):
pass
# ################################################################################################################################
# ################################################################################################################################
class Const:
from_any = '*'
rate_any = '*'
class Unit:
minute = 'm'
hour = 'h'
day = 'd'
@staticmethod
def all_units():
return set([Const.Unit.minute, Const.Unit.hour, Const.Unit.day])
# ################################################################################################################################
# ################################################################################################################################
class ObjectInfo(object):
""" Information about an individual object covered by rate limiting.
"""
__slots__ = 'type_', 'id', 'name'
def __init__(self):
self.type_ = None # type: str
self.id = None # type: int
self.name = None # type: str
# ################################################################################################################################
# ################################################################################################################################
class DefinitionItem(object):
__slots__ = 'config_line', 'from_', 'rate', 'unit', 'object_id', 'object_type', 'object_name'
def __init__(self):
self.config_line = None # type: int
self.from_ = None # type: object
self.rate = None # type: int
self.unit = None # type: str
self.object_id = None # type: int
self.object_type = None # type: str
self.object_name = None # type: str
def __repr__(self):
return '<{} at {}; line:{}, from:{}, rate:{}, unit:{} ({} {} {})>'.format(
self.__class__.__name__, hex(id(self)), self.config_line, self.from_, self.rate, self.unit,
self.object_id, self.object_name, self.object_type)
# ################################################################################################################################
# ################################################################################################################################
# Source: zato-common 3.2.1 - src/zato/common/rate_limiting/common.py
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
from contextlib import closing
from logging import getLogger
# gevent
from gevent.lock import RLock
# netaddr
from netaddr import IPNetwork
# SQLAlchemy
from sqlalchemy import and_
# Zato
from zato.common.rate_limiting.common import Const, DefinitionItem, ObjectInfo
from zato.common.rate_limiting.limiter import Approximate, Exact, RateLimitStateDelete, RateLimitStateTable
# Python 2/3 compatibility
from past.builtins import unicode
# ################################################################################################################################
# Type checking
import typing
if typing.TYPE_CHECKING:
# stdlib
from typing import Callable
# Zato
from zato.common.rate_limiting.limiter import BaseLimiter
from zato.distlock import LockManager
# For pyflakes
BaseLimiter = BaseLimiter
Callable = Callable
LockManager = LockManager
# ################################################################################################################################
logger = getLogger(__name__)
# ################################################################################################################################
# ################################################################################################################################
class DefinitionParser(object):
""" Parser for user-provided rate limiting definitions.
"""
@staticmethod
def get_lines(definition, object_id, object_type, object_name, parse_only=False):
# type: (unicode, int, unicode, unicode, bool) -> list
if not parse_only:
out = []
definition = definition if isinstance(definition, unicode) else definition.decode('utf8')
for idx, orig_line in enumerate(definition.splitlines(), 1): # type: int, unicode
line = orig_line.strip()
if (not line) or line.startswith('#'):
continue
line = line.split('=')
if len(line) != 2:
raise ValueError('Invalid definition line `{}`; (idx:{})'.format(orig_line, idx))
from_, rate_info = line # type: unicode, unicode
from_ = from_.strip()
if from_ != Const.from_any:
from_ = IPNetwork(from_)
rate_info = rate_info.strip()
if rate_info == Const.rate_any:
rate = Const.rate_any
unit = Const.Unit.day # This is arbitrary but it does not matter because there is no rate limit in effect
else:
rate, unit = rate_info.split('/') # type: unicode, unicode
rate = int(rate.strip())
unit = unit.strip()
all_units = Const.all_units()
if unit not in all_units:
raise ValueError('Unit `{}` is not one of `{}`'.format(unit, all_units))
# In parse-only mode we do not build any actual output
if parse_only:
continue
item = DefinitionItem()
item.config_line = idx
item.from_ = from_
item.rate = rate
item.unit = unit
item.object_id = object_id
item.object_type = object_type
item.object_name = object_name
out.append(item)
if not parse_only:
return out
# ################################################################################################################################
@staticmethod
def check_definition(definition):
# type: (unicode)
DefinitionParser.get_lines(definition.strip(), None, None, None, True)
# ################################################################################################################################
@staticmethod
def check_definition_from_input(input_data):
# type: (dict)
rate_limit_def = input_data.get('rate_limit_def') or ''
if rate_limit_def:
DefinitionParser.check_definition(rate_limit_def)
# ################################################################################################################################
def parse(self, definition, object_id, object_type, object_name):
# type: (unicode, int, unicode, unicode) -> list
return DefinitionParser.get_lines(definition.strip(), object_id, object_type, object_name)
# ################################################################################################################################
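# A sketch of the definition format accepted above, inferred from the parser - one
# `from = rate/unit` pair per line, `*` meaning any address or any rate, and units
# being one of m(inute), h(our) or d(ay). All values below are illustrative:
#
#   definition = """
#   # Comments and blank lines are skipped
#   * = *
#   10.0.0.0/8 = 100/m
#   192.168.1.0/24 = 5000/h
#   """
#   items = DefinitionParser().parse(definition, 123, 'http_soap', 'my.channel')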
# ################################################################################################################################
class RateLimiting(object):
""" Main API for the management of rate limiting functionality.
"""
__slots__ = 'parser', 'config_store', 'lock', 'sql_session_func', 'global_lock_func', 'cluster_id'
def __init__(self):
self.parser = DefinitionParser() # type: DefinitionParser
self.config_store = {} # type: dict
self.lock = RLock()
self.global_lock_func = None # type: LockManager
self.sql_session_func = None # type: Callable
self.cluster_id = None # type: int
# ################################################################################################################################
def _get_config_key(self, object_type, object_name):
# type: (unicode, unicode) -> unicode
return '{}:{}'.format(object_type, object_name)
# ################################################################################################################################
def _get_config_by_object(self, object_type, object_name):
# type: (unicode, unicode) -> BaseLimiter
return self.config_store.get(self._get_config_key(object_type, object_name))
# ################################################################################################################################
def _create_config(self, object_dict, definition, is_exact):
# type: (dict, unicode, bool) -> BaseLimiter
object_id = object_dict['id']
object_type = object_dict['type_']
object_name = object_dict['name']
info = ObjectInfo()
info.id = object_id
info.type_ = object_type
info.name = object_name
parsed = self.parser.parse(definition or '', object_id, object_type, object_name)
if parsed:
def_first = parsed[0]
has_from_any = def_first.from_ == Const.from_any
else:
has_from_any = False
config = Exact(self.cluster_id, self.sql_session_func) if is_exact else Approximate(self.cluster_id) # type: BaseLimiter
config.is_active = object_dict['is_active']
config.is_exact = is_exact
config.api = self
config.object_info = info
config.definition = parsed
config.parent_type = object_dict['parent_type']
config.parent_name = object_dict['parent_name']
if has_from_any:
config.has_from_any = has_from_any
config.from_any_rate = def_first.rate
config.from_any_unit = def_first.unit
config.from_any_object_id = object_id
config.from_any_object_type = object_type
config.from_any_object_name = object_name
return config
# ################################################################################################################################
def create(self, object_dict, definition, is_exact):
# type: (dict, unicode, bool)
config = self._create_config(object_dict, definition, is_exact)
self.config_store[config.get_config_key()] = config
# ################################################################################################################################
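# Usage sketch - an assumption, not part of the original module; the object_dict keys
# are the ones consumed by _create_config and all values below are illustrative:
#
#   limits = RateLimiting()
#   limits.create({
#       'id': 123, 'type_': 'http_soap', 'name': 'my.channel', 'is_active': True,
#       'parent_type': None, 'parent_name': None,
#   }, '10.0.0.0/8 = 100/m', is_exact=False)
#   limits.check_limit('cid-1', 'http_soap', 'my.channel', '10.1.2.3')
# ################################################################################################################################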
def check_limit(self, cid, object_type, object_name, from_, needs_warn=True):
""" Checks if input object has already reached its allotted usage limit.
"""
# type: (unicode, unicode, unicode, unicode)
with self.lock:
config = self._get_config_by_object(object_type, object_name)
# It is possible that we do not have configuration for such an object,
# in which case we will log a warning.
if config:
with config.lock:
config.check_limit(cid, from_)
else:
if needs_warn:
logger.warn('No such rate limiting object `%s` (%s)', object_name, object_type)
# ################################################################################################################################
def _delete_from_odb(self, object_type, object_id):
with closing(self.sql_session_func()) as session:
session.execute(RateLimitStateDelete().where(and_(
RateLimitStateTable.c.object_type==object_type,
RateLimitStateTable.c.object_id==object_id,
)))
session.commit()
# ################################################################################################################################
def _delete(self, object_type, object_name, remove_parent):
""" Deletes configuration for input data, optionally deleting references to it from all objects that depended on it.
Must be called with self.lock held.
"""
# type: (unicode, unicode, bool)
config_key = self._get_config_key(object_type, object_name)
limiter = self.config_store[config_key] # type: BaseLimiter
del self.config_store[config_key]
if limiter.is_exact:
self._delete_from_odb(object_type, limiter.object_info.id)
if remove_parent:
self._set_new_parent(object_type, object_name, None, None)
# ################################################################################################################################
def _set_new_parent(self, parent_type, old_parent_name, new_parent_type, new_parent_name):
""" Sets new parent for all configuration entries matching the old one. Must be called with self.lock held.
"""
# type: (unicode, unicode, unicode, unicode)
for child_config in self.config_store.values(): # type: BaseLimiter
object_info = child_config.object_info
# This is our own config
if object_info.type_ == parent_type and object_info.name == old_parent_name:
continue
# This object has a parent, possibly it is our very configuration
if child_config.has_parent:
# Yes, this is our config ..
if child_config.parent_type == parent_type and child_config.parent_name == old_parent_name:
# We typically want to change the parent's name but it is possible
# that both type and name will be None (in case we are removing a parent from a child object)
# which is why both are set here.
child_config.parent_type = new_parent_type
child_config.parent_name = new_parent_name
# ################################################################################################################################
def edit(self, object_type, old_object_name, object_dict, definition, is_exact):
""" Changes, in place, an existing configuration entry to input data.
"""
# type: (unicode, unicode, dict, unicode, bool)
# Note the whole of this operation is under self.lock to make sure the update is atomic
# from our callers' perspective.
with self.lock:
old_config = self._get_config_by_object(object_type, old_object_name)
if not old_config:
raise ValueError('Rate limiting object not found `{}` ({})'.format(old_object_name, object_type))
# Just to be sure we are doing the right thing, compare object types, old and new
if object_type != old_config.object_info.type_:
raise ValueError('Unexpected object_type, old:`{}`, new:`{}` ({}) ({})'.format(
old_config.object_info.type_, object_type, old_object_name, object_dict))
# Now, create a new config object ..
new_config = self._create_config(object_dict, definition, is_exact)
# .. in case it was a rename ..
if old_config.object_info.name != new_config.object_info.name:
# .. make all child objects depend on the new name, in case it changed
self._set_new_parent(object_type, old_object_name, new_config.object_info.type_, new_config.object_info.name)
# First, delete the old configuration, but do not delete any objects that depended on it
# because we are just editing the former, not deleting it altogether.
self._delete(object_type, old_object_name, False)
# Now, create a new key
self.config_store[new_config.get_config_key()] = new_config
# ################################################################################################################################
def delete(self, object_type, object_name):
""" Deletes configuration for input object and clears out parent references to it.
"""
# type: (unicode, unicode)
with self.lock:
self._delete(object_type, object_name, True)
# ################################################################################################################################
def _get_config(self, object_type, object_name):
""" Returns configuration for the input object, assumming we have it at all.
"""
# type: (unicode, unicode) -> BaseLimiter
config_key = self._get_config_key(object_type, object_name)
return self.config_store.get(config_key)
# ################################################################################################################################
def get_config(self, object_type, object_name):
# type: (unicode, unicode) -> BaseLimiter
with self.lock:
return self._get_config(object_type, object_name)
# ################################################################################################################################
def has_config(self, object_type, object_name):
# type: (unicode, unicode) -> bool
with self.lock:
return bool(self._get_config(object_type, object_name))
# ################################################################################################################################
def cleanup(self):
""" Invoked periodically by the scheduler - goes through all configuration elements and cleans up
all time periods that are no longer needed.
"""
for config in self.config_store.values(): # type: BaseLimiter
config.cleanup()
# ################################################################################################################################
# ################################################################################################################################
# Source: zato-common 3.2.1 - src/zato/common/rate_limiting/__init__.py
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
from contextlib import closing
from copy import deepcopy
from datetime import datetime
# gevent
from gevent.lock import RLock
# netaddr
from netaddr import IPAddress
# Zato
from zato.common.odb.model import RateLimitState
from zato.common.odb.query.rate_limiting import current_period_list, current_state as current_state_query
from zato.common.rate_limiting.common import Const, AddressNotAllowed, RateLimitReached
# Python 2/3 compatibility
from future.utils import iterkeys
# ################################################################################################################################
if 0:
# stdlib
from typing import Callable
# Zato
from zato.common.rate_limiting import Approximate as RateLimiterApproximate, RateLimiting
from zato.common.rate_limiting.common import DefinitionItem, ObjectInfo
# For pyflakes
Callable = Callable
DefinitionItem = DefinitionItem
ObjectInfo = ObjectInfo
RateLimiterApproximate = RateLimiterApproximate
RateLimiting = RateLimiting
# ################################################################################################################################
RateLimitStateTable = RateLimitState.__table__
RateLimitStateDelete = RateLimitStateTable.delete
# ################################################################################################################################
# ################################################################################################################################
class BaseLimiter(object):
""" A per-server, approximate, rate limiter object. It is approximate because it does not keep track
of what current rate limits in other servers are.
"""
__slots__ = 'current_idx', 'lock', 'api', 'object_info', 'definition', 'has_from_any', 'from_any_rate', 'from_any_unit', \
'is_limit_reached', 'ip_address_cache', 'current_period_func', 'by_period', 'parent_type', 'parent_name', \
'is_exact', 'from_any_object_id', 'from_any_object_type', 'from_any_object_name', 'cluster_id', 'is_active', \
'invocation_no'
initial_state = {
'requests': 0,
'last_cid': None,
'last_request_time_utc': None,
'last_from': None,
'last_network': None,
}
def __init__(self, cluster_id):
# type: (int)
self.cluster_id = cluster_id
self.is_active = None
self.current_idx = 0
self.lock = RLock()
self.api = None # type: RateLimiting
self.object_info = None # type: ObjectInfo
self.definition = None # type: list
self.has_from_any = None # type: bool
self.from_any_rate = None # type: int
self.from_any_unit = None # type: str
self.ip_address_cache = {} # type: dict
self.by_period = {} # type: dict
self.parent_type = None # type: str
self.parent_name = None # type: str
self.is_exact = None # type: bool
self.invocation_no = 0 # type: int
self.from_any_object_id = None # type: int
self.from_any_object_type = None # type: str
self.from_any_object_name = None # type: str
self.current_period_func = {
Const.Unit.day: self._get_current_day,
Const.Unit.hour: self._get_current_hour,
Const.Unit.minute: self._get_current_minute,
}
# ################################################################################################################################
@property
def has_parent(self):
return self.parent_type and self.parent_name
# ################################################################################################################################
def cleanup(self):
""" Cleans up time periods that are no longer needed.
"""
with self.lock:
# First, periodically clear out the IP cache to limit its size to 1,000 items
if len(self.ip_address_cache) >= 1000:
self.ip_address_cache.clear()
now = datetime.utcnow()
current_minute = self._get_current_minute(now)
current_hour = self._get_current_hour(now)
current_day = self._get_current_day(now)
# We need a copy so as not to modify the dict in place
periods = self._get_current_periods()
to_delete = set()
current_periods_map = {
Const.Unit.minute: current_minute,
Const.Unit.hour: current_hour,
Const.Unit.day: current_day
}
for period in periods: # type: str
period_unit = period[0] # type: str # One of Const.Unit instances
current_period = current_periods_map[period_unit]
# If this period is in the past, add it to the ones to be deleted
if period < current_period:
to_delete.add(period)
if to_delete:
self._delete_periods(to_delete)
# ################################################################################################################################
def rewrite_rate_data(self, old_config):
""" Writes rate limiting information from old configuration to our own. Used by RateLimiting.edit action.
"""
# type: (RateLimiterApproximate)
# Already collected rate limits
self.by_period.clear()
self.by_period.update(old_config.by_period)
# ################################################################################################################################
def get_config_key(self):
# type: () -> str
return '{}:{}'.format(self.object_info.type_, self.object_info.name)
# ################################################################################################################################
def _get_rate_config_by_from(self, orig_from, _from_any=Const.from_any):
# type: (str, str) -> DefinitionItem
from_ = self.ip_address_cache.setdefault(orig_from, IPAddress(orig_from)) # type: IPAddress
found = None
for line in self.definition: # type: DefinitionItem
# A catch-all * pattern
if line.from_ == _from_any:
found = line
break
# A network match
elif from_ in line.from_:
found = line
break
# We did not match any line from configuration
if not found:
raise AddressNotAllowed('Address not allowed `{}`'.format(orig_from))
# We found a matching piece of from IP configuration
return found
# ################################################################################################################################
def _get_current_day(self, now, _prefix=Const.Unit.day, _format='%Y-%m-%d'):
# type: (datetime, str, str) -> str
return '{}.{}'.format(_prefix, now.strftime(_format))
def _get_current_hour(self, now, _prefix=Const.Unit.hour, _format='%Y-%m-%dT%H'):
# type: (datetime, str, str) -> str
return '{}.{}'.format(_prefix, now.strftime(_format))
def _get_current_minute(self, now, _prefix=Const.Unit.minute, _format='%Y-%m-%dT%H:%M'):
# type: (datetime, str, str) -> str
return '{}.{}'.format(_prefix, now.strftime(_format))
# ################################################################################################################################
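# Example keys produced by the helpers above for 2021-01-02 15:04 UTC (illustrative):
#
#   day:    'd.2021-01-02'
#   hour:   'h.2021-01-02T15'
#   minute: 'm.2021-01-02T15:04'
# ################################################################################################################################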
def _format_last_info(self, current_state):
# type: (dict) -> str
return 'last_from:`{last_from}; last_request_time_utc:`{last_request_time_utc}; last_cid:`{last_cid}`;'.format(
**current_state)
# ################################################################################################################################
def _raise_rate_limit_exceeded(self, rate, unit, orig_from, network_found, current_state, cid,
def_object_id, def_object_name, def_object_type):
raise RateLimitReached('Max. rate limit of {}/{} reached; from:`{}`, network:`{}`; {} (cid:{}) (def:{} {} {})'.format(
rate, unit, orig_from, network_found, self._format_last_info(current_state), cid, def_object_id, def_object_type,
def_object_name))
# ################################################################################################################################
def _check_limit(self, cid, orig_from, network_found, rate, unit, def_object_id, def_object_name, def_object_type,
_rate_any=Const.rate_any, _utcnow=datetime.utcnow):
# type: (str, str, str, int, str, str, object, str, str)
# Increase invocation counter
self.invocation_no += 1
# Local aliases
now = _utcnow()
# Get current period, e.g. current day, hour or minute
current_period_func = self.current_period_func[unit]
current_period = current_period_func(now)
current_state = self._get_current_state(current_period, network_found)
# Unless we are allowed to have any rate ..
if rate != _rate_any:
# We may have reached the limit already ..
if current_state['requests'] >= rate:
self._raise_rate_limit_exceeded(rate, unit, orig_from, network_found, current_state, cid,
def_object_id, def_object_name, def_object_type)
# Update current metadata state
self._set_new_state(current_state, cid, orig_from, network_found, now, current_period)
# Above, we checked our own rate limit but it is still possible that we have a parent
# that also wants to check it.
if self.has_parent:
self.api.check_limit(cid, self.parent_type, self.parent_name, orig_from)
# Clean up old entries periodically
if self.invocation_no % 1000 == 0:
self.cleanup()
# ################################################################################################################################
def check_limit(self, cid, orig_from):
# type: (str, str)
with self.lock:
if self.has_from_any:
rate = self.from_any_rate
unit = self.from_any_unit
network_found = Const.from_any
def_object_id = None
def_object_type = None
def_object_name = None
else:
found = self._get_rate_config_by_from(orig_from)
rate = found.rate
unit = found.unit
network_found = found.from_
def_object_id = found.object_id
def_object_type = found.object_type
def_object_name = found.object_name
# Now, check actual rate limits
self._check_limit(cid, orig_from, network_found, rate, unit, def_object_id, def_object_name, def_object_type)
# ################################################################################################################################
def _get_current_periods(self):
raise NotImplementedError()
_get_current_state = _set_new_state = _delete_periods = _get_current_periods
# ################################################################################################################################
# ################################################################################################################################
class Approximate(BaseLimiter):
def _get_current_periods(self):
return list(iterkeys(self.by_period))
# ################################################################################################################################
def _delete_periods(self, to_delete):
for item in to_delete: # item: str
del self.by_period[item]
# ################################################################################################################################
def _get_current_state(self, current_period, network_found):
# type: (str, str) -> dict
# Get or create a dictionary of requests information for current period
period_dict = self.by_period.setdefault(current_period, {}) # type: dict
# Get information about already stored requests for that network in current period
return period_dict.setdefault(network_found, deepcopy(self.initial_state))
# ################################################################################################################################
def _set_new_state(self, current_state, cid, orig_from, network_found, now, *ignored):
current_state['requests'] += 1
current_state['last_cid'] = cid
current_state['last_request_time_utc'] = now.isoformat()
current_state['last_from'] = orig_from
current_state['last_network'] = str(network_found)
# ################################################################################################################################
# ################################################################################################################################
class Exact(BaseLimiter):
def __init__(self, cluster_id, sql_session_func):
# type: (int, Callable)
super(Exact, self).__init__(cluster_id)
self.sql_session_func = sql_session_func
# ################################################################################################################################
def _fetch_current_state(self, session, current_period, network_found):
# type: (str, str) -> RateLimitState
# We have a complex Python object but for the query we just need its string representation
network_found = str(network_found)
return current_state_query(session, self.cluster_id, self.object_info.type_, self.object_info.id,
current_period, network_found).\
first()
# ################################################################################################################################
def _get_current_state(self, current_period, network_found):
# type: (str, str) -> dict
current_state = deepcopy(self.initial_state) # type: dict
with closing(self.sql_session_func()) as session:
item = self._fetch_current_state(session, current_period, network_found)
if item:
current_state.update(item.asdict())
return current_state
# ################################################################################################################################
def _set_new_state(self, current_state, cid, orig_from, network_found, now, current_period):
# We just need a string representation of this object
network_found = str(network_found)
with closing(self.sql_session_func()) as session:
item = self._fetch_current_state(session, current_period, network_found)
if item:
item.last_cid = cid
item.last_from = orig_from
item.last_request_time_utc = now
else:
item = RateLimitState()
item.cluster_id = self.cluster_id
item.object_type = self.object_info.type_
item.object_id = self.object_info.id
item.requests = 0
item.period = current_period
item.network = network_found
item.last_cid = cid
item.last_from = orig_from
item.last_network = network_found
item.last_request_time_utc = now
item.requests += 1
session.add(item)
session.commit()
# ################################################################################################################################
def _get_current_periods(self):
with closing(self.sql_session_func()) as session:
return [elem[0] for elem in current_period_list(session, self.cluster_id).\
all()]
# ################################################################################################################################
def _delete_periods(self, to_delete):
with closing(self.sql_session_func()) as session:
session.execute(RateLimitStateDelete().where(
RateLimitStateTable.c.period.in_(to_delete)
))
session.commit()
# ################################################################################################################################
# ################################################################################################################################
# Source: zato-common 3.2.1 - src/zato/common/rate_limiting/limiter.py
[zato-connection-registry](https://github.com/emre/zato-connection-registry) is a command line application/library to load, backup, restore REST connection definitions in Zato servers.
If you maintain lots of REST API service definitions and experiment with Zato at the same time (spinning up new Zato instances for development, reconfiguring Zato's internal load balancer, etc.), then migrating these services can be a pain.
Of course, it's possible to do that migration with the internal Zato database. However, it's not very practical.
This tool uses Zato's [internal services](https://zato.io/blog/posts/public-api.html) to fetch and push service definitions, and relies on the [zato-client](https://zato.io/docs/progguide/clients/python.html) package.
Backup files are stored in the JSON format.
Note: Since the ```zato-client``` package is a requirement for this tool and is Python 2 only, zato-connection-registry is also a Python 2 project.
# Installation
```
$ pip install zato-connection-registry
```
# Commands
- Backup connection definitions
```
$ zato_connection_registry backup http://172.31.52.2:11223 pubapi:123 /tmp/foo.json
```
- Restore connection definitions
```
$ zato_connection_registry restore http://172.31.52.2:11223 pubapi:123 /tmp/foo.json
```
# Using zato-connection-registry as a library
After the installation, you can use the package as you wish:
```
from zato_connection_registry.registry import Registry
r = Registry(
"http://localhost:11223",
"pubapi",
"123",
)
r.load_rest_channels()
print(r.rest_channels)
```
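The same instance can also back up and restore definitions through the library: `dump_to_json` writes the loaded channels to a file and `restore_rest_channels` pushes them back, typically via a `Registry` pointed at the target server:
```
r.dump_to_json("/tmp/foo.json")
r.restore_rest_channels(from_file="/tmp/foo.json")
```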
# Limitations
- Only REST channel definitions (both incoming and outgoing) are supported.
# Running tests
```
python tests.py
...
----------------------------------------------------------------------
Ran 3 tests in 0.049s
OK
```
| zato-connection-registry | /zato-connection-registry-0.0.4.tar.gz/zato-connection-registry-0.0.4/README.md | README.md |
import json
import logging
from zato.client import APIClient
logger = logging.getLogger(__name__)
logging.basicConfig()
class Registry:
"""
A Registry class to fetch and push incoming/outgoing connection
definitions from any Zato instance, to any Zato instance.
"""
def __init__(self, zato_addr, username, password,
path="/zato/json/{}", cluster_id=None):
"""
Note: The default path argument works for Zato 3.0. You may need to
change if you use older Zato versions.
Args:
:param zato_addr (str): The address of Zato instance
:param username (str): Username for the Zato client
:param password (str): Password for the Zato client
:param path (str): URL path for the API calls.
:param cluster_id (int): The cluster id to pull/push connections
"""
self.zato_addr = zato_addr
self.username = username
self.password = password
self.path = path
self.client = APIClient(
zato_addr,
self.username,
self.password,
path=path
)
self.cluster_id = cluster_id or 1
self.rest_channels = []
def load_rest_channels(self):
"""Loads all REST connection definitions from the remote Zato server
and injects it to self.rest_channels property.
"""
response = self.client.invoke(
"zato.http-soap.get-list",
{"cluster_id": self.cluster_id}
)
if not response.data:
raise ValueError(response.details)
response_key = "zato_http_soap_get_list_response"
for outgoing_connection in response.data[response_key]:
# skip internal connections
if outgoing_connection.get("is_internal"):
continue
self.rest_channels.append(outgoing_connection)
def dump_to_json(self, json_file):
"""Dumps the loaded channels into a specified JSON file.
Note: if there are no channels loaded, it fetches up to date channels
from the remote Zato Server.
:param json_file (str): The JSON file path to dump the channels
"""
if not len(self.rest_channels):
self.load_rest_channels()
with open(json_file, 'w+') as f:
json.dump(self.rest_channels, f, indent=4, sort_keys=True)
def channel_to_request_params(self, channel, cluster_id=1):
"""Makes raw channel data compatible for the Zato's
zato.http-soap.create endpoint.
:param channel (dict): raw channel data
:param cluster_id (int): The cluster id to pull/push connections
:return: (dict)
"""
request = {
"cluster_id": cluster_id,
"is_active": channel.get("is_active"),
"is_internal": channel.get("is_internal"),
"name": channel.get("name"),
"transport": channel.get("transport"),
"url_path": channel.get("url_path"),
"cache_expiry": channel.get("cache_expiry"),
"cache_id": channel.get("cache_id"),
"content_encoding": channel.get("content_encoding"),
"content_type": channel.get("content_type"),
"data_format": channel.get("data_format"),
"has_rbac": channel.get("has_rbac"),
"host": channel.get("host"),
"match_slash": channel.get("match_slash"),
"merge_url_params_req": channel.get("merge_url_params_req"),
"method": channel.get("method"),
"params_pri": channel.get("params_pri"),
"ping_method": channel.get("ping_method"),
"pool_size": channel.get("pool_size"),
"sec_tls_ca_cert_id": channel.get("sec_tls_ca_cert_id"),
"security_id": channel.get("security_id"),
"serialization_type": channel.get("serialization_type"),
"soap_action": channel.get("soap_action"),
"timeout": channel.get("timeout"),
"url_params_pri": channel.get("url_params_pri"),
"service": channel.get("service_name")
}
# if that's an incoming service definition
if channel["connection"] == "channel":
request.update({
"connection": "channel",
})
else:
request.update({"connection": "outgoing"})
return request
def restore_rest_channels(self, from_file=None,
from_list=None, from_registry_instance=None):
"""Restores the saved connection list into the remote Zato server.
:param from_file (str): A JSON file path includes connections
:param from_list: (list): A python list of channels
:param from_registry_instance: (Registry) A registry instance to get
the old connection definitions.
"""
channel_list = []
if from_file:
with open(from_file, 'r') as f:
channel_list = json.load(f)
elif from_list:
channel_list = from_list
elif from_registry_instance:
channel_list = from_registry_instance.rest_channels
for channel in channel_list:
self.restore_channel(channel)
def restore_channel(self, channel):
"""Creates a single channel on the remote Zato server.
:param channel (dict): Channel data
"""
response = self.client.invoke(
"zato.http-soap.create",
self.channel_to_request_params(channel, self.cluster_id)
)
if not response.data:
details = json.loads(response.details)
# There must be a better way to do this
# but it looks like, to understand if a connection already
# exist in the Zato, you have to parse the stacktrace.
if 'already exists on this cluster' in details["zato_env"]["details"]:
logger.info("%s is already defined. ", channel["name"])
else:
logger.error(response.details)
else:
logger.info("%s added to the connections.", channel["name"]) | zato-connection-registry | /zato-connection-registry-0.0.4.tar.gz/zato-connection-registry-0.0.4/zato_connection_registry/registry.py | registry.py |
from __future__ import absolute_import, division, print_function, unicode_literals
# Part of Zato - Open-source ESB, SOA, REST, APIs and Cloud Integrations in Python
# https://zato.io
# stdlib
import logging
import sys
from logging import getLogger, Formatter
# click
import click
click.disable_unicode_literals_warning = True
# cryptography
from cryptography.fernet import Fernet, InvalidToken
# future
from builtins import str
# Tailer
from tailer import follow
# ################################################################################################################################
log_prefix = 'enclogdata:'
log_prefix_len = len(log_prefix)
cli_key_option = '--key'
cli_key_prompt = 'Crypto key'
cli_key_confirm_prompt = False
cli_key_help = 'Crypto key to decrypt data with.'
# ################################################################################################################################
class EncryptedLogFormatter(Formatter):
def __init__(self, key=None, *args, **kwargs):
key = key or kwargs.pop('fernet_key')
self.fernet = Fernet(key)
return super(EncryptedLogFormatter, self).__init__(*args, **kwargs)
def format(self, record):
msg = record.getMessage()
if isinstance(msg, str):
msg = msg.encode('utf8')
record.msg = '{}{}'.format(log_prefix, self.fernet.encrypt(msg).decode('utf8'))
# record.getMessage() above already formats the complete message
# using the required record.args. Once encrypted there is no use
# for the record.args. Hence we set it to None.
# This is necessary to allow logs of the following kind
# logging.info("Log: %s", some_string)
# If we do not set record.args to None, we would get exceptions such as
# "TypeError: not all arguments converted during string formatting"
record.args = None
return super(EncryptedLogFormatter, self).format(record)
# ################################################################################################################################
def _open(ctx, path, key, needs_tailf=False):
fernet = Fernet(key)
# Plain open
f = open(path)
# tail -f
if needs_tailf:
f = follow(f, delay=0.1)
for line in f:
prefix, encrypted = line.split(log_prefix)
try:
if isinstance(encrypted, str):
encrypted = encrypted.encode('utf8')
sys.stdout.write('{}{}\n'.format(prefix, fernet.decrypt(encrypted).decode('utf8')))
sys.stdout.flush()
except InvalidToken:
sys.stderr.write('Invalid crypto key\n')
sys.exit(1)
# ################################################################################################################################
@click.group()
def cli_main():
pass
# ################################################################################################################################
def genkey():
return Fernet.generate_key()
@click.command()
@click.pass_context
def _genkey(ctx):
sys.stdout.write('{}\n'.format(genkey().decode('ascii'))) # decode so Python 3 prints the key itself, not its repr
@click.command()
@click.pass_context
def demo(ctx):
plain_text = b'{"user":"Jane Xi"}'
key = Fernet.generate_key()
fernet = Fernet(key)
encrypted = fernet.encrypt(plain_text)
decrypted = fernet.decrypt(encrypted)
sys.stdout.write('\nPlain text: {}\n'.format(plain_text))
sys.stdout.write('Key: {}\n'.format(key))
sys.stdout.write('Encrypted: {}\n'.format(encrypted))
sys.stdout.write('Decrypted: {}\n\n'.format(decrypted))
def get_arg(name):
@click.command()
@click.argument('path', type=click.Path(exists=True, file_okay=True, dir_okay=False, resolve_path=True))
@click.password_option(cli_key_option, prompt=cli_key_prompt, confirmation_prompt=cli_key_confirm_prompt, help=cli_key_help)
@click.pass_context
def _cli_arg(ctx, path, key):
_open(ctx, path, key.encode('utf-8'), True if name == 'tailf' else False)
return _cli_arg
cli_main.add_command(_genkey, 'genkey')
cli_main.add_command(demo)
for name in ('open', 'tailf'):
cli_main.add_command(get_arg(name), name)
# ################################################################################################################################
if __name__ == '__main__':
level = logging.DEBUG
format = '%(levelname)s - %(message)s'
key = Fernet.generate_key()
formatter = EncryptedLogFormatter(key, format)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger = getLogger('')
logger.addHandler(handler)
logger.setLevel(level)
logger.info('{"user":"Jane Xi"}')
# ################################################################################################################################ | zato-enclog | /zato-enclog-1.0.7.tar.gz/zato-enclog-1.0.7/src/zato/enclog/_core.py | _core.py |
bunch
=====
Bunch is a dictionary that supports attribute-style access, a la JavaScript.
>>> b = Bunch()
>>> b.hello = 'world'
>>> b.hello
'world'
>>> b['hello'] += "!"
>>> b.hello
'world!'
>>> b.foo = Bunch(lol=True)
>>> b.foo.lol
True
>>> b.foo is b['foo']
True
Dictionary Methods
------------------
A Bunch is a subclass of ``dict``; it supports all the methods a ``dict`` does:
>>> b.keys()
['foo', 'hello']
Including ``update()``:
>>> b.update({ 'ponies': 'are pretty!' }, hello=42)
>>> print repr(b)
Bunch(foo=Bunch(lol=True), hello=42, ponies='are pretty!')
As well as iteration:
>>> [ (k,b[k]) for k in b ]
[('ponies', 'are pretty!'), ('foo', Bunch(lol=True)), ('hello', 42)]
And "splats":
>>> "The {knights} who say {ni}!".format(**Bunch(knights='lolcats', ni='can haz'))
'The lolcats who say can haz!'
Serialization
-------------
Bunches happily and transparently serialize to JSON and YAML.
>>> b = Bunch(foo=Bunch(lol=True), hello=42, ponies='are pretty!')
>>> import json
>>> json.dumps(b)
'{"ponies": "are pretty!", "foo": {"lol": true}, "hello": 42}'
If JSON support is present (``json`` or ``simplejson``), ``Bunch`` will have a ``toJSON()`` method which returns the object as a JSON string.
If you have PyYAML_ installed, Bunch attempts to register itself with the various YAML Representers so that Bunches can be transparently dumped and loaded.
>>> b = Bunch(foo=Bunch(lol=True), hello=42, ponies='are pretty!')
>>> import yaml
>>> yaml.dump(b)
'!bunch.Bunch\nfoo: !bunch.Bunch {lol: true}\nhello: 42\nponies: are pretty!\n'
>>> yaml.safe_dump(b)
'foo: {lol: true}\nhello: 42\nponies: are pretty!\n'
In addition, Bunch instances will have a ``toYAML()`` method that returns the YAML string using ``yaml.safe_dump()``. This method also replaces ``__str__`` if present, as I find it far more readable. You can revert back to Python's default use of ``__repr__`` with a simple assignment: ``Bunch.__str__ = Bunch.__repr__``. The Bunch class will also have a static method ``Bunch.fromYAML()``, which loads a Bunch out of a YAML string.
Finally, Bunch converts easily and recursively to (``unbunchify()``, ``Bunch.toDict()``) and from (``bunchify()``, ``Bunch.fromDict()``) a normal ``dict``, making it easy to cleanly serialize them in other formats.
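For example, a round trip through a plain ``dict`` preserves the nested structure:
>>> d = Bunch(foo=Bunch(lol=True), hello=42).toDict()
>>> d['foo']['lol']
True
>>> bunchify(d).foo.lol
True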
Miscellaneous
-------------
* It is safe to ``import *`` from this module. You'll get: ``Bunch``, ``bunchify``, and ``unbunchify``.
* Ample doctests::
$ python -m bunch.test
$ python -m bunch.test -v | tail -n22
1 items had no tests:
bunch.fromYAML
16 items passed all tests:
8 tests in bunch
13 tests in bunch.Bunch
7 tests in bunch.Bunch.__contains__
4 tests in bunch.Bunch.__delattr__
7 tests in bunch.Bunch.__getattr__
3 tests in bunch.Bunch.__repr__
5 tests in bunch.Bunch.__setattr__
2 tests in bunch.Bunch.fromDict
2 tests in bunch.Bunch.toDict
5 tests in bunch.bunchify
2 tests in bunch.from_yaml
3 tests in bunch.toJSON
6 tests in bunch.toYAML
3 tests in bunch.to_yaml
3 tests in bunch.to_yaml_safe
4 tests in bunch.unbunchify
77 tests in 17 items.
77 passed and 0 failed.
Test passed.
Feedback
--------
Open a ticket / fork the project on GitHub_, or send me an email at `[email protected]`_.
.. _PyYAML: http://pyyaml.org/wiki/PyYAML
.. _GitHub: http://github.com/dsc/bunch
.. _[email protected]: mailto:[email protected]
| zato-ext-bunch | /zato-ext-bunch-1.2.tar.gz/zato-ext-bunch-1.2/README.rst | README.rst |
__version__ = '1.0.1'
VERSION = tuple(map(int, __version__.split('.')))
__all__ = ('Bunch', 'bunchify', 'unbunchify')
from .python3_compat import *
class Bunch(dict):
""" A dictionary that provides attribute-style access.
>>> b = Bunch()
>>> b.hello = 'world'
>>> b.hello
'world'
>>> b['hello'] += "!"
>>> b.hello
'world!'
>>> b.foo = Bunch(lol=True)
>>> b.foo.lol
True
>>> b.foo is b['foo']
True
A Bunch is a subclass of dict; it supports all the methods a dict does...
>>> sorted(b.keys())
['foo', 'hello']
Including update()...
>>> b.update({ 'ponies': 'are pretty!' }, hello=42)
>>> print (repr(b))
Bunch(foo=Bunch(lol=True), hello=42, ponies='are pretty!')
As well as iteration...
>>> [ (k,b[k]) for k in b ]
[('ponies', 'are pretty!'), ('foo', Bunch(lol=True)), ('hello', 42)]
And "splats".
>>> "The {knights} who say {ni}!".format(**Bunch(knights='lolcats', ni='can haz'))
'The lolcats who say can haz!'
See unbunchify/Bunch.toDict, bunchify/Bunch.fromDict for notes about conversion.
"""
def __contains__(self, k):
""" >>> b = Bunch(ponies='are pretty!')
>>> 'ponies' in b
True
>>> 'foo' in b
False
>>> b['foo'] = 42
>>> 'foo' in b
True
>>> b.hello = 'hai'
>>> 'hello' in b
True
>>> b[None] = 123
>>> None in b
True
>>> b[False] = 456
>>> False in b
True
"""
try:
return dict.__contains__(self, k) or hasattr(self, k)
except:
return False
# only called if k not found in normal places
def __getattr__(self, k):
""" Gets key if it exists, otherwise throws AttributeError.
nb. __getattr__ is only called if key is not found in normal places.
>>> b = Bunch(bar='baz', lol={})
>>> b.foo
Traceback (most recent call last):
...
AttributeError: foo
>>> b.bar
'baz'
>>> getattr(b, 'bar')
'baz'
>>> b['bar']
'baz'
>>> b.lol is b['lol']
True
>>> b.lol is getattr(b, 'lol')
True
"""
try:
# Throws exception if not in prototype chain
return object.__getattribute__(self, k)
except AttributeError:
try:
return self[k]
except KeyError:
raise AttributeError(k)
def __setattr__(self, k, v):
""" Sets attribute k if it exists, otherwise sets key k. A KeyError
raised by set-item (only likely if you subclass Bunch) will
propagate as an AttributeError instead.
>>> b = Bunch(foo='bar', this_is='useful when subclassing')
>>> b.values #doctest: +ELLIPSIS
<built-in method values of Bunch object at 0x...>
>>> b.values = 'uh oh'
>>> b.values
'uh oh'
>>> b['values']
Traceback (most recent call last):
...
KeyError: 'values'
"""
try:
# Throws exception if not in prototype chain
object.__getattribute__(self, k)
except AttributeError:
try:
self[k] = v
except:
raise AttributeError(k)
else:
object.__setattr__(self, k, v)
def __delattr__(self, k):
""" Deletes attribute k if it exists, otherwise deletes key k. A KeyError
raised by deleting the key--such as when the key is missing--will
propagate as an AttributeError instead.
>>> b = Bunch(lol=42)
>>> del b.values
Traceback (most recent call last):
...
AttributeError: 'Bunch' object attribute 'values' is read-only
>>> del b.lol
>>> b.lol
Traceback (most recent call last):
...
AttributeError: lol
"""
try:
# Throws exception if not in prototype chain
object.__getattribute__(self, k)
except AttributeError:
try:
del self[k]
except KeyError:
raise AttributeError(k)
else:
object.__delattr__(self, k)
def toDict(self):
""" Recursively converts a bunch back into a dictionary.
>>> b = Bunch(foo=Bunch(lol=True), hello=42, ponies='are pretty!')
>>> b.toDict()
{'ponies': 'are pretty!', 'foo': {'lol': True}, 'hello': 42}
See unbunchify for more info.
"""
return unbunchify(self)
def __repr__(self):
""" Invertible* string-form of a Bunch.
>>> b = Bunch(foo=Bunch(lol=True), hello=42, ponies='are pretty!')
>>> print (repr(b))
Bunch(foo=Bunch(lol=True), hello=42, ponies='are pretty!')
>>> eval(repr(b))
Bunch(foo=Bunch(lol=True), hello=42, ponies='are pretty!')
(*) Invertible so long as collection contents are each repr-invertible.
"""
keys = list(iterkeys(self))
keys.sort()
args = ', '.join(['%s=%r' % (key, self[key]) for key in keys])
return '%s(%s)' % (self.__class__.__name__, args)
@staticmethod
def fromDict(d):
""" Recursively transforms a dictionary into a Bunch via copy.
>>> b = Bunch.fromDict({'urmom': {'sez': {'what': 'what'}}})
>>> b.urmom.sez.what
'what'
See bunchify for more info.
"""
return bunchify(d)
# While we could convert abstract types like Mapping or Iterable, I think
# bunchify is more likely to "do what you mean" if it is conservative about
# casting (ex: isinstance(str,Iterable) == True ).
#
# Should you disagree, it is not difficult to duplicate this function with
# more aggressive coercion to suit your own purposes.
def bunchify(x):
""" Recursively transforms a dictionary into a Bunch via copy.
>>> b = bunchify({'urmom': {'sez': {'what': 'what'}}})
>>> b.urmom.sez.what
'what'
bunchify can handle intermediary dicts, lists and tuples (as well as
their subclasses), but ymmv on custom datatypes.
>>> b = bunchify({ 'lol': ('cats', {'hah':'i win again'}),
... 'hello': [{'french':'salut', 'german':'hallo'}] })
>>> b.hello[0].french
'salut'
>>> b.lol[1].hah
'i win again'
nb. As dicts are not hashable, they cannot be nested in sets/frozensets.
"""
if isinstance(x, dict):
return Bunch( (k, bunchify(v)) for k,v in x.items() )
elif isinstance(x, (list, tuple)):
return type(x)( bunchify(v) for v in x )
else:
return x
def unbunchify(x):
""" Recursively converts a Bunch into a dictionary.
>>> b = Bunch(foo=Bunch(lol=True), hello=42, ponies='are pretty!')
>>> unbunchify(b)
{'ponies': 'are pretty!', 'foo': {'lol': True}, 'hello': 42}
unbunchify will handle intermediary dicts, lists and tuples (as well as
their subclasses), but ymmv on custom datatypes.
>>> b = Bunch(foo=['bar', Bunch(lol=True)], hello=42,
... ponies=('are pretty!', Bunch(lies='are trouble!')))
>>> unbunchify(b) #doctest: +NORMALIZE_WHITESPACE
{'ponies': ('are pretty!', {'lies': 'are trouble!'}),
'foo': ['bar', {'lol': True}], 'hello': 42}
nb. As dicts are not hashable, they cannot be nested in sets/frozensets.
"""
if isinstance(x, dict):
return dict( (k, unbunchify(v)) for k,v in x.items() )
elif isinstance(x, (list, tuple)):
return type(x)( unbunchify(v) for v in x )
else:
return x
### Serialization
try:
try:
import json
except ImportError:
import simplejson as json
def toJSON(self, **options):
""" Serializes this Bunch to JSON. Accepts the same keyword options as `json.dumps()`.
>>> b = Bunch(foo=Bunch(lol=True), hello=42, ponies='are pretty!')
>>> json.dumps(b)
'{"ponies": "are pretty!", "foo": {"lol": true}, "hello": 42}'
>>> b.toJSON()
'{"ponies": "are pretty!", "foo": {"lol": true}, "hello": 42}'
"""
return json.dumps(self, **options)
Bunch.toJSON = toJSON
except ImportError:
pass
try:
# Attempt to register ourself with PyYAML as a representer
import yaml
from yaml.representer import Representer, SafeRepresenter
def from_yaml(loader, node):
""" PyYAML support for Bunches using the tag `!bunch` and `!bunch.Bunch`.
>>> import yaml
>>> yaml.load('''
... Flow style: !bunch.Bunch { Clark: Evans, Brian: Ingerson, Oren: Ben-Kiki }
... Block style: !bunch
... Clark : Evans
... Brian : Ingerson
... Oren : Ben-Kiki
... ''') #doctest: +NORMALIZE_WHITESPACE
{'Flow style': Bunch(Brian='Ingerson', Clark='Evans', Oren='Ben-Kiki'),
'Block style': Bunch(Brian='Ingerson', Clark='Evans', Oren='Ben-Kiki')}
This module registers itself automatically to cover both Bunch and any
subclasses. Should you want to customize the representation of a subclass,
simply register it with PyYAML yourself.
"""
data = Bunch()
yield data
value = loader.construct_mapping(node)
data.update(value)
def to_yaml_safe(dumper, data):
""" Converts Bunch to a normal mapping node, making it appear as a
dict in the YAML output.
>>> b = Bunch(foo=['bar', Bunch(lol=True)], hello=42)
>>> import yaml
>>> yaml.safe_dump(b, default_flow_style=True)
'{foo: [bar, {lol: true}], hello: 42}\\n'
"""
return dumper.represent_dict(data)
def to_yaml(dumper, data):
""" Converts Bunch to a representation node.
>>> b = Bunch(foo=['bar', Bunch(lol=True)], hello=42)
>>> import yaml
>>> yaml.dump(b, default_flow_style=True)
'!bunch.Bunch {foo: [bar, !bunch.Bunch {lol: true}], hello: 42}\\n'
"""
return dumper.represent_mapping(u('!bunch.Bunch'), data)
yaml.add_constructor(u('!bunch'), from_yaml)
yaml.add_constructor(u('!bunch.Bunch'), from_yaml)
SafeRepresenter.add_representer(Bunch, to_yaml_safe)
SafeRepresenter.add_multi_representer(Bunch, to_yaml_safe)
Representer.add_representer(Bunch, to_yaml)
Representer.add_multi_representer(Bunch, to_yaml)
# Instance methods for YAML conversion
def toYAML(self, **options):
""" Serializes this Bunch to YAML, using `yaml.safe_dump()` if
no `Dumper` is provided. See the PyYAML documentation for more info.
>>> b = Bunch(foo=['bar', Bunch(lol=True)], hello=42)
>>> import yaml
>>> yaml.safe_dump(b, default_flow_style=True)
'{foo: [bar, {lol: true}], hello: 42}\\n'
>>> b.toYAML(default_flow_style=True)
'{foo: [bar, {lol: true}], hello: 42}\\n'
>>> yaml.dump(b, default_flow_style=True)
'!bunch.Bunch {foo: [bar, !bunch.Bunch {lol: true}], hello: 42}\\n'
>>> b.toYAML(Dumper=yaml.Dumper, default_flow_style=True)
'!bunch.Bunch {foo: [bar, !bunch.Bunch {lol: true}], hello: 42}\\n'
"""
opts = dict(indent=4, default_flow_style=False)
opts.update(options)
if 'Dumper' not in opts:
return yaml.safe_dump(self, **opts)
else:
return yaml.dump(self, **opts)
def fromYAML(*args, **kwargs):
return bunchify( yaml.load(*args, **kwargs) )
Bunch.toYAML = toYAML
Bunch.fromYAML = staticmethod(fromYAML)
except ImportError:
pass
if __name__ == "__main__":
import doctest
doctest.testmod() | zato-ext-bunch | /zato-ext-bunch-1.2.tar.gz/zato-ext-bunch-1.2/bunch/__init__.py | __init__.py |
**This fork of inotifyx is Python 3-friendly and works with both Python 2.7 and Python 3.4+.**
inotifyx is a simple Python binding to the Linux inotify file system event
monitoring API.
Documentation is provided in the module. To get help, start an interactive
Python session and type:
>>> import inotifyx
>>> help(inotifyx)
You can also test out inotifyx easily. The following command will print events
for /tmp:
python -m inotifyx /tmp
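The module can also be used as a library. A minimal sketch (the path, mask and timeout below are just examples; any of the exported IN_* constants can be combined into the mask):

    import os
    import inotifyx

    fd = inotifyx.init()
    try:
        wd = inotifyx.add_watch(fd, '/tmp', inotifyx.IN_CREATE | inotifyx.IN_DELETE)
        for event in inotifyx.get_events(fd, 5.0):
            print(event)
        inotifyx.rm_watch(fd, wd)
    finally:
        os.close(fd)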
Tests can be run via setup.py:
./setup.py test
Note that the module must be built and installed for tests to run correctly.
In the future, this requirement will be lifted.
| zato-ext-inotifyx | /zato-ext-inotifyx-0.3.1.tar.gz/zato-ext-inotifyx-0.3.1/README.md | README.md |
from inotifyx import binding
from inotifyx.distinfo import version as __version__
constants = {}
for name in dir(binding):
if name.startswith('IN_'):
globals()[name] = constants[name] = getattr(binding, name)
init = binding.init
rm_watch = binding.rm_watch
add_watch = binding.add_watch
class InotifyEvent(object):
'''
InotifyEvent(wd, mask, cookie, name)
A representation of the inotify_event structure. See the inotify
documentation for a description of these fields.
'''
wd = None
mask = None
cookie = None
name = None
def __init__(self, wd, mask, cookie, name):
self.wd = wd
self.mask = mask
self.cookie = cookie
self.name = name
def __str__(self):
return '%s: %s' % (self.wd, self.get_mask_description())
def __repr__(self):
return '%s(%s, %s, %s, %s)' % (
self.__class__.__name__,
repr(self.wd),
repr(self.mask),
repr(self.cookie),
repr(self.name),
)
def get_mask_description(self):
'''
Return an ASCII string describing the mask field in terms of
bitwise-or'd IN_* constants, or 0. The result is valid Python code
that could be eval'd to get the value of the mask field. In other
words, for a given event:
>>> from inotifyx import *
>>> assert (event.mask == eval(event.get_mask_description()))
'''
parts = []
for name, value in constants.items():
if self.mask & value:
parts.append(name)
if parts:
return '|'.join(parts)
return '0'
def get_events(fd, *args):
'''
get_events(fd[, timeout])
Return a list of InotifyEvent instances representing events read from
inotify. If timeout is None, this will block forever until at least one
event can be read. Otherwise, timeout should be an integer or float
specifying a timeout in seconds. If get_events times out waiting for
events, an empty list will be returned. If timeout is zero, get_events
will not block.
'''
return [
InotifyEvent(wd, mask, cookie, name)
for wd, mask, cookie, name in binding.get_events(fd, *args)
] | zato-ext-inotifyx | /zato-ext-inotifyx-0.3.1.tar.gz/zato-ext-inotifyx-0.3.1/inotifyx/__init__.py | __init__.py |
pytds
=====
.. image:: https://secure.travis-ci.org/denisenkom/pytds.png?branch=master
:target: https://travis-ci.org/denisenkom/pytds
.. image:: https://ci.appveyor.com/api/projects/status/a5h4y29063crqtet?svg=true
:target: https://ci.appveyor.com/project/denisenkom/pytds
.. image:: http://img.shields.io/pypi/v/python-tds.svg
:target: https://pypi.python.org/pypi/python-tds/
.. image:: https://codecov.io/gh/denisenkom/pytds/branch/master/graph/badge.svg
:target: https://codecov.io/gh/denisenkom/pytds
`Python DBAPI`_ driver for MSSQL using pure Python TDS (Tabular Data Stream) protocol implementation.
Doesn't depend on ADO or FreeTDS. Can be used on any platform, including Linux, MacOS, Windows.
It can be used with https://pypi.python.org/pypi/django-sqlserver as a Django database backend.
Features
--------
* Fully supports new MSSQL 2008 date types: datetime2, date, time, datetimeoffset
* MARS
* Bulk insert
* Table-valued parameters
* TLS connection encryption
* Kerberos support on non-Windows platforms (requires kerberos package)
Installation
------------
To install run this command:
.. code-block:: bash
$ pip install python-tds
If you want to use TLS you should also install pyOpenSSL package:
.. code-block:: bash
$ pip install pyOpenSSL
For a better performance install bitarray package too:
.. code-block:: bash
$ pip install bitarray
To use Kerberos on non-Windows platforms (experimental) install kerberos package:
.. code-block:: bash
$ pip install kerberos
Documentation
-------------
Documentation is available at https://python-tds.readthedocs.io/en/latest/.
Example
-------
To connect to database do
.. code-block:: python
import pytds
with pytds.connect('server', 'database', 'user', 'password') as conn:
with conn.cursor() as cur:
cur.execute("select 1")
cur.fetchall()
To enable TLS you should also provide cafile parameter which should be a file name containing trusted CAs in PEM format.
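For example (the certificate path below is just a placeholder):

.. code-block:: python

    with pytds.connect('server', 'database', 'user', 'password',
                       cafile='/path/to/trusted_ca.pem') as conn:
        with conn.cursor() as cur:
            cur.execute("select 1")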
For detailed documentation of connection parameters see: `pytds.connect`_
.. _Python DBAPI: http://legacy.python.org/dev/peps/pep-0249/
.. _pytds.connect: https://python-tds.readthedocs.io/en/latest/pytds.html#pytds.connect
| zato-ext-python-tds | /zato-ext-python-tds-1.11.1.tar.gz/zato-ext-python-tds-1.11.1/README.rst | README.rst |
# Calculates the current version number. If possible, this is the
# output of “git describe”, modified to conform to the versioning
# scheme that setuptools uses. If “git describe” returns an error
# (most likely because we're in an unpacked copy of a release tarball,
# rather than in a git working copy), then we fall back on reading the
# contents of the RELEASE-VERSION file.
#
# To use this script, simply import it in your setup.py file, and use the
# results of get_git_version() as your package version:
#
# from version import *
#
# setup(
# version=get_git_version(),
# .
# .
# .
# )
#
# This will automatically update the RELEASE-VERSION file, if
# necessary. Note that the RELEASE-VERSION file should *not* be
# checked into git; please add it to your top-level .gitignore file.
#
# You'll probably want to distribute the RELEASE-VERSION file in your
# sdist tarballs; to do this, just create a MANIFEST.in file that
# contains the following line:
#
# include RELEASE-VERSION
__all__ = ("get_git_version")
from subprocess import Popen, PIPE
def call_git_describe(abbrev=4):
try:
p = Popen(['git', 'describe', '--abbrev=%d' % abbrev],
stdout=PIPE, stderr=PIPE)
p.stderr.close()
line = p.stdout.readlines()[0]
return line.strip().decode('utf8')
except Exception:
return None
def read_release_version():
try:
f = open("RELEASE-VERSION", "rb")
try:
version = f.readlines()[0]
return version.strip().decode('utf8')
finally:
f.close()
except Exception:
return None
def write_release_version(version):
f = open("RELEASE-VERSION", "w")
f.write("%s\n" % version)
f.close()
def get_git_version(abbrev=4):
# Read in the version that's currently in RELEASE-VERSION.
release_version = read_release_version()
# First try to get the current version using “git describe”.
version = call_git_describe(abbrev)
# If that doesn't work, fall back on the value that's in
# RELEASE-VERSION.
if version is None:
version = release_version
# If we still don't have anything, that's an error.
if version is None:
return 'unknown'
# If the current version is different from what's in the
# RELEASE-VERSION file, update the file to be current.
if version != release_version:
write_release_version(version)
# Finally, return the current version.
return version | zato-ext-python-tds | /zato-ext-python-tds-1.11.1.tar.gz/zato-ext-python-tds-1.11.1/version.py | version.py |
import socket
import logging
logger = logging.getLogger(__name__)
class SspiAuth(object):
""" SSPI authentication
:platform: Windows
Required parameters are server_name and port or spn
:keyword user_name: User name, if not provided current security context will be used
:type user_name: str
:keyword password: User password, if not provided current security context will be used
:type password: str
:keyword server_name: MSSQL server host name
:type server_name: str
:keyword port: MSSQL server port
:type port: int
:keyword spn: Service name
:type spn: str
"""
def __init__(self, user_name='', password='', server_name='', port=None, spn=None):
from . import sspi
# parse username/password informations
if '\\' in user_name:
domain, user_name = user_name.split('\\')
else:
domain = ''
if domain and user_name:
self._identity = sspi.make_winnt_identity(
domain,
user_name,
password)
else:
self._identity = None
# build SPN
if spn:
self._sname = spn
else:
primary_host_name, _, _ = socket.gethostbyname_ex(server_name)
self._sname = 'MSSQLSvc/{0}:{1}'.format(primary_host_name, port)
# using Negotiate, the system will pick the proper protocol (either NTLM or Kerberos)
self._cred = sspi.SspiCredentials(
package='Negotiate',
use=sspi.SECPKG_CRED_OUTBOUND,
identity=self._identity)
self._flags = sspi.ISC_REQ_CONFIDENTIALITY | sspi.ISC_REQ_REPLAY_DETECT | sspi.ISC_REQ_CONNECTION
self._ctx = None
def create_packet(self):
from . import sspi
import ctypes
buf = ctypes.create_string_buffer(4096)
self._ctx, status, bufs = self._cred.create_context(
flags=self._flags,
byte_ordering='network',
target_name=self._sname,
output_buffers=[(sspi.SECBUFFER_TOKEN, buf)])
if status == sspi.Status.SEC_I_COMPLETE_AND_CONTINUE:
self._ctx.complete_auth_token(bufs)
return bufs[0][1]
def handle_next(self, packet):
from . import sspi
import ctypes
buf = ctypes.create_string_buffer(4096)
status, buffers = self._ctx.next(
flags=self._flags,
byte_ordering='network',
target_name=self._sname,
input_buffers=[(sspi.SECBUFFER_TOKEN, packet)],
output_buffers=[(sspi.SECBUFFER_TOKEN, buf)])
return buffers[0][1]
def close(self):
self._ctx.close()
self._cred.close()
class NtlmAuth(object):
""" NTLM authentication, uses Python implementation
:param user_name: User name
:type user_name: str
:param password: User password
:type password: str
"""
def __init__(self, user_name, password):
self._user_name = user_name
if '\\' in user_name:
domain, self._user = user_name.split('\\', 1)
self._domain = domain.upper()
else:
self._domain = 'WORKSPACE'
self._user = user_name
self._password = password
try:
from ntlm_auth.ntlm import NegotiateFlags
except ImportError:
raise ImportError("To use NTLM authentication you need to install ntlm-auth module")
self._nego_flags = NegotiateFlags.NTLMSSP_NEGOTIATE_128 | \
NegotiateFlags.NTLMSSP_NEGOTIATE_56 | \
NegotiateFlags.NTLMSSP_NEGOTIATE_UNICODE | \
NegotiateFlags.NTLMSSP_NEGOTIATE_VERSION | \
NegotiateFlags.NTLMSSP_REQUEST_TARGET | \
NegotiateFlags.NTLMSSP_NEGOTIATE_NTLM | \
NegotiateFlags.NTLMSSP_NEGOTIATE_EXTENDED_SESSIONSECURITY | \
NegotiateFlags.NTLMSSP_NEGOTIATE_ALWAYS_SIGN
self._ntlm_compat = 2
self._workstation = socket.gethostname().upper()
def create_packet(self):
import ntlm_auth.ntlm
return ntlm_auth.ntlm.NegotiateMessage(
negotiate_flags=self._nego_flags,
domain_name=self._domain,
workstation=self._workstation,
).get_data()
def handle_next(self, packet):
import ntlm_auth.ntlm
challenge = ntlm_auth.ntlm.ChallengeMessage(packet)
return ntlm_auth.ntlm.AuthenticateMessage(
user_name=self._user,
password=self._password,
domain_name=self._domain,
workstation=self._workstation,
challenge_message=challenge,
ntlm_compatibility=self._ntlm_compat,
server_certificate_hash=None,
).get_data()
def close(self):
pass
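# A minimal usage sketch (server details below are placeholders): an instance of
# one of the classes in this module is passed as the ``auth`` argument of
# ``pytds.connect``, e.g.:
#
#     import pytds
#     conn = pytds.connect('server', 'database', auth=NtlmAuth('DOMAIN\\user', 'password'))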
class KerberosAuth(object):
def __init__(self, server_principal):
try:
import kerberos
except ImportError:
import winkerberos as kerberos
self._kerberos = kerberos
res, context = kerberos.authGSSClientInit(server_principal)
if res < 0:
raise RuntimeError('authGSSClientInit failed with code {}'.format(res))
logger.info('Initialized GSS context')
self._context = context
def create_packet(self):
import base64
res = self._kerberos.authGSSClientStep(self._context, '')
if res < 0:
raise RuntimeError('authGSSClientStep failed with code {}'.format(res))
data = self._kerberos.authGSSClientResponse(self._context)
logger.info('created first client GSS packet %s', data)
return base64.b64decode(data)
def handle_next(self, packet):
import base64
res = self._kerberos.authGSSClientStep(self._context, base64.b64encode(packet).decode('ascii'))
if res < 0:
raise RuntimeError('authGSSClientStep failed with code {}'.format(res))
if res == self._kerberos.AUTH_GSS_COMPLETE:
logger.info('GSS authentication completed')
return b''
else:
data = self._kerberos.authGSSClientResponse(self._context)
logger.info('created client GSS packet %s', data)
return base64.b64decode(data)
def close(self):
pass | zato-ext-python-tds | /zato-ext-python-tds-1.11.1.tar.gz/zato-ext-python-tds-1.11.1/src/pytds/login.py | login.py |
import codecs
import contextlib
import logging
import datetime
import six
import socket
import struct
from .collate import ucs2_codec, Collation, lcid2charset, raw_collation
from . import tds_base
from . import tds_types
from . import tls
from .tds_base import readall, readall_fast, skipall, PreLoginEnc, PreLoginToken
logger = logging.getLogger()
# packet header
# https://msdn.microsoft.com/en-us/library/dd340948.aspx
_header = struct.Struct('>BBHHBx')
_byte = struct.Struct('B')
_smallint_le = struct.Struct('<h')
_smallint_be = struct.Struct('>h')
_usmallint_le = struct.Struct('<H')
_usmallint_be = struct.Struct('>H')
_int_le = struct.Struct('<l')
_int_be = struct.Struct('>l')
_uint_le = struct.Struct('<L')
_uint_be = struct.Struct('>L')
_int8_le = struct.Struct('<q')
_int8_be = struct.Struct('>q')
_uint8_le = struct.Struct('<Q')
_uint8_be = struct.Struct('>Q')
logging_enabled = False
# stored procedure output parameter
class output(object):
@property
def type(self):
"""
This is either the sql type declaration or python type instance
of the parameter.
"""
return self._type
@property
def value(self):
"""
This is the value of the parameter.
"""
return self._value
def __init__(self, value=None, param_type=None):
""" Creates procedure output parameter.
:param param_type: either sql type declaration or python type
:param value: value to pass into procedure
"""
if param_type is None:
if value is None or value is default:
raise ValueError('Output type cannot be autodetected')
elif isinstance(param_type, type) and value is not None:
if value is not default and not isinstance(value, param_type):
raise ValueError('value should match param_type, value is {}, param_type is \'{}\''.format(repr(value), param_type.__name__))
self._type = param_type
self._value = value
class _Default(object):
pass
default = _Default()
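# A minimal usage sketch (the procedure name below is a placeholder): ``output``
# marks a stored procedure parameter as OUTPUT and ``default`` requests the
# server-side default value for a parameter, e.g. with a cursor ``cur``:
#
#     param = output(value=1, param_type=int)
#     results = cur.callproc('my_proc', (param,))
#     # per DB-API semantics, ``results`` holds the parameters with OUTPUT
#     # values filled in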
def tds7_crypt_pass(password):
""" Mangle password according to tds rules
:param password: Password str
:returns: Byte-string with encoded password
"""
encoded = bytearray(ucs2_codec.encode(password)[0])
for i, ch in enumerate(encoded):
encoded[i] = ((ch << 4) & 0xff | (ch >> 4)) ^ 0xA5
return encoded
class _TdsEnv:
def __init__(self):
self.database = None
self.language = None
self.charset = None
class _TdsReader(object):
""" TDS stream reader
Provides a stream-like interface for the packetized TDS stream.
Also provides convenience methods to decode primitive data like
different kinds of integers etc.
"""
def __init__(self, session):
self._buf = bytearray(b'\x00' * 4096)
self._bufview = memoryview(self._buf)
self._pos = len(self._buf) # position in the buffer
self._have = 0 # number of bytes read from packet
self._size = 0 # size of current packet
self._session = session
self._transport = session._transport
self._type = None
self._status = None
def set_block_size(self, size):
self._buf = bytearray(b'\x00' * size)
self._bufview = memoryview(self._buf)
def get_block_size(self):
return len(self._buf)
@property
def session(self):
""" Link to :class:`_TdsSession` object
"""
return self._session
@property
def packet_type(self):
""" Type of current packet
Possible values are TDS_QUERY, TDS_LOGIN, etc.
"""
return self._type
def read_fast(self, size):
""" Faster version of read
Instead of returning a sliced buffer, it returns a reference to the internal
buffer and an offset into that buffer.
:param size: Number of bytes to read
:returns: Tuple of bytes buffer, and offset in this buffer
"""
if self._pos >= self._size:
self._read_packet()
offset = self._pos
to_read = min(size, self._size - self._pos)
self._pos += to_read
return self._buf, offset
def recv(self, size):
if self._pos >= self._size:
self._read_packet()
offset = self._pos
to_read = min(size, self._size - self._pos)
self._pos += to_read
return self._buf[offset:offset+to_read]
def unpack(self, struc):
""" Unpacks given structure from stream
:param struc: A struct.Struct instance
:returns: Result of unpacking
"""
buf, offset = readall_fast(self, struc.size)
return struc.unpack_from(buf, offset)
def get_byte(self):
""" Reads one byte from stream """
return self.unpack(_byte)[0]
def get_smallint(self):
""" Reads 16bit signed integer from the stream """
return self.unpack(_smallint_le)[0]
def get_usmallint(self):
""" Reads 16bit unsigned integer from the stream """
return self.unpack(_usmallint_le)[0]
def get_int(self):
""" Reads 32bit signed integer from the stream """
return self.unpack(_int_le)[0]
def get_uint(self):
""" Reads 32bit unsigned integer from the stream """
return self.unpack(_uint_le)[0]
def get_uint_be(self):
""" Reads 32bit unsigned big-endian integer from the stream """
return self.unpack(_uint_be)[0]
def get_uint8(self):
""" Reads 64bit unsigned integer from the stream """
return self.unpack(_uint8_le)[0]
def get_int8(self):
""" Reads 64bit signed integer from the stream """
return self.unpack(_int8_le)[0]
def read_ucs2(self, num_chars):
""" Reads num_chars UCS2 string from the stream """
buf = readall(self, num_chars * 2)
return ucs2_codec.decode(buf)[0]
def read_str(self, size, codec):
""" Reads byte string from the stream and decodes it
:param size: Size of string in bytes
:param codec: Instance of codec to decode string
:returns: Unicode string
"""
return codec.decode(readall(self, size))[0]
def get_collation(self):
""" Reads :class:`Collation` object from stream """
buf = readall(self, Collation.wire_size)
return Collation.unpack(buf)
def _read_packet(self):
""" Reads next TDS packet from the underlying transport
If a timeout occurs while reading the packet header, the current
request is cancelled.
Can only be called when the transport's read pointer is at the beginning
of the packet.
"""
try:
pos = 0
while pos < _header.size:
received = self._transport.recv_into(self._bufview[pos:], _header.size - pos)
if received == 0:
raise tds_base.ClosedConnectionError()
pos += received
except tds_base.TimeoutError:
self._session.put_cancel()
raise
self._pos = _header.size
self._type, self._status, self._size, self._session._spid, _ = _header.unpack_from(self._bufview, 0)
self._have = pos
while pos < self._size:
received = self._transport.recv_into(self._bufview[pos:], self._size - pos)
if received == 0:
raise tds_base.ClosedConnectionError()
pos += received
self._have += received
def read_whole_packet(self):
""" Reads single packet and returns bytes payload of the packet
Can only be called when transport's read pointer is at the beginning
of the packet.
"""
self._read_packet()
return readall(self, self._size - _header.size)
class _TdsWriter(object):
""" TDS stream writer
Handles splitting of incoming data into TDS packets according to TDS protocol.
Provides convenience methods for writing primitive data types.
"""
def __init__(self, session, bufsize):
self._session = session
self._tds = session
self._transport = session._transport
self._pos = 0
self._buf = bytearray(bufsize)
self._packet_no = 0
self._type = 0
@property
def session(self):
""" Back reference to parent :class:`_TdsSession` object """
return self._session
@property
def bufsize(self):
""" Size of the buffer """
return len(self._buf)
@bufsize.setter
def bufsize(self, bufsize):
if len(self._buf) == bufsize:
return
if bufsize > len(self._buf):
self._buf.extend(b'\0' * (bufsize - len(self._buf)))
else:
self._buf = self._buf[0:bufsize]
def begin_packet(self, packet_type):
""" Starts new packet stream
:param packet_type: Type of TDS stream, e.g. TDS_PRELOGIN, TDS_QUERY etc.
"""
self._type = packet_type
self._pos = 8
def pack(self, struc, *args):
""" Packs and writes structure into stream """
self.write(struc.pack(*args))
def put_byte(self, value):
""" Writes single byte into stream """
self.pack(_byte, value)
def put_smallint(self, value):
""" Writes 16-bit signed integer into the stream """
self.pack(_smallint_le, value)
def put_usmallint(self, value):
""" Writes 16-bit unsigned integer into the stream """
self.pack(_usmallint_le, value)
def put_usmallint_be(self, value):
""" Writes 16-bit unsigned big-endian integer into the stream """
self.pack(_usmallint_be, value)
def put_int(self, value):
""" Writes 32-bit signed integer into the stream """
self.pack(_int_le, value)
def put_uint(self, value):
""" Writes 32-bit unsigned integer into the stream """
self.pack(_uint_le, value)
def put_uint_be(self, value):
""" Writes 32-bit unsigned big-endian integer into the stream """
self.pack(_uint_be, value)
def put_int8(self, value):
""" Writes 64-bit signed integer into the stream """
self.pack(_int8_le, value)
def put_uint8(self, value):
""" Writes 64-bit unsigned integer into the stream """
self.pack(_uint8_le, value)
def put_collation(self, collation):
""" Writes :class:`Collation` structure into the stream """
self.write(collation.pack())
def write(self, data):
""" Writes given bytes buffer into the stream
Function returns only when entire buffer is written
"""
data_off = 0
while data_off < len(data):
left = len(self._buf) - self._pos
if left <= 0:
self._write_packet(final=False)
else:
to_write = min(left, len(data) - data_off)
self._buf[self._pos:self._pos + to_write] = data[data_off:data_off + to_write]
self._pos += to_write
data_off += to_write
def write_b_varchar(self, s):
self.put_byte(len(s))
self.write_ucs2(s)
def write_ucs2(self, s):
""" Write string encoding it in UCS2 into stream """
self.write_string(s, ucs2_codec)
def write_string(self, s, codec):
""" Write string encoding it with codec into stream """
for i in range(0, len(s), self.bufsize):
chunk = s[i:i + self.bufsize]
buf, consumed = codec.encode(chunk)
assert consumed == len(chunk)
self.write(buf)
def flush(self):
""" Closes current packet stream """
return self._write_packet(final=True)
def _write_packet(self, final):
""" Writes single TDS packet into underlying transport.
Data for the packet is taken from internal buffer.
:param final: True means this is the final packet in substream.
"""
status = 1 if final else 0
_header.pack_into(self._buf, 0, self._type, status, self._pos, 0, self._packet_no)
self._packet_no = (self._packet_no + 1) % 256
self._transport.sendall(self._buf[:self._pos])
self._pos = 8
def _create_exception_by_message(msg, custom_error_msg=None):
msg_no = msg['msgno']
if custom_error_msg is not None:
error_msg = custom_error_msg
else:
error_msg = msg['message']
if msg_no in tds_base.prog_errors:
ex = tds_base.ProgrammingError(error_msg)
elif msg_no in tds_base.integrity_errors:
ex = tds_base.IntegrityError(error_msg)
else:
ex = tds_base.OperationalError(error_msg)
ex.msg_no = msg['msgno']
ex.text = msg['message']
ex.srvname = msg['server']
ex.procname = msg['proc_name']
ex.number = msg['msgno']
ex.severity = msg['severity']
ex.state = msg['state']
ex.line = msg['line_number']
return ex
class _TdsSession(object):
""" TDS session
Represents a single TDS session within a MARS connection; when MARS is enabled there can be multiple TDS sessions
within one connection.
"""
def __init__(self, tds, transport, tzinfo_factory):
self.out_pos = 8
self.res_info = None
self.in_cancel = False
self.wire_mtx = None
self.param_info = None
self.has_status = False
self.ret_status = None
self.skipped_to_status = False
self._transport = transport
self._reader = _TdsReader(self)
self._reader._transport = transport
self._writer = _TdsWriter(self, tds.bufsize)
self._writer._transport = transport
self.in_buf_max = 0
self.state = tds_base.TDS_IDLE
self._tds = tds
self.messages = []
self.rows_affected = -1
self.use_tz = tds.use_tz
self._spid = 0
self.tzinfo_factory = tzinfo_factory
self.more_rows = False
self.done_flags = 0
self.internal_sp_called = 0
self.output_params = {}
self.authentication = None
self.return_value_index = 0
self._out_params_indexes = []
self.row = None
self.end_marker = 0
def log_response_message(self, msg):
# logging is disabled by default
if logging_enabled:
logger.info('[%d] %s', self._spid, msg)
def __repr__(self):
fmt = "<_TdsSession state={} tds={} messages={} rows_affected={} use_tz={} spid={} in_cancel={}>"
res = fmt.format(repr(self.state), repr(self._tds), repr(self.messages),
repr(self.rows_affected), repr(self.use_tz), repr(self._spid),
self.in_cancel)
return res
def raise_db_exception(self):
""" Raises exception from last server message
This function will skip messages: The statement has been terminated
"""
if not self.messages:
raise tds_base.Error("Request failed, server didn't send error message")
msg = None
while True:
msg = self.messages[-1]
if msg['msgno'] == 3621: # the statement has been terminated
self.messages = self.messages[:-1]
else:
break
error_msg = ' '.join(m['message'] for m in self.messages)
ex = _create_exception_by_message(msg, error_msg)
raise ex
def get_type_info(self, curcol):
""" Reads TYPE_INFO structure (http://msdn.microsoft.com/en-us/library/dd358284.aspx)
:param curcol: An instance of :class:`Column` that will receive read information
"""
r = self._reader
# User defined data type of the column
if tds_base.IS_TDS72_PLUS(self):
user_type = r.get_uint()
else:
user_type = r.get_usmallint()
curcol.column_usertype = user_type
curcol.flags = r.get_usmallint() # Flags
type_id = r.get_byte()
serializer_class = self._tds.type_factory.get_type_serializer(type_id)
curcol.serializer = serializer_class.from_stream(r)
def tds7_process_result(self):
""" Reads and processes COLMETADATA stream
This stream contains a list of returned columns.
Stream format link: http://msdn.microsoft.com/en-us/library/dd357363.aspx
"""
self.log_response_message('got COLMETADATA')
r = self._reader
# read number of columns and allocate the columns structure
num_cols = r.get_smallint()
# This can be a DUMMY results token from a cursor fetch
if num_cols == -1:
return
self.param_info = None
self.has_status = False
self.ret_status = None
self.skipped_to_status = False
self.rows_affected = tds_base.TDS_NO_COUNT
self.more_rows = True
self.row = [None] * num_cols
self.res_info = info = _Results()
#
# loop through the columns populating COLINFO struct from
# server response
#
header_tuple = []
for col in range(num_cols):
curcol = tds_base.Column()
info.columns.append(curcol)
self.get_type_info(curcol)
curcol.column_name = r.read_ucs2(r.get_byte())
precision = curcol.serializer.precision
scale = curcol.serializer.scale
size = curcol.serializer.size
header_tuple.append(
(curcol.column_name,
curcol.serializer.get_typeid(),
None,
size,
precision,
scale,
curcol.flags & tds_base.Column.fNullable))
info.description = tuple(header_tuple)
return info
def process_param(self):
""" Reads and processes RETURNVALUE stream.
This stream is used to send OUTPUT parameters from RPC to client.
Stream format url: http://msdn.microsoft.com/en-us/library/dd303881.aspx
"""
self.log_response_message('got RETURNVALUE message')
r = self._reader
if tds_base.IS_TDS72_PLUS(self):
ordinal = r.get_usmallint()
else:
r.get_usmallint() # ignore size
ordinal = self._out_params_indexes[self.return_value_index]
name = r.read_ucs2(r.get_byte())
r.get_byte() # 1 - OUTPUT of sp, 2 - result of udf
param = tds_base.Column()
param.column_name = name
self.get_type_info(param)
param.value = param.serializer.read(r)
self.output_params[ordinal] = param
self.return_value_index += 1
def process_cancel(self):
"""
Process the incoming token stream until it finds
an end token DONE with the cancel flag set.
At that point the connection should be ready to handle a new query.
If no cancel request is pending, this function does nothing.
"""
self.log_response_message('got CANCEL message')
# silly cases, nothing to do
if not self.in_cancel:
return
while True:
token_id = self.get_token_id()
self.process_token(token_id)
if not self.in_cancel:
return
def process_msg(self, marker):
""" Reads and processes ERROR/INFO streams
Stream formats:
- ERROR: http://msdn.microsoft.com/en-us/library/dd304156.aspx
- INFO: http://msdn.microsoft.com/en-us/library/dd303398.aspx
:param marker: TDS_ERROR_TOKEN or TDS_INFO_TOKEN
"""
self.log_response_message('got ERROR/INFO message')
r = self._reader
r.get_smallint() # size
msg = {'marker': marker, 'msgno': r.get_int(), 'state': r.get_byte(), 'severity': r.get_byte(),
'sql_state': None}
if marker == tds_base.TDS_INFO_TOKEN:
msg['priv_msg_type'] = 0
elif marker == tds_base.TDS_ERROR_TOKEN:
msg['priv_msg_type'] = 1
else:
logger.error('tds_process_msg() called with unknown marker "{0}"'.format(marker))
msg['message'] = r.read_ucs2(r.get_smallint())
# server name
msg['server'] = r.read_ucs2(r.get_byte())
# stored proc name if available
msg['proc_name'] = r.read_ucs2(r.get_byte())
msg['line_number'] = r.get_int() if tds_base.IS_TDS72_PLUS(self) else r.get_smallint()
# in case extended error data is sent, we just try to discard it
# special case
self.messages.append(msg)
def process_row(self):
""" Reads and handles ROW stream.
This stream contains list of values of one returned row.
Stream format url: http://msdn.microsoft.com/en-us/library/dd357254.aspx
"""
self.log_response_message("got ROW message")
r = self._reader
info = self.res_info
info.row_count += 1
for i, curcol in enumerate(info.columns):
curcol.value = self.row[i] = curcol.serializer.read(r)
def process_nbcrow(self):
""" Reads and handles NBCROW stream.
This stream contains list of values of one returned row in a compressed way,
introduced in TDS 7.3.B
Stream format url: http://msdn.microsoft.com/en-us/library/dd304783.aspx
"""
self.log_response_message("got NBCROW message")
r = self._reader
info = self.res_info
if not info:
self.bad_stream('got row without info')
assert len(info.columns) > 0
info.row_count += 1
# read the null bitmap; a set bit marks a null value in the
# corresponding field
nbc = readall(r, (len(info.columns) + 7) // 8)
for i, curcol in enumerate(info.columns):
if tds_base.my_ord(nbc[i // 8]) & (1 << (i % 8)):
value = None
else:
value = curcol.serializer.read(r)
self.row[i] = value
def process_orderby(self):
""" Reads and processes ORDER stream
Used to inform client by which column dataset is ordered.
Stream format url: http://msdn.microsoft.com/en-us/library/dd303317.aspx
"""
r = self._reader
skipall(r, r.get_smallint())
def process_end(self, marker):
""" Reads and processes DONE/DONEINPROC/DONEPROC streams
Stream format urls:
- DONE: http://msdn.microsoft.com/en-us/library/dd340421.aspx
- DONEINPROC: http://msdn.microsoft.com/en-us/library/dd340553.aspx
- DONEPROC: http://msdn.microsoft.com/en-us/library/dd340753.aspx
:param marker: Can be TDS_DONE_TOKEN or TDS_DONEINPROC_TOKEN or TDS_DONEPROC_TOKEN
"""
code_to_str = {
tds_base.TDS_DONE_TOKEN: 'DONE',
tds_base.TDS_DONEINPROC_TOKEN: 'DONEINPROC',
tds_base.TDS_DONEPROC_TOKEN: 'DONEPROC',
}
self.end_marker = marker
self.more_rows = False
r = self._reader
status = r.get_usmallint()
r.get_usmallint() # cur_cmd
more_results = status & tds_base.TDS_DONE_MORE_RESULTS != 0
was_cancelled = status & tds_base.TDS_DONE_CANCELLED != 0
done_count_valid = status & tds_base.TDS_DONE_COUNT != 0
if self.res_info:
self.res_info.more_results = more_results
rows_affected = r.get_int8() if tds_base.IS_TDS72_PLUS(self) else r.get_int()
self.log_response_message("got {} message, more_res={}, cancelled={}, rows_affected={}".format(
code_to_str[marker], more_results, was_cancelled, rows_affected))
if was_cancelled or (not more_results and not self.in_cancel):
self.in_cancel = False
self.set_state(tds_base.TDS_IDLE)
if done_count_valid:
self.rows_affected = rows_affected
else:
self.rows_affected = -1
self.done_flags = status
if self.done_flags & tds_base.TDS_DONE_ERROR and not was_cancelled and not self.in_cancel:
self.raise_db_exception()
def process_env_chg(self):
""" Reads and processes ENVCHANGE stream.
Stream info url: http://msdn.microsoft.com/en-us/library/dd303449.aspx
"""
self.log_response_message("got ENVCHANGE message")
r = self._reader
size = r.get_smallint()
type_id = r.get_byte()
if type_id == tds_base.TDS_ENV_SQLCOLLATION:
size = r.get_byte()
self.conn.collation = r.get_collation()
logger.info('switched collation to %s', self.conn.collation)
skipall(r, size - 5)
# discard old one
skipall(r, r.get_byte())
elif type_id == tds_base.TDS_ENV_BEGINTRANS:
size = r.get_byte()
assert size == 8
self.conn.tds72_transaction = r.get_uint8()
skipall(r, r.get_byte())
elif type_id == tds_base.TDS_ENV_COMMITTRANS or type_id == tds_base.TDS_ENV_ROLLBACKTRANS:
self.conn.tds72_transaction = 0
skipall(r, r.get_byte())
skipall(r, r.get_byte())
elif type_id == tds_base.TDS_ENV_PACKSIZE:
newval = r.read_ucs2(r.get_byte())
r.read_ucs2(r.get_byte())
new_block_size = int(newval)
if new_block_size >= 512:
# It is possible to get a shrink if the server limits the packet
# size more than what we specified
#
# Reallocate the buffer if possible; on strange values from the server or out of memory, keep the older buffer
self._writer.bufsize = new_block_size
elif type_id == tds_base.TDS_ENV_DATABASE:
newval = r.read_ucs2(r.get_byte())
logger.info('switched to database %s', newval)
r.read_ucs2(r.get_byte())
self.conn.env.database = newval
elif type_id == tds_base.TDS_ENV_LANG:
newval = r.read_ucs2(r.get_byte())
logger.info('switched language to %s', newval)
r.read_ucs2(r.get_byte())
self.conn.env.language = newval
elif type_id == tds_base.TDS_ENV_CHARSET:
newval = r.read_ucs2(r.get_byte())
logger.info('switched charset to %s', newval)
r.read_ucs2(r.get_byte())
self.conn.env.charset = newval
remap = {'iso_1': 'iso8859-1'}
self.conn.server_codec = codecs.lookup(remap.get(newval, newval))
elif type_id == tds_base.TDS_ENV_DB_MIRRORING_PARTNER:
newval = r.read_ucs2(r.get_byte())
logger.info('got mirroring partner %s', newval)
r.read_ucs2(r.get_byte())
elif type_id == tds_base.TDS_ENV_LCID:
lcid = int(r.read_ucs2(r.get_byte()))
logger.info('switched lcid to %s', lcid)
self.conn.server_codec = codecs.lookup(lcid2charset(lcid))
r.read_ucs2(r.get_byte())
elif type_id == tds_base.TDS_ENV_UNICODE_DATA_SORT_COMP_FLAGS:
old_comp_flags = r.read_ucs2(r.get_byte())
comp_flags = r.read_ucs2(r.get_byte())
self.conn.comp_flags = comp_flags
elif type_id == 20:
# routing
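# per MS-TDS, protocol 0 means TCP, in which case protocol_property carries the TCP port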
sz = r.get_usmallint()
protocol = r.get_byte()
protocol_property = r.get_usmallint()
alt_server = r.read_ucs2(r.get_usmallint())
logger.info('got routing info proto=%d proto_prop=%d alt_srv=%s', protocol, protocol_property, alt_server)
self.conn.route = {
'server': alt_server,
'port': protocol_property,
}
# OLDVALUE = 0x00, 0x00
r.get_usmallint()
else:
logger.warning("unknown env type: {0}, skipping".format(type_id))
# discard the remaining bytes, this env type is not yet supported
skipall(r, size - 1)
def process_auth(self):
""" Reads and processes SSPI stream.
Stream info: http://msdn.microsoft.com/en-us/library/dd302844.aspx
"""
r = self._reader
w = self._writer
pdu_size = r.get_smallint()
if not self.authentication:
raise tds_base.Error('Got unexpected token')
packet = self.authentication.handle_next(readall(r, pdu_size))
if packet:
w.write(packet)
w.flush()
def is_connected(self):
"""
:return: True if transport is connected
"""
return self._transport.is_connected()
def bad_stream(self, msg):
""" Called when input stream contains unexpected data.
Will close stream and raise :class:`InterfaceError`
:param msg: Message for InterfaceError exception.
:return: Never returns, always raises exception.
"""
self.close()
raise tds_base.InterfaceError(msg)
@property
def tds_version(self):
""" Returns integer encoded current TDS protocol version
"""
return self._tds.tds_version
@property
def conn(self):
""" Reference to owning :class:`_TdsSocket`
"""
return self._tds
def close(self):
self._transport.close()
def set_state(self, state):
""" Switches state of the TDS session.
It also does state transitions checks.
:param state: New state, one of TDS_PENDING/TDS_READING/TDS_IDLE/TDS_DEAD/TDS_QUERYING
"""
prior_state = self.state
if state == prior_state:
return state
if state == tds_base.TDS_PENDING:
if prior_state in (tds_base.TDS_READING, tds_base.TDS_QUERYING):
self.state = tds_base.TDS_PENDING
else:
raise tds_base.InterfaceError('logic error: cannot change query state from {0} to {1}'.
format(tds_base.state_names[prior_state], tds_base.state_names[state]))
elif state == tds_base.TDS_READING:
# transitions to READING are valid only from PENDING
if self.state != tds_base.TDS_PENDING:
raise tds_base.InterfaceError('logic error: cannot change query state from {0} to {1}'.
format(tds_base.state_names[prior_state], tds_base.state_names[state]))
else:
self.state = state
elif state == tds_base.TDS_IDLE:
if prior_state == tds_base.TDS_DEAD:
raise tds_base.InterfaceError('logic error: cannot change query state from {0} to {1}'.
format(tds_base.state_names[prior_state], tds_base.state_names[state]))
self.state = state
elif state == tds_base.TDS_DEAD:
self.state = state
elif state == tds_base.TDS_QUERYING:
if self.state == tds_base.TDS_DEAD:
raise tds_base.InterfaceError('logic error: cannot change query state from {0} to {1}'.
format(tds_base.state_names[prior_state], tds_base.state_names[state]))
elif self.state != tds_base.TDS_IDLE:
raise tds_base.InterfaceError('logic error: cannot change query state from {0} to {1}'.
format(tds_base.state_names[prior_state], tds_base.state_names[state]))
else:
self.rows_affected = tds_base.TDS_NO_COUNT
self.internal_sp_called = 0
self.state = state
else:
assert False
return self.state
@contextlib.contextmanager
def querying_context(self, packet_type):
""" Context manager for querying.
Sets state to TDS_QUERYING, reverts it to TDS_IDLE if an exception happens inside the managed block,
and sets it to TDS_PENDING and flushes the buffer if the managed block succeeds.
"""
if self.set_state(tds_base.TDS_QUERYING) != tds_base.TDS_QUERYING:
raise tds_base.Error("Couldn't switch to state")
self._writer.begin_packet(packet_type)
try:
yield
except:
if self.state != tds_base.TDS_DEAD:
self.set_state(tds_base.TDS_IDLE)
raise
else:
self.set_state(tds_base.TDS_PENDING)
self._writer.flush()
def make_param(self, name, value):
""" Generates instance of :class:`Column` from value and name
Value can also be one of several special types:
- An instance of :class:`Column`, in which case it is just returned.
- An instance of :class:`output`, in which case the parameter will become
an output parameter.
- The singleton :var:`default`, in which case the default value will be passed
into a stored proc.
:param name: Name of the parameter, will populate column_name property of returned column.
:param value: Value of the parameter, also used to guess the type of parameter.
:return: An instance of :class:`Column`
"""
if isinstance(value, tds_base.Column):
value.column_name = name
return value
column = tds_base.Column()
column.column_name = name
column.flags = 0
if isinstance(value, output):
column.flags |= tds_base.fByRefValue
if isinstance(value.type, six.string_types):
column.type = tds_types.sql_type_by_declaration(value.type)
elif value.type:
column.type = self.conn.type_inferrer.from_class(value.type)
value = value.value
if value is default:
column.flags |= tds_base.fDefaultValue
value = None
column.value = value
if column.type is None:
column.type = self.conn.type_inferrer.from_value(value)
return column
def _convert_params(self, parameters):
""" Converts a dict of list of parameters into a list of :class:`Column` instances.
:param parameters: Can be a list of parameter values, or a dict of parameter names to values.
:return: A list of :class:`Column` instances.
"""
if isinstance(parameters, dict):
return [self.make_param(name, value)
for name, value in parameters.items()]
else:
params = []
for parameter in parameters:
params.append(self.make_param('', parameter))
return params
def cancel_if_pending(self):
""" Cancels current pending request.
Does nothing if no request is pending, otherwise sends cancel request,
and waits for response.
"""
if self.state == tds_base.TDS_IDLE:
return
if not self.in_cancel:
self.put_cancel()
self.process_cancel()
def submit_rpc(self, rpc_name, params, flags=0):
""" Sends an RPC request.
This call will transition session into pending state.
If some operation is currently pending on the session, it will be
cancelled before sending this request.
Spec: http://msdn.microsoft.com/en-us/library/dd357576.aspx
:param rpc_name: Name of the RPC to call, can be an instance of :class:`InternalProc`
:param params: Stored proc parameters, should be a list of :class:`Column` instances.
:param flags: See spec for possible flags.
"""
logger.info('Sending RPC %s flags=%d', rpc_name, flags)
self.messages = []
self.output_params = {}
self.cancel_if_pending()
self.res_info = None
w = self._writer
with self.querying_context(tds_base.PacketType.RPC):
if tds_base.IS_TDS72_PLUS(self):
self._start_query()
if tds_base.IS_TDS71_PLUS(self) and isinstance(rpc_name, tds_base.InternalProc):
w.put_smallint(-1)
w.put_smallint(rpc_name.proc_id)
else:
if isinstance(rpc_name, tds_base.InternalProc):
rpc_name = rpc_name.name
w.put_smallint(len(rpc_name))
w.write_ucs2(rpc_name)
#
# TODO support flags
# bit 0 (1 as flag) in TDS7/TDS5 is "recompile"
# bit 1 (2 as flag) in TDS7+ is "no metadata" bit this will prevent sending of column infos
#
w.put_usmallint(flags)
self._out_params_indexes = []
for i, param in enumerate(params):
if param.flags & tds_base.fByRefValue:
self._out_params_indexes.append(i)
w.put_byte(len(param.column_name))
w.write_ucs2(param.column_name)
#
# TODO support other flags (use default null/no metadata)
# bit 1 (2 as flag) in TDS7+ is "default value" bit
# (what's the meaning of "default value" ?)
#
w.put_byte(param.flags)
# TYPE_INFO structure: https://msdn.microsoft.com/en-us/library/dd358284.aspx
serializer = param.choose_serializer(
type_factory=self._tds.type_factory,
collation=self._tds.collation or raw_collation
)
type_id = serializer.type
w.put_byte(type_id)
serializer.write_info(w)
serializer.write(w, param.value)
def submit_plain_query(self, operation):
""" Sends a plain query to server.
This call will transition session into pending state.
If some operation is currently pending on the session, it will be
cancelled before sending this request.
Spec: http://msdn.microsoft.com/en-us/library/dd358575.aspx
:param operation: A string representing sql statement.
"""
self.messages = []
self.cancel_if_pending()
self.res_info = None
logger.info("Sending query %s", operation[:100])
w = self._writer
with self.querying_context(tds_base.PacketType.QUERY):
if tds_base.IS_TDS72_PLUS(self):
self._start_query()
w.write_ucs2(operation)
def submit_bulk(self, metadata, rows):
""" Sends insert bulk command.
Spec: http://msdn.microsoft.com/en-us/library/dd358082.aspx
:param metadata: A list of :class:`Column` instances.
:param rows: A collection of rows, each row is a collection of values.
:return:
"""
logger.info('Sending INSERT BULK')
num_cols = len(metadata)
w = self._writer
serializers = []
with self.querying_context(tds_base.PacketType.BULK):
w.put_byte(tds_base.TDS7_RESULT_TOKEN)
w.put_usmallint(num_cols)
for col in metadata:
if tds_base.IS_TDS72_PLUS(self):
w.put_uint(col.column_usertype)
else:
w.put_usmallint(col.column_usertype)
w.put_usmallint(col.flags)
serializer = col.choose_serializer(
type_factory=self._tds.type_factory,
collation=self._tds.collation,
)
type_id = serializer.type
w.put_byte(type_id)
serializers.append(serializer)
serializer.write_info(w)
w.put_byte(len(col.column_name))
w.write_ucs2(col.column_name)
for row in rows:
w.put_byte(tds_base.TDS_ROW_TOKEN)
for i, col in enumerate(metadata):
serializers[i].write(w, row[i])
# https://msdn.microsoft.com/en-us/library/dd340421.aspx
w.put_byte(tds_base.TDS_DONE_TOKEN)
w.put_usmallint(tds_base.TDS_DONE_FINAL)
w.put_usmallint(0) # curcmd
# row count
if tds_base.IS_TDS72_PLUS(self):
w.put_int8(0)
else:
w.put_int(0)
def put_cancel(self):
""" Sends a cancel request to the server.
Switches connection to IN_CANCEL state.
"""
logger.info('Sending CANCEL')
self._writer.begin_packet(tds_base.PacketType.CANCEL)
self._writer.flush()
self.in_cancel = 1
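# TDS 7.2 transaction manager request payload: request type (USHORT), isolation level (BYTE), transaction name length (BYTE)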
_begin_tran_struct_72 = struct.Struct('<HBB')
def begin_tran(self, isolation_level=0):
logger.info('Sending BEGIN TRAN il=%x', isolation_level)
self.submit_begin_tran(isolation_level=isolation_level)
self.process_simple_request()
def submit_begin_tran(self, isolation_level=0):
if tds_base.IS_TDS72_PLUS(self):
self.messages = []
self.cancel_if_pending()
w = self._writer
with self.querying_context(tds_base.PacketType.TRANS):
self._start_query()
w.pack(
self._begin_tran_struct_72,
5, # TM_BEGIN_XACT
isolation_level,
0, # new transaction name
)
else:
self.submit_plain_query("BEGIN TRANSACTION")
self.conn.tds72_transaction = 1
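# TDS 7.2 commit/rollback request header: request type (USHORT), transaction name length (BYTE), flags (BYTE);
# when a new transaction is started in the same request, the continuation part adds isolation level (BYTE) and new name length (BYTE)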
_commit_rollback_tran_struct72_hdr = struct.Struct('<HBB')
_continue_tran_struct72 = struct.Struct('<BB')
def rollback(self, cont, isolation_level=0):
logger.info('Sending ROLLBACK TRAN')
self.submit_rollback(cont, isolation_level=isolation_level)
prev_timeout = self._tds.sock.gettimeout()
self._tds.sock.settimeout(None)
try:
self.process_simple_request()
finally:
self._tds.sock.settimeout(prev_timeout)
def submit_rollback(self, cont, isolation_level=0):
if tds_base.IS_TDS72_PLUS(self):
self.messages = []
self.cancel_if_pending()
w = self._writer
with self.querying_context(tds_base.PacketType.TRANS):
self._start_query()
flags = 0
if cont:
flags |= 1
w.pack(
self._commit_rollback_tran_struct72_hdr,
8, # TM_ROLLBACK_XACT
0, # transaction name
flags,
)
if cont:
w.pack(
self._continue_tran_struct72,
isolation_level,
0, # new transaction name
)
else:
self.submit_plain_query(
"IF @@TRANCOUNT > 0 ROLLBACK BEGIN TRANSACTION" if cont else "IF @@TRANCOUNT > 0 ROLLBACK")
self.conn.tds72_transaction = 1 if cont else 0
def commit(self, cont, isolation_level=0):
logger.info('Sending COMMIT TRAN')
self.submit_commit(cont, isolation_level=isolation_level)
prev_timeout = self._tds.sock.gettimeout()
self._tds.sock.settimeout(None)
try:
self.process_simple_request()
finally:
self._tds.sock.settimeout(prev_timeout)
def submit_commit(self, cont, isolation_level=0):
if tds_base.IS_TDS72_PLUS(self):
self.messages = []
self.cancel_if_pending()
w = self._writer
with self.querying_context(tds_base.PacketType.TRANS):
self._start_query()
flags = 0
if cont:
flags |= 1
w.pack(
self._commit_rollback_tran_struct72_hdr,
7, # TM_COMMIT_XACT
0, # transaction name
flags,
)
if cont:
w.pack(
self._continue_tran_struct72,
isolation_level,
0, # new transaction name
)
else:
self.submit_plain_query(
"IF @@TRANCOUNT > 0 COMMIT BEGIN TRANSACTION" if cont else "IF @@TRANCOUNT > 0 COMMIT")
self.conn.tds72_transaction = 1 if cont else 0
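# TDS 7.2+ ALL_HEADERS preamble: total length (UINT), header length (UINT), header type (USHORT, 2 = transaction descriptor),
# transaction descriptor (ULONGLONG) and outstanding request count (UINT)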
_tds72_query_start = struct.Struct('<IIHQI')
def _start_query(self):
w = self._writer
w.pack(_TdsSession._tds72_query_start,
0x16, # total length
0x12, # length
2, # type
self.conn.tds72_transaction,
1, # request count
)
def send_prelogin(self, login):
from . import intversion
# https://msdn.microsoft.com/en-us/library/dd357559.aspx
instance_name = login.instance_name or 'MSSQLServer'
instance_name = instance_name.encode('ascii')
if len(instance_name) > 65490:
raise ValueError('Instance name is too long')
if tds_base.IS_TDS72_PLUS(self):
start_pos = 26
buf = struct.pack(
b'>BHHBHHBHHBHHBHHB',
# netlib version
PreLoginToken.VERSION, start_pos, 6,
# encryption
PreLoginToken.ENCRYPTION, start_pos + 6, 1,
# instance
PreLoginToken.INSTOPT, start_pos + 6 + 1, len(instance_name) + 1,
# thread id
PreLoginToken.THREADID, start_pos + 6 + 1 + len(instance_name) + 1, 4,
# MARS enabled
PreLoginToken.MARS, start_pos + 6 + 1 + len(instance_name) + 1 + 4, 1,
# end
PreLoginToken.TERMINATOR
)
else:
start_pos = 21
buf = struct.pack(
b'>BHHBHHBHHBHHB',
# netlib version
PreLoginToken.VERSION, start_pos, 6,
# encryption
PreLoginToken.ENCRYPTION, start_pos + 6, 1,
# instance
PreLoginToken.INSTOPT, start_pos + 6 + 1, len(instance_name) + 1,
# thread id
PreLoginToken.THREADID, start_pos + 6 + 1 + len(instance_name) + 1, 4,
# end
PreLoginToken.TERMINATOR
)
assert start_pos == len(buf)
w = self._writer
w.begin_packet(tds_base.PacketType.PRELOGIN)
w.write(buf)
w.put_uint_be(intversion)
w.put_usmallint_be(0) # build number
# encryption flag
w.put_byte(login.enc_flag)
w.write(instance_name)
w.put_byte(0) # zero terminate instance_name
w.put_int(0) # TODO: change this to thread id
attribs = {
'lib_ver': '%x' % intversion,
'enc_flag': '%x' % login.enc_flag,
'inst_name': instance_name,
}
if tds_base.IS_TDS72_PLUS(self):
# MARS (1 enabled)
w.put_byte(1 if login.use_mars else 0)
attribs['mars'] = login.use_mars
logger.info('Sending PRELOGIN %s', ' '.join('%s=%s' % (n, v) for n, v in attribs.items()))
w.flush()
def process_prelogin(self, login):
# https://msdn.microsoft.com/en-us/library/dd357559.aspx
p = self._reader.read_whole_packet()
size = len(p)
if size <= 0 or self._reader.packet_type != tds_base.PacketType.REPLY:
self.bad_stream('Invalid packet type: {0}, expected REPLY(4)'.format(self._reader.packet_type))
self.parse_prelogin(octets=p, login=login)
def parse_prelogin(self, octets, login):
# https://msdn.microsoft.com/en-us/library/dd357559.aspx
size = len(octets)
p = octets
# default is 2 (ENCRYPT_NOT_SUP): no certificate, no encryption
crypt_flag = 2
i = 0
byte_struct = struct.Struct('B')
off_len_struct = struct.Struct('>HH')
prod_version_struct = struct.Struct('>LH')
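# each PRELOGIN option record is 5 bytes: a token byte followed by big-endian offset and length (USHORT each);
# the option list is closed by a TERMINATOR token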
while True:
if i >= size:
self.bad_stream('Invalid size of PRELOGIN structure')
type_id, = byte_struct.unpack_from(p, i)
if type_id == PreLoginToken.TERMINATOR:
break
if i + 4 > size:
self.bad_stream('Invalid size of PRELOGIN structure')
off, l = off_len_struct.unpack_from(p, i + 1)
if off > size or off + l > size:
self.bad_stream('Invalid offset in PRELOGIN structure')
if type_id == PreLoginToken.VERSION:
self.conn.server_library_version = prod_version_struct.unpack_from(p, off)
elif type_id == PreLoginToken.ENCRYPTION and l >= 1:
crypt_flag, = byte_struct.unpack_from(p, off)
elif type_id == PreLoginToken.MARS:
self.conn._mars_enabled = bool(byte_struct.unpack_from(p, off)[0])
elif type_id == PreLoginToken.INSTOPT:
# ignore instance name mismatch
pass
i += 5
logger.info("Got PRELOGIN response crypt=%x mars=%d",
crypt_flag, self.conn._mars_enabled)
# if the server does not have a certificate, do a normal login
login.server_enc_flag = crypt_flag
if crypt_flag == PreLoginEnc.ENCRYPT_OFF:
if login.enc_flag == PreLoginEnc.ENCRYPT_ON:
self.bad_stream('Server returned unexpected ENCRYPT_ON value')
else:
# encrypt login packet only
tls.establish_channel(self)
elif crypt_flag == PreLoginEnc.ENCRYPT_ON:
# encrypt entire connection
tls.establish_channel(self)
elif crypt_flag == PreLoginEnc.ENCRYPT_REQ:
if login.enc_flag == PreLoginEnc.ENCRYPT_NOT_SUP:
# connection terminated by server and client
raise tds_base.Error('Client does not have encryption enabled but it is required by server, '
'enable encryption and try connecting again')
else:
# encrypt entire connection
tls.establish_channel(self)
elif crypt_flag == PreLoginEnc.ENCRYPT_NOT_SUP:
if login.enc_flag == PreLoginEnc.ENCRYPT_ON:
# connection terminated by server and client
raise tds_base.Error('You requested encryption but it is not supported by server')
# do not encrypt anything
else:
self.bad_stream('Unexpected value of enc_flag returned by server: {}'.format(crypt_flag))
def tds7_send_login(self, login):
# https://msdn.microsoft.com/en-us/library/dd304019.aspx
option_flag2 = login.option_flag2
user_name = login.user_name
if len(user_name) > 128:
raise ValueError('User name should be no longer than 128 characters')
if len(login.password) > 128:
raise ValueError('Password should be no longer than 128 characters')
if len(login.change_password) > 128:
raise ValueError('Change password should be no longer than 128 characters')
if len(login.client_host_name) > 128:
raise ValueError('Host name should be no longer than 128 characters')
if len(login.app_name) > 128:
raise ValueError('App name should be no longer than 128 characters')
if len(login.server_name) > 128:
raise ValueError('Server name should be no longer than 128 characters')
if len(login.database) > 128:
raise ValueError('Database name should be no longer than 128 characters')
if len(login.language) > 128:
raise ValueError('Language should be no longer than 128 characters')
if len(login.attach_db_file) > 260:
raise ValueError('File path should be no longer than 260 characters')
w = self._writer
w.begin_packet(tds_base.PacketType.LOGIN)
self.authentication = None
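# the fixed part of the LOGIN7 record is 86 bytes; TDS 7.2+ adds 8 more
# (change-password offset/length and the 4-byte long SSPI length)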
current_pos = 86 + 8 if tds_base.IS_TDS72_PLUS(self) else 86
client_host_name = login.client_host_name
login.client_host_name = client_host_name
packet_size = current_pos + (len(client_host_name) + len(login.app_name) + len(login.server_name) +
len(login.library) + len(login.language) + len(login.database)) * 2
if login.auth:
self.authentication = login.auth
auth_packet = login.auth.create_packet()
packet_size += len(auth_packet)
else:
auth_packet = ''
packet_size += (len(user_name) + len(login.password)) * 2
w.put_int(packet_size)
w.put_uint(login.tds_version)
w.put_int(w.bufsize)
from . import intversion
w.put_uint(intversion)
w.put_int(login.pid)
w.put_uint(0) # connection id
option_flag1 = tds_base.TDS_SET_LANG_ON | tds_base.TDS_USE_DB_NOTIFY | tds_base.TDS_INIT_DB_FATAL
if not login.bulk_copy:
option_flag1 |= tds_base.TDS_DUMPLOAD_OFF
w.put_byte(option_flag1)
if self.authentication:
option_flag2 |= tds_base.TDS_INTEGRATED_SECURITY_ON
w.put_byte(option_flag2)
type_flags = 0
if login.readonly:
type_flags |= (2 << 5)
w.put_byte(type_flags)
option_flag3 = tds_base.TDS_UNKNOWN_COLLATION_HANDLING
w.put_byte(option_flag3 if tds_base.IS_TDS73_PLUS(self) else 0)
mins_fix = int(login.client_tz.utcoffset(datetime.datetime.now()).total_seconds()) // 60
logger.info('Sending LOGIN tds_ver=%x bufsz=%d pid=%d opt1=%x opt2=%x opt3=%x cli_tz=%d cli_lcid=%s '
'cli_host=%s lang=%s db=%s',
login.tds_version, w.bufsize, login.pid, option_flag1, option_flag2, option_flag3, mins_fix,
login.client_lcid, client_host_name, login.language, login.database)
w.put_int(mins_fix)
w.put_int(login.client_lcid)
w.put_smallint(current_pos)
w.put_smallint(len(client_host_name))
current_pos += len(client_host_name) * 2
if self.authentication:
w.put_smallint(0)
w.put_smallint(0)
w.put_smallint(0)
w.put_smallint(0)
else:
w.put_smallint(current_pos)
w.put_smallint(len(user_name))
current_pos += len(user_name) * 2
w.put_smallint(current_pos)
w.put_smallint(len(login.password))
current_pos += len(login.password) * 2
w.put_smallint(current_pos)
w.put_smallint(len(login.app_name))
current_pos += len(login.app_name) * 2
# server name
w.put_smallint(current_pos)
w.put_smallint(len(login.server_name))
current_pos += len(login.server_name) * 2
# reserved
w.put_smallint(0)
w.put_smallint(0)
# library name
w.put_smallint(current_pos)
w.put_smallint(len(login.library))
current_pos += len(login.library) * 2
# language
w.put_smallint(current_pos)
w.put_smallint(len(login.language))
current_pos += len(login.language) * 2
# database name
w.put_smallint(current_pos)
w.put_smallint(len(login.database))
current_pos += len(login.database) * 2
# ClientID
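# (6 bytes; per the LOGIN7 spec this is the client MAC address, here the low 6 bytes of login.client_id)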
client_id = struct.pack('>Q', login.client_id)[2:]
w.write(client_id)
# authentication
w.put_smallint(current_pos)
w.put_smallint(len(auth_packet))
current_pos += len(auth_packet)
# db file
w.put_smallint(current_pos)
w.put_smallint(len(login.attach_db_file))
current_pos += len(login.attach_db_file) * 2
if tds_base.IS_TDS72_PLUS(self):
# new password
w.put_smallint(current_pos)
w.put_smallint(len(login.change_password))
# sspi long
w.put_int(0)
w.write_ucs2(client_host_name)
if not self.authentication:
w.write_ucs2(user_name)
w.write(tds7_crypt_pass(login.password))
w.write_ucs2(login.app_name)
w.write_ucs2(login.server_name)
w.write_ucs2(login.library)
w.write_ucs2(login.language)
w.write_ucs2(login.database)
if self.authentication:
w.write(auth_packet)
w.write_ucs2(login.attach_db_file)
w.write_ucs2(login.change_password)
w.flush()
_SERVER_TO_CLIENT_MAPPING = {
0x07000000: tds_base.TDS70,
0x07010000: tds_base.TDS71,
0x71000001: tds_base.TDS71rev1,
tds_base.TDS72: tds_base.TDS72,
tds_base.TDS73A: tds_base.TDS73A,
tds_base.TDS73B: tds_base.TDS73B,
tds_base.TDS74: tds_base.TDS74,
}
def process_login_tokens(self):
r = self._reader
succeed = False
while True:
marker = r.get_byte()
if marker == tds_base.TDS_LOGINACK_TOKEN:
# https://msdn.microsoft.com/en-us/library/dd340651.aspx
succeed = True
size = r.get_smallint()
r.get_byte() # interface
version = r.get_uint_be()
self.conn.tds_version = self._SERVER_TO_CLIENT_MAPPING.get(version, version)
if not tds_base.IS_TDS7_PLUS(self):
self.bad_stream('Only TDS 7.0 and higher are supported')
# get server product name
# ignore product name length, some servers seem to set it incorrectly
r.get_byte()
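# 10 bytes of this token are fixed fields: interface (1) + TDS version (4) + name length (1) + product version (4);
# what remains is the product name in UCS2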
size -= 10
self.conn.product_name = r.read_ucs2(size // 2)
product_version = r.get_uint_be()
logger.info('Got LOGINACK tds_ver=%x srv_name=%s srv_ver=%x',
self.conn.tds_version, self.conn.product_name, product_version)
# MSSQL 6.5 and 7.0 seem to return strange values for this
# using TDS 4.2, something like 5F 06 32 FF for 6.50
self.conn.product_version = product_version
if self.authentication:
self.authentication.close()
self.authentication = None
else:
self.process_token(marker)
if marker == tds_base.TDS_DONE_TOKEN:
break
return succeed
def process_returnstatus(self):
self.log_response_message('got RETURNSTATUS message')
self.ret_status = self._reader.get_int()
self.has_status = True
def process_token(self, marker):
handler = _token_map.get(marker)
if not handler:
self.bad_stream('Invalid TDS marker: {0}({0:x})'.format(marker))
return handler(self)
def get_token_id(self):
self.set_state(tds_base.TDS_READING)
try:
marker = self._reader.get_byte()
except tds_base.TimeoutError:
self.set_state(tds_base.TDS_PENDING)
raise
except:
self._tds.close()
raise
return marker
def process_simple_request(self):
while True:
marker = self.get_token_id()
if marker in (tds_base.TDS_DONE_TOKEN, tds_base.TDS_DONEPROC_TOKEN, tds_base.TDS_DONEINPROC_TOKEN):
self.process_end(marker)
if not self.done_flags & tds_base.TDS_DONE_MORE_RESULTS:
return
else:
self.process_token(marker)
def next_set(self):
while self.more_rows:
self.next_row()
if self.state == tds_base.TDS_IDLE:
return False
if self.find_result_or_done():
return True
def fetchone(self):
if self.res_info is None:
raise tds_base.ProgrammingError("Previous statement didn't produce any results")
if self.skipped_to_status:
raise tds_base.ProgrammingError("Unable to fetch any rows after accessing return_status")
if not self.next_row():
return None
return self.row
def next_row(self):
if not self.more_rows:
return False
while True:
marker = self.get_token_id()
if marker in (tds_base.TDS_ROW_TOKEN, tds_base.TDS_NBC_ROW_TOKEN):
self.process_token(marker)
return True
elif marker in (tds_base.TDS_DONE_TOKEN, tds_base.TDS_DONEPROC_TOKEN, tds_base.TDS_DONEINPROC_TOKEN):
self.process_end(marker)
return False
else:
self.process_token(marker)
def find_result_or_done(self):
self.done_flags = 0
while True:
marker = self.get_token_id()
if marker == tds_base.TDS7_RESULT_TOKEN:
self.process_token(marker)
return True
elif marker in (tds_base.TDS_DONE_TOKEN, tds_base.TDS_DONEPROC_TOKEN, tds_base.TDS_DONEINPROC_TOKEN):
self.process_end(marker)
if self.done_flags & tds_base.TDS_DONE_MORE_RESULTS:
if self.done_flags & tds_base.TDS_DONE_COUNT:
return True
else:
return False
else:
self.process_token(marker)
def process_rpc(self):
self.done_flags = 0
self.return_value_index = 0
while True:
marker = self.get_token_id()
if marker == tds_base.TDS7_RESULT_TOKEN:
self.process_token(marker)
return True
elif marker in (tds_base.TDS_DONE_TOKEN, tds_base.TDS_DONEPROC_TOKEN):
self.process_end(marker)
if self.done_flags & tds_base.TDS_DONE_MORE_RESULTS and not self.done_flags & tds_base.TDS_DONE_COUNT:
# skip results that don't even have a rowcount
continue
return False
else:
self.process_token(marker)
def complete_rpc(self):
# go through all result sets
while self.next_set():
pass
def find_return_status(self):
self.skipped_to_status = True
while True:
marker = self.get_token_id()
self.process_token(marker)
if marker == tds_base.TDS_RETURNSTATUS_TOKEN:
return
_token_map = {
tds_base.TDS_AUTH_TOKEN: _TdsSession.process_auth,
tds_base.TDS_ENVCHANGE_TOKEN: _TdsSession.process_env_chg,
tds_base.TDS_DONE_TOKEN: lambda self: self.process_end(tds_base.TDS_DONE_TOKEN),
tds_base.TDS_DONEPROC_TOKEN: lambda self: self.process_end(tds_base.TDS_DONEPROC_TOKEN),
tds_base.TDS_DONEINPROC_TOKEN: lambda self: self.process_end(tds_base.TDS_DONEINPROC_TOKEN),
tds_base.TDS_ERROR_TOKEN: lambda self: self.process_msg(tds_base.TDS_ERROR_TOKEN),
tds_base.TDS_INFO_TOKEN: lambda self: self.process_msg(tds_base.TDS_INFO_TOKEN),
tds_base.TDS_CAPABILITY_TOKEN: lambda self: self.process_msg(tds_base.TDS_CAPABILITY_TOKEN),
tds_base.TDS_PARAM_TOKEN: lambda self: self.process_param(),
tds_base.TDS7_RESULT_TOKEN: lambda self: self.tds7_process_result(),
tds_base.TDS_ROW_TOKEN: lambda self: self.process_row(),
tds_base.TDS_NBC_ROW_TOKEN: lambda self: self.process_nbcrow(),
tds_base.TDS_ORDERBY_TOKEN: lambda self: self.process_orderby(),
tds_base.TDS_RETURNSTATUS_TOKEN: lambda self: self.process_returnstatus(),
}
# This class represents the root TDS connection.
# If MARS is used it can have multiple sessions, each represented by a _TdsSession instance;
# if MARS is not used it has a single _TdsSession instance.
class _TdsSocket(object):
def __init__(self, use_tz=None):
self._is_connected = False
self.env = _TdsEnv()
self.collation = None
self.tds72_transaction = 0
self._mars_enabled = False
self.sock = None
self.bufsize = 4096
self.tds_version = tds_base.TDS74
self.use_tz = use_tz
self.type_factory = tds_types.SerializerFactory(self.tds_version)
self.type_inferrer = None
self.query_timeout = 0
self._smp_manager = None
self._main_session = None
self._login = None
self.route = None
def __repr__(self):
fmt = "<_TdsSocket tran={} mars={} tds_version={} use_tz={}>"
return fmt.format(self.tds72_transaction, self._mars_enabled,
self.tds_version, self.use_tz)
def login(self, login, sock, tzinfo_factory):
self._login = login
self.bufsize = login.blocksize
self.query_timeout = login.query_timeout
self._main_session = _TdsSession(self, sock, tzinfo_factory)
self.sock = sock
self.tds_version = login.tds_version
login.server_enc_flag = PreLoginEnc.ENCRYPT_NOT_SUP
if tds_base.IS_TDS71_PLUS(self):
self._main_session.send_prelogin(login)
self._main_session.process_prelogin(login)
self._main_session.tds7_send_login(login)
if login.server_enc_flag == PreLoginEnc.ENCRYPT_OFF:
tls.revert_to_clear(self._main_session)
if not self._main_session.process_login_tokens():
self._main_session.raise_db_exception()
if self.route is not None:
return self.route
# update block size if server returned a different one
if self._main_session._writer.bufsize != self._main_session._reader.get_block_size():
self._main_session._reader.set_block_size(self._main_session._writer.bufsize)
self.type_factory = tds_types.SerializerFactory(self.tds_version)
self.type_inferrer = tds_types.TdsTypeInferrer(
type_factory=self.type_factory,
collation=self.collation,
bytes_to_unicode=self._login.bytes_to_unicode,
allow_tz=not self.use_tz
)
if self._mars_enabled:
from .smp import SmpManager
self._smp_manager = SmpManager(self.sock)
self._main_session = _TdsSession(
self,
self._smp_manager.create_session(),
tzinfo_factory)
self._is_connected = True
q = []
if login.database and self.env.database != login.database:
q.append('use ' + tds_base.tds_quote_id(login.database))
if q:
self._main_session.submit_plain_query(''.join(q))
self._main_session.process_simple_request()
return None
@property
def mars_enabled(self):
return self._mars_enabled
@property
def main_session(self):
return self._main_session
def create_session(self, tzinfo_factory):
return _TdsSession(
self, self._smp_manager.create_session(),
tzinfo_factory)
def is_connected(self):
return self._is_connected
def close(self):
self._is_connected = False
if self.sock is not None:
self.sock.close()
if self._smp_manager:
self._smp_manager.transport_closed()
self._main_session.state = tds_base.TDS_DEAD
if self._main_session.authentication:
self._main_session.authentication.close()
self._main_session.authentication = None
class _Results(object):
def __init__(self):
self.columns = []
self.row_count = 0
def _parse_instances(msg):
name = None
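# 0x05 is the SVR_RESP token of the SQL Server Browser protocol; two size bytes follow,
# then a semicolon-separated list of alternating property names and values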
if len(msg) > 3 and tds_base.my_ord(msg[0]) == 5:
tokens = msg[3:].decode('ascii').split(';')
results = {}
instdict = {}
got_name = False
for token in tokens:
if got_name:
instdict[name] = token
got_name = False
else:
name = token
if not name:
if not instdict:
break
results[instdict['InstanceName'].upper()] = instdict
instdict = {}
continue
got_name = True
return results
#
# Get information about all SQL Server instances on a host
# @return dict mapping upper-cased instance names to their properties (None if the response is malformed)
# @remark experimental, cf. MC-SQLR.pdf.
#
def tds7_get_instances(ip_addr, timeout=5):
s = socket.socket(type=socket.SOCK_DGRAM)
s.settimeout(timeout)
try:
# send the request
s.sendto(b'\x03', (ip_addr, 1434))
msg = s.recv(16 * 1024 - 1)
# got data, read and parse
return _parse_instances(msg)
finally:
s.close()


# --- end of src/pytds/tds.py ---
import struct
import logging
import threading
import socket
import errno
from six.moves import range
try:
from bitarray import bitarray
except ImportError:
class BitArray(list):
def __init__(self, size):
super(BitArray, self).__init__()
self[:] = [False] * size
def setall(self, val):
for i in range(len(self)):
self[i] = val
bitarray = BitArray
from .tds_base import Error, skipall
logger = logging.getLogger(__name__)
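# SMP packet header (little-endian): SMID (BYTE), FLAGS (BYTE), SID (USHORT), LENGTH (ULONG), SEQNUM (ULONG), WNDW (ULONG)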
SMP_HEADER = struct.Struct('<BBHLLL')
SMP_ID = 0x53
class _SmpSession(object):
def __init__(self, mgr, session_id):
self.session_id = session_id
self.seq_num_for_send = 0
self.high_water_for_send = 4
self._seq_num_for_recv = 0
self.high_water_for_recv = 4
self._last_high_water_for_recv = 4
self._mgr = mgr
self.recv_queue = []
self.send_queue = []
self._state = None
self._curr_buf_pos = 0
self._curr_buf = b''
def __repr__(self):
fmt = "<_SmpSession sid={} state={} recv_queue={} send_queue={} seq_num_for_send={}>"
return fmt.format(self.session_id, SessionState.to_str(self._state), self.recv_queue, self.send_queue,
self.seq_num_for_send)
def get_state(self):
return self._state
def close(self):
self._mgr.close_smp_session(self)
def sendall(self, data):
self._mgr.send_packet(self, data)
def _recv_internal(self, size):
if not self._curr_buf[self._curr_buf_pos:]:
self._curr_buf = self._mgr.recv_packet(self)
self._curr_buf_pos = 0
if not self._curr_buf:
return 0, 0
to_read = min(size, len(self._curr_buf) - self._curr_buf_pos)
offset = self._curr_buf_pos
self._curr_buf_pos += to_read
return offset, to_read
def recv_into(self, buffer, size=0):
if size == 0:
size = len(buffer)
offset, to_read = self._recv_internal(size)
buffer[:to_read] = self._curr_buf[offset:offset + to_read]
return to_read
#def recv(self, size):
# offset, to_read = self._recv_internal(size)
# return self._curr_buf[offset:offset + to_read]
def is_connected(self):
return self._state == SessionState.SESSION_ESTABLISHED
class PacketTypes:
SYN = 0x1
ACK = 0x2
FIN = 0x4
DATA = 0x8
#@staticmethod
#def type_to_str(t):
# if t == PacketTypes.SYN:
# return 'SYN'
# elif t == PacketTypes.ACK:
# return 'ACK'
# elif t == PacketTypes.DATA:
# return 'DATA'
# elif t == PacketTypes.FIN:
# return 'FIN'
class SessionState:
SESSION_ESTABLISHED = 1
CLOSED = 2
FIN_SENT = 3
FIN_RECEIVED = 4
@staticmethod
def to_str(st):
if st == SessionState.SESSION_ESTABLISHED:
return 'SESSION ESTABLISHED'
elif st == SessionState.CLOSED:
return 'CLOSED'
elif st == SessionState.FIN_SENT:
return 'FIN SENT'
elif st == SessionState.FIN_RECEIVED:
return 'FIN RECEIVED'
class SmpManager(object):
def __init__(self, transport, max_sessions=2 ** 16):
self._transport = transport
self._sessions = {}
self._used_ids_ba = bitarray(max_sessions)
self._used_ids_ba.setall(False)
self._lock = threading.RLock()
self._hdr_buf = memoryview(bytearray(b'\x00' * SMP_HEADER.size))
def __repr__(self):
return "<SmpManager sessions={}>".format(self._sessions)
def create_session(self):
try:
session_id = self._used_ids_ba.index(False)
except ValueError:
raise Error("Can't create more MARS sessions, close some sessions and try again")
session = _SmpSession(self, session_id)
with self._lock:
self._sessions[session_id] = session
self._used_ids_ba[session_id] = True
hdr = SMP_HEADER.pack(
SMP_ID,
PacketTypes.SYN,
session_id,
SMP_HEADER.size,
0,
session.high_water_for_recv,
)
self._transport.sendall(hdr)
session._state = SessionState.SESSION_ESTABLISHED
return session
def close_smp_session(self, session):
if session._state in (SessionState.CLOSED, SessionState.FIN_SENT):
return
elif session._state == SessionState.SESSION_ESTABLISHED:
with self._lock:
hdr = SMP_HEADER.pack(
SMP_ID,
PacketTypes.FIN,
session.session_id,
SMP_HEADER.size,
session.seq_num_for_send,
session.high_water_for_recv,
)
session._state = SessionState.FIN_SENT
try:
self._transport.sendall(hdr)
self.recv_packet(session)
except (socket.error, OSError) as ex:
if ex.errno in (errno.ECONNRESET, errno.EPIPE):
session._state = SessionState.CLOSED
else:
raise ex
def send_queued_packets(self, session):
with self._lock:
while session.send_queue and session.seq_num_for_send < session.high_water_for_send:
data = session.send_queue.pop(0)
self.send_packet(session, data)
@staticmethod
def _add_one_wrap(val):
return 0 if val == 2 ** 32 - 1 else val + 1
def send_packet(self, session, data):
with self._lock:
if session.seq_num_for_send < session.high_water_for_send:
l = SMP_HEADER.size + len(data)
seq_num = self._add_one_wrap(session.seq_num_for_send)
hdr = SMP_HEADER.pack(
SMP_ID,
PacketTypes.DATA,
session.session_id,
l,
seq_num,
session.high_water_for_recv,
)
session._last_high_water_for_recv = session.high_water_for_recv
self._transport.sendall(hdr + data)
session.seq_num_for_send = self._add_one_wrap(session.seq_num_for_send)
else:
session.send_queue.append(data)
self._read_smp_message()
def recv_packet(self, session):
with self._lock:
if session._state == SessionState.CLOSED:
return b''
while not session.recv_queue:
self._read_smp_message()
if session._state in (SessionState.CLOSED, SessionState.FIN_RECEIVED):
return b''
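# advance our receive window and ACK once it has grown by 2 or more
# since the last window update we sent to the server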
session.high_water_for_recv = self._add_one_wrap(session.high_water_for_recv)
if session.high_water_for_recv - session._last_high_water_for_recv >= 2:
hdr = SMP_HEADER.pack(
SMP_ID,
PacketTypes.ACK,
session.session_id,
SMP_HEADER.size,
session.seq_num_for_send,
session.high_water_for_recv,
)
self._transport.sendall(hdr)
session._last_high_water_for_recv = session.high_water_for_recv
return session.recv_queue.pop(0)
def _bad_stm(self, message):
self.close()
raise Error(message)
def _read_smp_message(self):
# caller should acquire lock before calling this function
buf_pos = 0
while buf_pos < SMP_HEADER.size:
read = self._transport.recv_into(self._hdr_buf[buf_pos:])
buf_pos += read
if read == 0:
self._bad_stm('Unexpected EOF while reading SMP header')
smid, flags, sid, l, seq_num, wnd = SMP_HEADER.unpack(self._hdr_buf)
if smid != SMP_ID:
self._bad_stm('Invalid SMP packet signature')
try:
session = self._sessions[sid]
except KeyError:
self._bad_stm('Invalid SMP packet session id')
if wnd < session.high_water_for_send:
self._bad_stm('Invalid WNDW in packet from server')
if seq_num > session.high_water_for_recv:
self._bad_stm('Invalid SEQNUM in packet from server')
if l < SMP_HEADER.size:
self._bad_stm('Invalid LENGTH in packet from server')
session._last_recv_seq_num = seq_num
if flags == PacketTypes.DATA:
if session._state == SessionState.SESSION_ESTABLISHED:
if seq_num != self._add_one_wrap(session._seq_num_for_recv):
self._bad_stm('Invalid SEQNUM in DATA packet from server')
session._seq_num_for_recv = seq_num
remains = l - SMP_HEADER.size
while remains:
data = self._transport.recv(remains)
session.recv_queue.append(data)
remains -= len(data)
if wnd > session.high_water_for_send:
session.high_water_for_send = wnd
self.send_queued_packets(session)
elif session._state == SessionState.FIN_SENT:
skipall(self._transport, l - SMP_HEADER.size)
else:
self._bad_stm('Unexpected DATA packet from server')
elif flags == PacketTypes.ACK:
if session._state in (SessionState.FIN_RECEIVED, SessionState.CLOSED):
self._bad_stm('Unexpected ACK packet from server')
if seq_num != session._seq_num_for_recv:
self._bad_stm('Invalid SEQNUM in ACK packet from server')
session.high_water_for_send = wnd
self.send_queued_packets(session)
elif flags == PacketTypes.FIN:
assert session._state in (SessionState.SESSION_ESTABLISHED, SessionState.FIN_SENT, SessionState.FIN_RECEIVED)
if session._state == SessionState.SESSION_ESTABLISHED:
session._state = SessionState.FIN_RECEIVED
elif session._state == SessionState.FIN_SENT:
session._state = SessionState.CLOSED
del self._sessions[session.session_id]
self._used_ids_ba[session.session_id] = False
elif session._state == SessionState.FIN_RECEIVED:
self._bad_stm('Unexpected FIN packet from server')
elif flags == PacketTypes.SYN:
self._bad_stm('Unexpected SYN packet from server')
else:
self._bad_stm('Unexpected FLAGS in packet from server')
def close(self):
self._transport.close()
def transport_closed(self):
for session in self._sessions.values():
session._state = SessionState.CLOSED


# --- end of src/pytds/smp.py ---
import socket
import sys
import six
# tds protocol versions
TDS70 = 0x70000000
TDS71 = 0x71000000
TDS71rev1 = 0x71000001
TDS72 = 0x72090002
TDS73A = 0x730A0003
TDS73 = TDS73A
TDS73B = 0x730B0003
TDS74 = 0x74000004
IS_TDS7_PLUS = lambda x: x.tds_version >= TDS70
IS_TDS71_PLUS = lambda x: x.tds_version >= TDS71
IS_TDS72_PLUS = lambda x: x.tds_version >= TDS72
IS_TDS73_PLUS = lambda x: x.tds_version >= TDS73A
# https://msdn.microsoft.com/en-us/library/dd304214.aspx
class PacketType:
QUERY = 1
OLDLOGIN = 2
RPC = 3
REPLY = 4
CANCEL = 6
BULK = 7
FEDAUTHTOKEN = 8
TRANS = 14 # transaction management
LOGIN = 16
AUTH = 17
PRELOGIN = 18
# mssql login options flags
# option_flag1_values
TDS_BYTE_ORDER_X86 = 0
TDS_CHARSET_ASCII = 0
TDS_DUMPLOAD_ON = 0
TDS_FLOAT_IEEE_754 = 0
TDS_INIT_DB_WARN = 0
TDS_SET_LANG_OFF = 0
TDS_USE_DB_SILENT = 0
TDS_BYTE_ORDER_68000 = 0x01
TDS_CHARSET_EBDDIC = 0x02
TDS_FLOAT_VAX = 0x04
TDS_FLOAT_ND5000 = 0x08
TDS_DUMPLOAD_OFF = 0x10 # prevent BCP
TDS_USE_DB_NOTIFY = 0x20
TDS_INIT_DB_FATAL = 0x40
TDS_SET_LANG_ON = 0x80
# enum option_flag2_values
TDS_INIT_LANG_WARN = 0
TDS_INTEGRATED_SECURTY_OFF = 0
TDS_ODBC_OFF = 0
TDS_USER_NORMAL = 0 # SQL Server login
TDS_INIT_LANG_REQUIRED = 0x01
TDS_ODBC_ON = 0x02
TDS_TRANSACTION_BOUNDARY71 = 0x04 # removed in TDS 7.2
TDS_CACHE_CONNECT71 = 0x08 # removed in TDS 7.2
TDS_USER_SERVER = 0x10 # reserved
TDS_USER_REMUSER = 0x20 # DQ login
TDS_USER_SQLREPL = 0x40 # replication login
TDS_INTEGRATED_SECURITY_ON = 0x80
# enum option_flag3_values TDS 7.3+
TDS_RESTRICTED_COLLATION = 0
TDS_CHANGE_PASSWORD = 0x01
TDS_SEND_YUKON_BINARY_XML = 0x02
TDS_REQUEST_USER_INSTANCE = 0x04
TDS_UNKNOWN_COLLATION_HANDLING = 0x08
TDS_ANY_COLLATION = 0x10
TDS5_PARAMFMT2_TOKEN = 32 # 0x20
TDS_LANGUAGE_TOKEN = 33 # 0x21 TDS 5.0 only
TDS_ORDERBY2_TOKEN = 34 # 0x22
TDS_ROWFMT2_TOKEN = 97 # 0x61 TDS 5.0 only
TDS_LOGOUT_TOKEN = 113 # 0x71 TDS 5.0 only?
TDS_RETURNSTATUS_TOKEN = 121 # 0x79
TDS_PROCID_TOKEN = 124 # 0x7C TDS 4.2 only
TDS7_RESULT_TOKEN = 129 # 0x81 TDS 7.0 only
TDS7_COMPUTE_RESULT_TOKEN = 136 # 0x88 TDS 7.0 only
TDS_COLNAME_TOKEN = 160 # 0xA0 TDS 4.2 only
TDS_COLFMT_TOKEN = 161 # 0xA1 TDS 4.2 only
TDS_DYNAMIC2_TOKEN = 163 # 0xA3
TDS_TABNAME_TOKEN = 164 # 0xA4
TDS_COLINFO_TOKEN = 165 # 0xA5
TDS_OPTIONCMD_TOKEN = 166 # 0xA6
TDS_COMPUTE_NAMES_TOKEN = 167 # 0xA7
TDS_COMPUTE_RESULT_TOKEN = 168 # 0xA8
TDS_ORDERBY_TOKEN = 169 # 0xA9
TDS_ERROR_TOKEN = 170 # 0xAA
TDS_INFO_TOKEN = 171 # 0xAB
TDS_PARAM_TOKEN = 172 # 0xAC
TDS_LOGINACK_TOKEN = 173 # 0xAD
TDS_CONTROL_TOKEN = 174 # 0xAE
TDS_ROW_TOKEN = 209 # 0xD1
TDS_NBC_ROW_TOKEN = 210 # 0xD2 as of TDS 7.3.B
TDS_CMP_ROW_TOKEN = 211 # 0xD3
TDS5_PARAMS_TOKEN = 215 # 0xD7 TDS 5.0 only
TDS_CAPABILITY_TOKEN = 226 # 0xE2
TDS_ENVCHANGE_TOKEN = 227 # 0xE3
TDS_DBRPC_TOKEN = 230 # 0xE6
TDS5_DYNAMIC_TOKEN = 231 # 0xE7 TDS 5.0 only
TDS5_PARAMFMT_TOKEN = 236 # 0xEC TDS 5.0 only
TDS_AUTH_TOKEN = 237 # 0xED TDS 7.0 only
TDS_RESULT_TOKEN = 238 # 0xEE
TDS_DONE_TOKEN = 253 # 0xFD
TDS_DONEPROC_TOKEN = 254 # 0xFE
TDS_DONEINPROC_TOKEN = 255 # 0xFF
# CURSOR support: TDS 5.0 only
TDS_CURCLOSE_TOKEN = 128 # 0x80 TDS 5.0 only
TDS_CURDELETE_TOKEN = 129 # 0x81 TDS 5.0 only
TDS_CURFETCH_TOKEN = 130 # 0x82 TDS 5.0 only
TDS_CURINFO_TOKEN = 131 # 0x83 TDS 5.0 only
TDS_CUROPEN_TOKEN = 132 # 0x84 TDS 5.0 only
TDS_CURDECLARE_TOKEN = 134 # 0x86 TDS 5.0 only
# environment type field
TDS_ENV_DATABASE = 1
TDS_ENV_LANG = 2
TDS_ENV_CHARSET = 3
TDS_ENV_PACKSIZE = 4
TDS_ENV_LCID = 5
TDS_ENV_UNICODE_DATA_SORT_COMP_FLAGS = 6
TDS_ENV_SQLCOLLATION = 7
TDS_ENV_BEGINTRANS = 8
TDS_ENV_COMMITTRANS = 9
TDS_ENV_ROLLBACKTRANS = 10
TDS_ENV_ENLIST_DTC_TRANS = 11
TDS_ENV_DEFECT_TRANS = 12
TDS_ENV_DB_MIRRORING_PARTNER = 13
TDS_ENV_PROMOTE_TRANS = 15
TDS_ENV_TRANS_MANAGER_ADDR = 16
TDS_ENV_TRANS_ENDED = 17
TDS_ENV_RESET_COMPLETION_ACK = 18
TDS_ENV_INSTANCE_INFO = 19
TDS_ENV_ROUTING = 20
# Microsoft internal stored procedure id's
TDS_SP_CURSOR = 1
TDS_SP_CURSOROPEN = 2
TDS_SP_CURSORPREPARE = 3
TDS_SP_CURSOREXECUTE = 4
TDS_SP_CURSORPREPEXEC = 5
TDS_SP_CURSORUNPREPARE = 6
TDS_SP_CURSORFETCH = 7
TDS_SP_CURSOROPTION = 8
TDS_SP_CURSORCLOSE = 9
TDS_SP_EXECUTESQL = 10
TDS_SP_PREPARE = 11
TDS_SP_EXECUTE = 12
TDS_SP_PREPEXEC = 13
TDS_SP_PREPEXECRPC = 14
TDS_SP_UNPREPARE = 15
# Flags returned in TDS_DONE token
TDS_DONE_FINAL = 0
TDS_DONE_MORE_RESULTS = 0x01 # more results follow
TDS_DONE_ERROR = 0x02 # error occurred
TDS_DONE_INXACT = 0x04 # transaction in progress
TDS_DONE_PROC = 0x08 # results are from a stored procedure
TDS_DONE_COUNT = 0x10 # count field in packet is valid
TDS_DONE_CANCELLED = 0x20 # acknowledging an attention command (usually a cancel)
TDS_DONE_EVENT = 0x40 # part of an event notification.
TDS_DONE_SRVERROR = 0x100 # SQL server server error
SYBVOID = 31 # 0x1F
IMAGETYPE = SYBIMAGE = 34 # 0x22
TEXTTYPE = SYBTEXT = 35 # 0x23
SYBVARBINARY = 37 # 0x25
INTNTYPE = SYBINTN = 38 # 0x26
SYBVARCHAR = 39 # 0x27
BINARYTYPE = SYBBINARY = 45 # 0x2D
SYBCHAR = 47 # 0x2F
INT1TYPE = SYBINT1 = 48 # 0x30
BITTYPE = SYBBIT = 50 # 0x32
INT2TYPE = SYBINT2 = 52 # 0x34
INT4TYPE = SYBINT4 = 56 # 0x38
DATETIM4TYPE = SYBDATETIME4 = 58 # 0x3A
FLT4TYPE = SYBREAL = 59 # 0x3B
MONEYTYPE = SYBMONEY = 60 # 0x3C
DATETIMETYPE = SYBDATETIME = 61 # 0x3D
FLT8TYPE = SYBFLT8 = 62 # 0x3E
NTEXTTYPE = SYBNTEXT = 99 # 0x63
SYBNVARCHAR = 103 # 0x67
BITNTYPE = SYBBITN = 104 # 0x68
NUMERICNTYPE = SYBNUMERIC = 108 # 0x6C
DECIMALNTYPE = SYBDECIMAL = 106 # 0x6A
FLTNTYPE = SYBFLTN = 109 # 0x6D
MONEYNTYPE = SYBMONEYN = 110 # 0x6E
DATETIMNTYPE = SYBDATETIMN = 111 # 0x6F
MONEY4TYPE = SYBMONEY4 = 122 # 0x7A
INT8TYPE = SYBINT8 = 127 # 0x7F
BIGCHARTYPE = XSYBCHAR = 175 # 0xAF
BIGVARCHRTYPE = XSYBVARCHAR = 167 # 0xA7
NVARCHARTYPE = XSYBNVARCHAR = 231 # 0xE7
NCHARTYPE = XSYBNCHAR = 239 # 0xEF
BIGVARBINTYPE = XSYBVARBINARY = 165 # 0xA5
BIGBINARYTYPE = XSYBBINARY = 173 # 0xAD
GUIDTYPE = SYBUNIQUE = 36 # 0x24
SSVARIANTTYPE = SYBVARIANT = 98 # 0x62
UDTTYPE = SYBMSUDT = 240 # 0xF0
XMLTYPE = SYBMSXML = 241 # 0xF1
TVPTYPE = 243 # 0xF3
DATENTYPE = SYBMSDATE = 40 # 0x28
TIMENTYPE = SYBMSTIME = 41 # 0x29
DATETIME2NTYPE = SYBMSDATETIME2 = 42 # 0x2a
DATETIMEOFFSETNTYPE = SYBMSDATETIMEOFFSET = 43 # 0x2b
#
# Sybase only types
#
SYBLONGBINARY = 225 # 0xE1
SYBUINT1 = 64 # 0x40
SYBUINT2 = 65 # 0x41
SYBUINT4 = 66 # 0x42
SYBUINT8 = 67 # 0x43
SYBBLOB = 36 # 0x24
SYBBOUNDARY = 104 # 0x68
SYBDATE = 49 # 0x31
SYBDATEN = 123 # 0x7B
SYB5INT8 = 191 # 0xBF
SYBINTERVAL = 46 # 0x2E
SYBLONGCHAR = 175 # 0xAF
SYBSENSITIVITY = 103 # 0x67
SYBSINT1 = 176 # 0xB0
SYBTIME = 51 # 0x33
SYBTIMEN = 147 # 0x93
SYBUINTN = 68 # 0x44
SYBUNITEXT = 174 # 0xAE
SYBXML = 163 # 0xA3
TDS_UT_TIMESTAMP = 80
# compute operator
SYBAOPCNT = 0x4b
SYBAOPCNTU = 0x4c
SYBAOPSUM = 0x4d
SYBAOPSUMU = 0x4e
SYBAOPAVG = 0x4f
SYBAOPAVGU = 0x50
SYBAOPMIN = 0x51
SYBAOPMAX = 0x52
# mssql2k compute operator
SYBAOPCNT_BIG = 0x09
SYBAOPSTDEV = 0x30
SYBAOPSTDEVP = 0x31
SYBAOPVAR = 0x32
SYBAOPVARP = 0x33
SYBAOPCHECKSUM_AGG = 0x72
# param flags
fByRefValue = 1
fDefaultValue = 2
TDS_IDLE = 0
TDS_QUERYING = 1
TDS_PENDING = 2
TDS_READING = 3
TDS_DEAD = 4
state_names = ['IDLE', 'QUERYING', 'PENDING', 'READING', 'DEAD']
TDS_ENCRYPTION_OFF = 0
TDS_ENCRYPTION_REQUEST = 1
TDS_ENCRYPTION_REQUIRE = 2
class PreLoginToken:
VERSION = 0
ENCRYPTION = 1
INSTOPT = 2
THREADID = 3
MARS = 4
TRACEID = 5
FEDAUTHREQUIRED = 6
NONCEOPT = 7
TERMINATOR = 0xff
class PreLoginEnc:
ENCRYPT_OFF = 0 # Encryption available but off
ENCRYPT_ON = 1 # Encryption available and on
ENCRYPT_NOT_SUP = 2 # Encryption not available
ENCRYPT_REQ = 3 # Encryption required
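# PLP (partially length-prefixed) sentinels: PLP_MARKER in a USHORT length field announces a PLP stream;
# PLP_NULL and PLP_UNKNOWN are the 8-byte NULL and unknown-length markers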
PLP_MARKER = 0xffff
PLP_NULL = 0xffffffffffffffff
PLP_UNKNOWN = 0xfffffffffffffffe
TDS_NO_COUNT = -1
TVP_NULL_TOKEN = 0xffff
# TVP COLUMN FLAGS
TVP_COLUMN_DEFAULT_FLAG = 0x200
TVP_END_TOKEN = 0x00
TVP_ROW_TOKEN = 0x01
TVP_ORDER_UNIQUE_TOKEN = 0x10
TVP_COLUMN_ORDERING_TOKEN = 0x11
class CommonEqualityMixin(object):
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
def iterdecode(iterable, codec):
""" Uses an incremental decoder to decode each chunk in iterable.
This function is a generator.
:param iterable: Iterable object which yields raw data to be decoded
:param codec: An instance of codec
"""
decoder = codec.incrementaldecoder()
for chunk in iterable:
yield decoder.decode(chunk)
yield decoder.decode(b'', True)
def force_unicode(s):
if isinstance(s, bytes):
try:
return s.decode('utf8')
except UnicodeDecodeError as e:
raise DatabaseError(e)
elif isinstance(s, six.text_type):
return s
else:
return six.text_type(s)
def tds_quote_id(ident):
""" Quote an identifier
:param ident: id to quote
:returns: Quoted identifier
"""
return '[{0}]'.format(ident.replace(']', ']]'))
# store a tuple of programming error codes
prog_errors = (
102, # syntax error
207, # invalid column name
208, # invalid object name
2812, # unknown procedure
4104 # multi-part identifier could not be bound
)
# store a tuple of integrity error codes
integrity_errors = (
515, # NULL insert
547, # FK related
2601, # violate unique index
2627, # violate UNIQUE KEY constraint
)
if sys.version_info[0] >= 3:
exc_base_class = Exception
def my_ord(val):
return val
def join_bytearrays(ba):
return b''.join(ba)
else:
exc_base_class = StandardError
my_ord = ord
def join_bytearrays(bas):
return b''.join(bytes(ba) for ba in bas)
# exception hierarchy
class Warning(exc_base_class):
pass
class Error(exc_base_class):
pass
TimeoutError = socket.timeout
class InterfaceError(Error):
pass
class DatabaseError(Error):
@property
def message(self):
if self.procname:
return 'SQL Server message %d, severity %d, state %d, ' \
'procedure %s, line %d:\n%s' % (self.number,
self.severity, self.state, self.procname,
self.line, self.text)
else:
return 'SQL Server message %d, severity %d, state %d, ' \
'line %d:\n%s' % (self.number, self.severity,
self.state, self.line, self.text)
class ClosedConnectionError(InterfaceError):
def __init__(self):
super(ClosedConnectionError, self).__init__('Server closed connection')
class DataError(Error):
pass
class OperationalError(DatabaseError):
pass
class LoginError(OperationalError):
pass
class IntegrityError(DatabaseError):
pass
class InternalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
pass
class NotSupportedError(DatabaseError):
pass
# DB-API type definitions
class DBAPITypeObject:
def __init__(self, *values):
self.values = set(values)
def __eq__(self, other):
return other in self.values
def __cmp__(self, other):
if other in self.values:
return 0
if other < self.values:
return 1
else:
return -1
# standard dbapi type objects
STRING = DBAPITypeObject(SYBVARCHAR, SYBCHAR, SYBTEXT,
XSYBNVARCHAR, XSYBNCHAR, SYBNTEXT,
XSYBVARCHAR, XSYBCHAR, SYBMSXML)
BINARY = DBAPITypeObject(SYBIMAGE, SYBBINARY, SYBVARBINARY, XSYBVARBINARY, XSYBBINARY)
NUMBER = DBAPITypeObject(SYBBIT, SYBBITN, SYBINT1, SYBINT2, SYBINT4, SYBINT8, SYBINTN,
SYBREAL, SYBFLT8, SYBFLTN)
DATETIME = DBAPITypeObject(SYBDATETIME, SYBDATETIME4, SYBDATETIMN)
DECIMAL = DBAPITypeObject(SYBMONEY, SYBMONEY4, SYBMONEYN, SYBNUMERIC,
SYBDECIMAL)
ROWID = DBAPITypeObject()
# non-standard, but useful type objects
INTEGER = DBAPITypeObject(SYBBIT, SYBBITN, SYBINT1, SYBINT2, SYBINT4, SYBINT8, SYBINTN)
REAL = DBAPITypeObject(SYBREAL, SYBFLT8, SYBFLTN)
XML = DBAPITypeObject(SYBMSXML)
class InternalProc(object):
def __init__(self, proc_id, name):
self.proc_id = proc_id
self.name = name
def __unicode__(self):
return self.name
SP_EXECUTESQL = InternalProc(TDS_SP_EXECUTESQL, 'sp_executesql')
SP_PREPARE = InternalProc(TDS_SP_PREPARE, 'sp_prepare')
SP_EXECUTE = InternalProc(TDS_SP_EXECUTE, 'sp_execute')
def skipall(stm, size):
""" Skips exactly size bytes in stm
If EOF is reached before size bytes are skipped
this function will raise :class:`ClosedConnectionError`
:param stm: Stream to skip bytes in, should have read method
this read method can return less than requested
number of bytes.
:param size: Number of bytes to skip.
"""
res = stm.recv(size)
if len(res) == size:
return
elif len(res) == 0:
raise ClosedConnectionError()
left = size - len(res)
while left:
buf = stm.recv(left)
if len(buf) == 0:
raise ClosedConnectionError()
left -= len(buf)
def read_chunks(stm, size):
""" Reads exactly size bytes from stm and produces chunks
May call stm.read multiple times until required
number of bytes is read.
If EOF is reached before size bytes are read
this function will raise :class:`ClosedConnectionError`
:param stm: Stream to read bytes from, should have read method,
this read method can return less than requested
number of bytes.
:param size: Number of bytes to read.
"""
if size == 0:
yield b''
return
res = stm.recv(size)
if len(res) == 0:
raise ClosedConnectionError()
yield res
left = size - len(res)
while left:
buf = stm.recv(left)
if len(buf) == 0:
raise ClosedConnectionError()
yield buf
left -= len(buf)
def readall(stm, size):
""" Reads exactly size bytes from stm
May call stm.read multiple times until required
number of bytes is read.
If EOF is reached before size bytes are read
this function will raise :class:`ClosedConnectionError`
:param stm: Stream to read bytes from, should have read method
this read method can return less than requested
number of bytes.
:param size: Number of bytes to read.
:returns: Bytes buffer of exactly given size.
"""
return join_bytearrays(read_chunks(stm, size))
def readall_fast(stm, size):
"""
Slightly faster version of readall, it reads no more than two chunks.
Meaning that it can only be used to read small data that doesn't span
more than two packets.
:param stm: Stream to read from, should have read method.
:param size: Number of bytes to read.
:return:
"""
buf, offset = stm.read_fast(size)
if len(buf) - offset < size:
# slow case
buf = buf[offset:]
buf += stm.recv(size - len(buf))
return buf, 0
return buf, offset
def total_seconds(td):
""" Total number of seconds in timedelta object
Python 2.6 doesn't have a total_seconds method; this function
provides a backport.
"""
return td.days * 24 * 60 * 60 + td.seconds
class Column(CommonEqualityMixin):
fNullable = 1
fCaseSen = 2
fReadWrite = 8
fIdentity = 0x10
fComputed = 0x20
def __init__(self, name='', type=None, flags=0, value=None):
self.char_codec = None
self.column_name = name
self.column_usertype = 0
self.flags = flags
self.type = type
self.value = value
self.serializer = None
def __repr__(self):
val = self.value
if isinstance(val, bytes) and len(self.value) > 100:
val = self.value[:100] + b'... len is ' + str(len(val)).encode('ascii')
if isinstance(val, six.text_type) and len(self.value) > 100:
val = self.value[:100] + '... len is ' + str(len(val))
return '<Column(name={},type={},value={},flags={},user_type={},codec={})>'.format(
repr(self.column_name),
repr(self.type),
repr(val),
repr(self.flags),
repr(self.column_usertype),
repr(self.char_codec),
)
def choose_serializer(self, type_factory, collation):
return type_factory.serializer_by_type(sql_type=self.type, collation=collation)


# --- end of src/pytds/tds_base.py ---
import logging
try:
import OpenSSL.SSL
import cryptography.hazmat.backends.openssl.backend
except ImportError:
OPENSSL_AVAILABLE = False
else:
OPENSSL_AVAILABLE = True
from . import tds_base
BUFSIZE = 65536
logger = logging.getLogger(__name__)
class EncryptedSocket(object):
def __init__(self, transport, tls_conn):
self._transport = transport
self._tls_conn = tls_conn
def gettimeout(self):
return self._transport.gettimeout()
def settimeout(self, timeout):
self._transport.settimeout(timeout)
def sendall(self, data, flags=0):
# SSL.Connection does not support bytearrays, need to convert to bytes first
if isinstance(data, bytearray):
data = bytes(data)
res = self._tls_conn.sendall(data)
buf = self._tls_conn.bio_read(BUFSIZE)
self._transport.sendall(buf)
return res
# def send(self, data):
# while True:
# try:
# return self._tls_conn.send(data)
# except OpenSSL.SSL.WantWriteError:
# buf = self._tls_conn.bio_read(BUFSIZE)
# self._transport.sendall(buf)
def recv_into(self, buffer, size=0):
if size == 0:
size = len(buffer)
res = self.recv(size)
buffer[0:len(res)] = res
return len(res)
def recv(self, bufsize):
while True:
try:
buf = self._tls_conn.bio_read(bufsize)
except OpenSSL.SSL.WantReadError:
pass
else:
self._transport.sendall(buf)
try:
return self._tls_conn.recv(bufsize)
except OpenSSL.SSL.WantReadError:
buf = self._transport.recv(BUFSIZE)
if buf:
self._tls_conn.bio_write(buf)
else:
return b''
def close(self):
self._tls_conn.shutdown()
self._transport.close()
def shutdown(self):
self._tls_conn.shutdown()
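# pyOpenSSL verification callback: ret_code is 1 when OpenSSL's own chain check passed;
# returning True accepts the certificate at this depth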
def verify_cb(conn, cert, err_num, err_depth, ret_code):
return ret_code == 1
def validate_host(cert, name):
"""
Validates host name against certificate
@param cert: Certificate returned by host
@param name: Actual host name used for connection
@return: Returns true if host name matches certificate
"""
cn = None
for t, v in cert.get_subject().get_components():
if t == b'CN':
cn = v
break
if cn == name:
return True
# checking SAN
s_name = name.decode('ascii')
for i in range(cert.get_extension_count()):
ext = cert.get_extension(i)
if ext.get_short_name() == b'subjectAltName':
s = str(ext)
# SANs usually have a form like: DNS:hostname
if s.startswith('DNS:') and s[4:] == s_name:
return True
# TODO handle wildcards
return False
def create_context(cafile):
ctx = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_2_METHOD)
ctx.set_options(OpenSSL.SSL.OP_NO_SSLv2)
ctx.set_options(OpenSSL.SSL.OP_NO_SSLv3)
ctx.set_verify(OpenSSL.SSL.VERIFY_PEER, verify_cb)
#print("verify depth:", ctx.get_verify_depth())
#print("verify mode:", ctx.get_verify_mode())
#print("openssl version:", cryptography.hazmat.backends.openssl.backend.openssl_version_text())
ctx.load_verify_locations(cafile=cafile)
return ctx
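# Minimal usage sketch (the CA bundle path is a placeholder): the resulting
# context enforces TLS 1.2, peer verification, and the given trust roots.
#
#   ctx = create_context(cafile='/path/to/ca-bundle.pem')
#   login.tls_ctx = ctx  # a login record as consumed by establish_channel below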
# https://msdn.microsoft.com/en-us/library/dd357559.aspx
def establish_channel(tds_sock):
w = tds_sock._writer
r = tds_sock._reader
login = tds_sock.conn._login
bhost = login.server_name.encode('ascii')
conn = OpenSSL.SSL.Connection(login.tls_ctx)
conn.set_tlsext_host_name(bhost)
# change connection to client mode
conn.set_connect_state()
logger.info('doing TLS handshake')
while True:
try:
logger.debug('calling do_handshake')
conn.do_handshake()
except OpenSSL.SSL.WantReadError:
logger.debug('got WantReadError, getting data from the write end of the TLS connection buffer')
try:
req = conn.bio_read(BUFSIZE)
except OpenSSL.SSL.WantReadError:
# PyOpenSSL - https://github.com/pyca/pyopenssl/issues/887
logger.debug('got WantReadError again, waiting for response...')
else:
logger.debug('sending %d bytes of the handshake data to the server', len(req))
w.begin_packet(tds_base.PacketType.PRELOGIN)
w.write(req)
w.flush()
logger.debug('receiving response from the server')
resp = r.read_whole_packet()
# TODO validate r.packet_type
logger.debug('adding %d bytes of the response into the TLS connection buffer', len(resp))
conn.bio_write(resp)
else:
logger.info('TLS handshake is complete')
if login.validate_host:
if not validate_host(cert=conn.get_peer_certificate(), name=bhost):
raise tds_base.Error("Certificate does not match host name '{}'".format(login.server_name))
enc_sock = EncryptedSocket(transport=tds_sock.conn.sock, tls_conn=conn)
tds_sock.conn.sock = enc_sock
tds_sock._writer._transport = enc_sock
tds_sock._reader._transport = enc_sock
return
def revert_to_clear(tds_sock):
"""
Reverts connection back to non-encrypted mode
Used when client sent ENCRYPT_OFF flag
@param tds_sock:
@return:
"""
enc_conn = tds_sock.conn.sock
clear_conn = enc_conn._transport
enc_conn.shutdown()
tds_sock.conn.sock = clear_conn
tds_sock._writer._transport = clear_conn
tds_sock._reader._transport = clear_conn
import six
from ctypes import c_ulong, c_ushort, c_void_p, c_ulonglong, POINTER,\
Structure, c_wchar_p, WINFUNCTYPE, windll, byref, cast
class Status(object):
SEC_E_OK = 0
SEC_I_CONTINUE_NEEDED = 0x00090312
SEC_I_COMPLETE_AND_CONTINUE = 0x00090314
SEC_I_INCOMPLETE_CREDENTIALS = 0x00090320
SEC_E_INSUFFICIENT_MEMORY = 0x80090300 - 0x100000000
SEC_E_INVALID_HANDLE = 0x80090301 - 0x100000000
SEC_E_UNSUPPORTED_FUNCTION = 0x80090302 - 0x100000000
SEC_E_INTERNAL_ERROR = 0x80090304 - 0x100000000
SEC_E_SECPKG_NOT_FOUND = 0x80090305 - 0x100000000
SEC_E_NOT_OWNER = 0x80090306 - 0x100000000
SEC_E_INVALID_TOKEN = 0x80090308 - 0x100000000
SEC_E_NO_IMPERSONATION = 0x8009030B - 0x100000000
SEC_E_LOGON_DENIED = 0x8009030C - 0x100000000
SEC_E_UNKNOWN_CREDENTIALS = 0x8009030D - 0x100000000
SEC_E_NO_CREDENTIALS = 0x8009030E - 0x100000000
SEC_E_OUT_OF_SEQUENCE = 0x80090310 - 0x100000000
SEC_E_NO_AUTHENTICATING_AUTHORITY = 0x80090311 - 0x100000000
SEC_E_BUFFER_TOO_SMALL = 0x80090321 - 0x100000000
SEC_E_WRONG_PRINCIPAL = 0x80090322 - 0x100000000
SEC_E_ALGORITHM_MISMATCH = 0x80090331 - 0x100000000
@classmethod
def getname(cls, value):
for name in dir(cls):
if name.startswith('SEC_E_') and getattr(cls, name) == value:
return name
return 'unknown value {0:x}'.format(0x100000000 + value)
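# Illustrative examples (not part of the API):
#
#   Status.getname(Status.SEC_E_OK)           # -> 'SEC_E_OK'
#   Status.getname(0x80090308 - 0x100000000)  # -> 'SEC_E_INVALID_TOKEN'
#   Status.getname(-1)                        # -> 'unknown value ffffffff'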
#define SECBUFFER_EMPTY 0 // Undefined, replaced by provider
#define SECBUFFER_DATA 1 // Packet data
SECBUFFER_TOKEN = 2
#define SECBUFFER_PKG_PARAMS 3 // Package specific parameters
#define SECBUFFER_MISSING 4 // Missing Data indicator
#define SECBUFFER_EXTRA 5 // Extra data
#define SECBUFFER_STREAM_TRAILER 6 // Security Trailer
#define SECBUFFER_STREAM_HEADER 7 // Security Header
#define SECBUFFER_NEGOTIATION_INFO 8 // Hints from the negotiation pkg
#define SECBUFFER_PADDING 9 // non-data padding
#define SECBUFFER_STREAM 10 // whole encrypted message
#define SECBUFFER_MECHLIST 11
#define SECBUFFER_MECHLIST_SIGNATURE 12
#define SECBUFFER_TARGET 13 // obsolete
#define SECBUFFER_CHANNEL_BINDINGS 14
#define SECBUFFER_CHANGE_PASS_RESPONSE 15
#define SECBUFFER_TARGET_HOST 16
#define SECBUFFER_ALERT 17
SECPKG_CRED_INBOUND = 0x00000001
SECPKG_CRED_OUTBOUND = 0x00000002
SECPKG_CRED_BOTH = 0x00000003
SECPKG_CRED_DEFAULT = 0x00000004
SECPKG_CRED_RESERVED = 0xF0000000
SECBUFFER_VERSION = 0
#define ISC_REQ_DELEGATE 0x00000001
#define ISC_REQ_MUTUAL_AUTH 0x00000002
ISC_REQ_REPLAY_DETECT = 4
#define ISC_REQ_SEQUENCE_DETECT 0x00000008
ISC_REQ_CONFIDENTIALITY = 0x10
ISC_REQ_USE_SESSION_KEY = 0x00000020
ISC_REQ_PROMPT_FOR_CREDS = 0x00000040
ISC_REQ_USE_SUPPLIED_CREDS = 0x00000080
ISC_REQ_ALLOCATE_MEMORY = 0x00000100
ISC_REQ_USE_DCE_STYLE = 0x00000200
ISC_REQ_DATAGRAM = 0x00000400
ISC_REQ_CONNECTION = 0x00000800
#define ISC_REQ_CALL_LEVEL 0x00001000
#define ISC_REQ_FRAGMENT_SUPPLIED 0x00002000
#define ISC_REQ_EXTENDED_ERROR 0x00004000
#define ISC_REQ_STREAM 0x00008000
#define ISC_REQ_INTEGRITY 0x00010000
#define ISC_REQ_IDENTIFY 0x00020000
#define ISC_REQ_NULL_SESSION 0x00040000
#define ISC_REQ_MANUAL_CRED_VALIDATION 0x00080000
#define ISC_REQ_RESERVED1 0x00100000
#define ISC_REQ_FRAGMENT_TO_FIT 0x00200000
#// This exists only in Windows Vista and greater
#define ISC_REQ_FORWARD_CREDENTIALS 0x00400000
#define ISC_REQ_NO_INTEGRITY 0x00800000 // honored only by SPNEGO
#define ISC_REQ_USE_HTTP_STYLE 0x01000000
#define ISC_REQ_UNVERIFIED_TARGET_NAME 0x20000000
#define ISC_REQ_CONFIDENTIALITY_ONLY 0x40000000 // honored by SPNEGO/Kerberos
SECURITY_NETWORK_DREP = 0
SECURITY_NATIVE_DREP = 0x10
SECPKG_CRED_ATTR_NAMES = 1
ULONG = c_ulong
USHORT = c_ushort
PULONG = POINTER(ULONG)
PVOID = c_void_p
TimeStamp = c_ulonglong
PTimeStamp = POINTER(c_ulonglong)
PLUID = POINTER(c_ulonglong)
class SecHandle(Structure):
_fields_ = [
('lower', c_void_p),
('upper', c_void_p),
]
PSecHandle = POINTER(SecHandle)
CredHandle = SecHandle
PCredHandle = PSecHandle
PCtxtHandle = PSecHandle
class SecBuffer(Structure):
_fields_ = [
('cbBuffer', ULONG),
('BufferType', ULONG),
('pvBuffer', PVOID),
]
PSecBuffer = POINTER(SecBuffer)
class SecBufferDesc(Structure):
_fields_ = [
('ulVersion', ULONG),
('cBuffers', ULONG),
('pBuffers', PSecBuffer),
]
PSecBufferDesc = POINTER(SecBufferDesc)
class SEC_WINNT_AUTH_IDENTITY(Structure):
_fields_ = [
('User', c_wchar_p),
('UserLength', c_ulong),
('Domain', c_wchar_p),
('DomainLength', c_ulong),
('Password', c_wchar_p),
('PasswordLength', c_ulong),
('Flags', c_ulong),
]
class SecPkgInfo(Structure):
_fields_ = [
('fCapabilities', ULONG),
('wVersion', USHORT),
('wRPCID', USHORT),
('cbMaxToken', ULONG),
('Name', c_wchar_p),
('Comment', c_wchar_p),
]
PSecPkgInfo = POINTER(SecPkgInfo)
class SecPkgCredentials_Names(Structure):
_fields_ = [('UserName', c_wchar_p)]
def ret_val(value):
if value < 0:
raise Exception('SSPI Error {0}'.format(Status.getname(value)))
return value
ENUMERATE_SECURITY_PACKAGES_FN = WINFUNCTYPE(
ret_val,
POINTER(c_ulong),
POINTER(POINTER(SecPkgInfo)))
ACQUIRE_CREDENTIALS_HANDLE_FN = WINFUNCTYPE(
ret_val,
c_wchar_p, # principal
c_wchar_p, # package
ULONG, # fCredentialUse
PLUID, # pvLogonID
PVOID, # pAuthData
PVOID, # pGetKeyFn
PVOID, # pvGetKeyArgument
PCredHandle, # phCredential
PTimeStamp # ptsExpiry
)
FREE_CREDENTIALS_HANDLE_FN = WINFUNCTYPE(ret_val, POINTER(SecHandle))
INITIALIZE_SECURITY_CONTEXT_FN = WINFUNCTYPE(
ret_val,
PCredHandle,
PCtxtHandle, # phContext,
c_wchar_p, # pszTargetName,
ULONG, # fContextReq,
ULONG, # Reserved1,
ULONG, # TargetDataRep,
PSecBufferDesc, # pInput,
ULONG, # Reserved2,
PCtxtHandle, # phNewContext,
PSecBufferDesc, # pOutput,
PULONG, # pfContextAttr,
PTimeStamp, # ptsExpiry
)
COMPLETE_AUTH_TOKEN_FN = WINFUNCTYPE(
ret_val,
PCtxtHandle, # phContext
PSecBufferDesc, # pToken
)
FREE_CONTEXT_BUFFER_FN = WINFUNCTYPE(ret_val, PVOID)
QUERY_CREDENTIAL_ATTRIBUTES_FN = WINFUNCTYPE(
ret_val,
PCredHandle, # cred
ULONG, # attribute
PVOID, # out buffer
)
ACCEPT_SECURITY_CONTEXT_FN = PVOID
DELETE_SECURITY_CONTEXT_FN = WINFUNCTYPE(ret_val, PCtxtHandle)
APPLY_CONTROL_TOKEN_FN = PVOID
QUERY_CONTEXT_ATTRIBUTES_FN = PVOID
IMPERSONATE_SECURITY_CONTEXT_FN = PVOID
REVERT_SECURITY_CONTEXT_FN = PVOID
MAKE_SIGNATURE_FN = PVOID
VERIFY_SIGNATURE_FN = PVOID
QUERY_SECURITY_PACKAGE_INFO_FN = WINFUNCTYPE(
ret_val,
c_wchar_p, # package name
POINTER(PSecPkgInfo),
)
EXPORT_SECURITY_CONTEXT_FN = PVOID
IMPORT_SECURITY_CONTEXT_FN = PVOID
ADD_CREDENTIALS_FN = PVOID
QUERY_SECURITY_CONTEXT_TOKEN_FN = PVOID
ENCRYPT_MESSAGE_FN = PVOID
DECRYPT_MESSAGE_FN = PVOID
SET_CONTEXT_ATTRIBUTES_FN = PVOID
class SECURITY_FUNCTION_TABLE(Structure):
_fields_ = [
('dwVersion', c_ulong),
('EnumerateSecurityPackages', ENUMERATE_SECURITY_PACKAGES_FN),
('QueryCredentialsAttributes', QUERY_CREDENTIAL_ATTRIBUTES_FN),
('AcquireCredentialsHandle', ACQUIRE_CREDENTIALS_HANDLE_FN),
('FreeCredentialsHandle', FREE_CREDENTIALS_HANDLE_FN),
('Reserved2', c_void_p),
('InitializeSecurityContext', INITIALIZE_SECURITY_CONTEXT_FN),
('AcceptSecurityContext', ACCEPT_SECURITY_CONTEXT_FN),
('CompleteAuthToken', COMPLETE_AUTH_TOKEN_FN),
('DeleteSecurityContext', DELETE_SECURITY_CONTEXT_FN),
('ApplyControlToken', APPLY_CONTROL_TOKEN_FN),
('QueryContextAttributes', QUERY_CONTEXT_ATTRIBUTES_FN),
('ImpersonateSecurityContext', IMPERSONATE_SECURITY_CONTEXT_FN),
('RevertSecurityContext', REVERT_SECURITY_CONTEXT_FN),
('MakeSignature', MAKE_SIGNATURE_FN),
('VerifySignature', VERIFY_SIGNATURE_FN),
('FreeContextBuffer', FREE_CONTEXT_BUFFER_FN),
('QuerySecurityPackageInfo', QUERY_SECURITY_PACKAGE_INFO_FN),
('Reserved3', c_void_p),
('Reserved4', c_void_p),
('ExportSecurityContext', EXPORT_SECURITY_CONTEXT_FN),
('ImportSecurityContext', IMPORT_SECURITY_CONTEXT_FN),
('AddCredentials', ADD_CREDENTIALS_FN),
('Reserved8', c_void_p),
('QuerySecurityContextToken', QUERY_SECURITY_CONTEXT_TOKEN_FN),
('EncryptMessage', ENCRYPT_MESSAGE_FN),
('DecryptMessage', DECRYPT_MESSAGE_FN),
('SetContextAttributes', SET_CONTEXT_ATTRIBUTES_FN),
]
_PInitSecurityInterface = WINFUNCTYPE(POINTER(SECURITY_FUNCTION_TABLE))
InitSecurityInterface = _PInitSecurityInterface(('InitSecurityInterfaceW', windll.secur32))
sec_fn = InitSecurityInterface()
if not sec_fn:
raise Exception('InitSecurityInterface failed')
sec_fn = sec_fn.contents
class _SecContext(object):
def close(self):
if self._handle.lower and self._handle.upper:
sec_fn.DeleteSecurityContext(self._handle)
self._handle.lower = self._handle.upper = 0
def __del__(self):
self.close()
def complete_auth_token(self, bufs):
sec_fn.CompleteAuthToken(
byref(self._handle),
byref(_make_buffers_desc(bufs)))
def next(self,
flags,
target_name=None,
byte_ordering='network',
input_buffers=None,
output_buffers=None):
input_buffers_desc = _make_buffers_desc(input_buffers) if input_buffers else None
output_buffers_desc = _make_buffers_desc(output_buffers) if output_buffers else None
status = sec_fn.InitializeSecurityContext(
byref(self._cred._handle),
byref(self._handle),
target_name,
flags,
0,
SECURITY_NETWORK_DREP if byte_ordering == 'network' else SECURITY_NATIVE_DREP,
byref(input_buffers_desc) if input_buffers_desc else None,
0,
byref(self._handle),
byref(output_buffers_desc) if output_buffers_desc else None,
byref(self._attrs),
byref(self._ts))
result_buffers = []
for i, (type, buf) in enumerate(output_buffers):
buf = buf[:output_buffers_desc.pBuffers[i].cbBuffer]
result_buffers.append((type, buf))
return status, result_buffers
class SspiCredentials(object):
def __init__(self, package, use, identity=None):
self._handle = SecHandle()
self._ts = TimeStamp()
sec_fn.AcquireCredentialsHandle(
None, package, use,
None, byref(identity) if identity and identity.Domain else None,
None, None, byref(self._handle), byref(self._ts))
def close(self):
if self._handle.lower or self._handle.upper:
sec_fn.FreeCredentialsHandle(byref(self._handle))
self._handle.lower = 0
self._handle.upper = 0
def __del__(self):
self.close()
def query_user_name(self):
names = SecPkgCredentials_Names()
try:
sec_fn.QueryCredentialsAttributes(
byref(self._handle),
SECPKG_CRED_ATTR_NAMES,
byref(names))
user_name = six.text_type(names.UserName)
finally:
p = c_wchar_p.from_buffer(names, SecPkgCredentials_Names.UserName.offset)
sec_fn.FreeContextBuffer(p)
return user_name
def create_context(
self,
flags,
target_name=None,
byte_ordering='network',
input_buffers=None,
output_buffers=None):
ctx = _SecContext()
ctx._cred = self
ctx._handle = SecHandle()
ctx._ts = TimeStamp()
ctx._attrs = ULONG()
input_buffers_desc = _make_buffers_desc(input_buffers) if input_buffers else None
output_buffers_desc = _make_buffers_desc(output_buffers) if output_buffers else None
status = sec_fn.InitializeSecurityContext(
byref(self._handle),
None,
target_name,
flags,
0,
SECURITY_NETWORK_DREP if byte_ordering == 'network' else SECURITY_NATIVE_DREP,
byref(input_buffers_desc) if input_buffers_desc else None,
0,
byref(ctx._handle),
byref(output_buffers_desc) if output_buffers_desc else None,
byref(ctx._attrs),
byref(ctx._ts))
result_buffers = []
for i, (type, buf) in enumerate(output_buffers):
buf = buf[:output_buffers_desc.pBuffers[i].cbBuffer]
result_buffers.append((type, buf))
return ctx, status, result_buffers
def _make_buffers_desc(buffers):
desc = SecBufferDesc()
desc.ulVersion = SECBUFFER_VERSION
bufs_array = (SecBuffer * len(buffers))()
for i, (type, buf) in enumerate(buffers):
bufs_array[i].BufferType = type
bufs_array[i].cbBuffer = len(buf)
bufs_array[i].pvBuffer = cast(buf, PVOID)
desc.pBuffers = bufs_array
desc.cBuffers = len(buffers)
return desc
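# Illustrative sketch: the buffers argument is a list of (buffer_type, buffer)
# pairs, e.g. a single 4KB token buffer for a handshake step (names below are
# only an example):
#
#   from ctypes import create_string_buffer
#   tok = create_string_buffer(4096)
#   desc = _make_buffers_desc([(SECBUFFER_TOKEN, tok)])
#   # desc.cBuffers == 1, desc.pBuffers[0].cbBuffer == 4096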
def make_winnt_identity(domain, user_name, password):
identity = SEC_WINNT_AUTH_IDENTITY()
identity.Flags = 2 # SEC_WINNT_AUTH_IDENTITY_UNICODE
identity.Password = password
identity.PasswordLength = len(password)
identity.Domain = domain
identity.DomainLength = len(domain)
identity.User = user_name
identity.UserLength = len(user_name)
return identity
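# Usage sketch (credential values are placeholders):
#
#   identity = make_winnt_identity(u'DOMAIN', u'user', u'password')
#   cred = SspiCredentials(package=u'NTLM', use=SECPKG_CRED_OUTBOUND,
#                          identity=identity)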
#class SspiSecBuffer(object):
# def __init__(self, type, buflen=4096):
# self._buf = create_string_buffer(int(buflen))
# self._desc = SecBuffer()
# self._desc.cbBuffer = buflen
# self._desc.BufferType = type
# self._desc.pvBuffer = cast(self._buf, PVOID)
#
#class SspiSecBuffers(object):
# def __init__(self):
# self._desc = SecBufferDesc()
# self._desc.ulVersion = SECBUFFER_VERSION
# self._descrs = (SecBuffer * 8)()
# self._desc.pBuffers = self._descrs
#
# def append(self, buf):
# if len(self._descrs) <= self._desc.cBuffers:
# newdescrs = (SecBuffer * (len(self._descrs) * 2))(*self._descrs)
# self._descrs = newdescrs
# self._desc.pBuffers = newdescrs
# self._descrs[self._desc.cBuffers] = buf._desc
# self._desc.cBuffers += 1
def enum_security_packages():
num = ULONG()
infos = POINTER(SecPkgInfo)()
status = sec_fn.EnumerateSecurityPackages(byref(num), byref(infos))
try:
return [{'caps': infos[i].fCapabilities,
'version': infos[i].wVersion,
'rpcid': infos[i].wRPCID,
'max_token': infos[i].cbMaxToken,
'name': infos[i].Name,
'comment': infos[i].Comment,
} for i in range(num.value)]
finally:
sec_fn.FreeContextBuffer(infos)
import itertools
import datetime
import decimal
import struct
import re
import uuid
import six
import functools
from io import StringIO, BytesIO
from pytds.tds_base import read_chunks
from . import tds_base
from .collate import ucs2_codec, raw_collation
from . import tz
_flt4_struct = struct.Struct('f')
_flt8_struct = struct.Struct('d')
_utc = tz.utc
def _applytz(dt, tzinfo):
if not tzinfo:
return dt
dt = dt.replace(tzinfo=tzinfo)
return dt
def _decode_num(buf):
""" Decodes little-endian integer from buffer
Buffer can be of any size
"""
return functools.reduce(lambda acc, val: acc * 256 + tds_base.my_ord(val), reversed(buf), 0)
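# Illustrative examples (little-endian, arbitrary width):
#
#   _decode_num(b'\x01\x02')      # -> 0x0201 == 513
#   _decode_num(b'\xff\xff\xff')  # -> 0xffffff == 16777215
#   _decode_num(b'')              # -> 0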
class PlpReader(object):
""" Partially length prefixed reader
Spec: http://msdn.microsoft.com/en-us/library/dd340469.aspx
"""
def __init__(self, r):
"""
:param r: An instance of :class:`_TdsReader`
"""
self._rdr = r
size = r.get_uint8()
self._size = size
def is_null(self):
"""
:return: True if stored value is NULL
"""
return self._size == tds_base.PLP_NULL
def is_unknown_len(self):
"""
:return: True if total size is unknown upfront
"""
return self._size == tds_base.PLP_UNKNOWN
def size(self):
"""
:return: Total size in bytes if is_unknown_len and is_null are both False
"""
return self._size
def chunks(self):
""" Generates chunks from stream, each chunk is an instace of bytes.
"""
if self.is_null():
return
total = 0
while True:
chunk_len = self._rdr.get_uint()
if chunk_len == 0:
if not self.is_unknown_len() and total != self._size:
msg = "PLP actual length (%d) doesn't match reported length (%d)" % (total, self._size)
self._rdr.session.bad_stream(msg)
return
total += chunk_len
left = chunk_len
while left:
buf = self._rdr.recv(left)
yield buf
left -= len(buf)
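# Wire-format sketch of what PlpReader consumes (assumption: a minimal stub
# standing in for pytds's _TdsReader; only the methods used above are needed):
#
#   import io, struct
#
#   class _StubReader(object):
#       def __init__(self, data):
#           self._buf = io.BytesIO(data)
#       def get_uint8(self):  # 8-byte total length
#           return struct.unpack('<Q', self._buf.read(8))[0]
#       def get_uint(self):   # 4-byte chunk length
#           return struct.unpack('<I', self._buf.read(4))[0]
#       def recv(self, size):
#           return self._buf.read(size)
#
#   wire = (struct.pack('<Q', 5)             # total size: 5 bytes
#           + struct.pack('<I', 5) + b'hello'  # one 5-byte chunk
#           + struct.pack('<I', 0))          # zero-length chunk terminates
#   assert b''.join(PlpReader(_StubReader(wire)).chunks()) == b'hello'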
class _StreamChunkedHandler(object):
def __init__(self, stream):
self.stream = stream
def add_chunk(self, val):
self.stream.write(val)
def end(self):
return self.stream
class _DefaultChunkedHandler(object):
def __init__(self, stream):
self.stream = stream
def add_chunk(self, val):
self.stream.write(val)
def end(self):
value = self.stream.getvalue()
self.stream.seek(0)
self.stream.truncate()
return value
def __eq__(self, other):
return self.stream.getvalue() == other.stream.getvalue()
def __ne__(self, other):
return not self.__eq__(other)
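# Note the difference between the two handlers above: _StreamChunkedHandler
# hands back the stream itself, while _DefaultChunkedHandler returns the
# accumulated value and resets its stream, so one instance can be reused
# across rows. Illustrative example:
#
#   h = _DefaultChunkedHandler(BytesIO())
#   h.add_chunk(b'ab'); h.add_chunk(b'cd')
#   h.end()  # -> b'abcd'; the internal stream is now empty again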
class SqlTypeMetaclass(tds_base.CommonEqualityMixin):
def __repr__(self):
return '<sqltype:{}>'.format(self.get_declaration())
def get_declaration(self):
raise NotImplementedError()
class ImageType(SqlTypeMetaclass):
def get_declaration(self):
return 'IMAGE'
class BinaryType(SqlTypeMetaclass):
def __init__(self, size=30):
self._size = size
@property
def size(self):
return self._size
def get_declaration(self):
return 'BINARY({})'.format(self._size)
class VarBinaryType(SqlTypeMetaclass):
def __init__(self, size=30):
self._size = size
@property
def size(self):
return self._size
def get_declaration(self):
return 'VARBINARY({})'.format(self._size)
class VarBinaryMaxType(SqlTypeMetaclass):
def get_declaration(self):
return 'VARBINARY(MAX)'
class CharType(SqlTypeMetaclass):
def __init__(self, size=30):
self._size = size
@property
def size(self):
return self._size
def get_declaration(self):
return 'CHAR({})'.format(self._size)
class VarCharType(SqlTypeMetaclass):
def __init__(self, size=30):
self._size = size
@property
def size(self):
return self._size
def get_declaration(self):
return 'VARCHAR({})'.format(self._size)
class VarCharMaxType(SqlTypeMetaclass):
def get_declaration(self):
return 'VARCHAR(MAX)'
class NCharType(SqlTypeMetaclass):
def __init__(self, size=30):
self._size = size
@property
def size(self):
return self._size
def get_declaration(self):
return 'NCHAR({})'.format(self._size)
class NVarCharType(SqlTypeMetaclass):
def __init__(self, size=30):
self._size = size
@property
def size(self):
return self._size
def get_declaration(self):
return 'NVARCHAR({})'.format(self._size)
class NVarCharMaxType(SqlTypeMetaclass):
def get_declaration(self):
return 'NVARCHAR(MAX)'
class TextType(SqlTypeMetaclass):
def get_declaration(self):
return 'TEXT'
class NTextType(SqlTypeMetaclass):
def get_declaration(self):
return 'NTEXT'
class XmlType(SqlTypeMetaclass):
def get_declaration(self):
return 'XML'
class SmallMoneyType(SqlTypeMetaclass):
def get_declaration(self):
return 'SMALLMONEY'
class MoneyType(SqlTypeMetaclass):
def get_declaration(self):
return 'MONEY'
class DecimalType(SqlTypeMetaclass):
def __init__(self, precision=18, scale=0):
self._precision = precision
self._scale = scale
@classmethod
def from_value(cls, value):
if not (-10 ** 38 + 1 <= value <= 10 ** 38 - 1):
raise tds_base.DataError('Decimal value is out of range')
with decimal.localcontext() as context:
context.prec = 38
value = value.normalize()
_, digits, exp = value.as_tuple()
if exp > 0:
scale = 0
prec = len(digits) + exp
else:
scale = -exp
prec = max(len(digits), scale)
return cls(precision=prec, scale=scale)
@property
def precision(self):
return self._precision
@property
def scale(self):
return self._scale
def get_declaration(self):
return 'DECIMAL({}, {})'.format(self._precision, self._scale)
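# Illustrative examples of the precision/scale inference in from_value:
#
#   DecimalType.from_value(decimal.Decimal('3.14'))  # -> DECIMAL(3, 2)
#   DecimalType.from_value(decimal.Decimal('1E+3'))  # -> DECIMAL(4, 0)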
class UniqueIdentifierType(SqlTypeMetaclass):
def get_declaration(self):
return 'UNIQUEIDENTIFIER'
class VariantType(SqlTypeMetaclass):
def get_declaration(self):
return 'SQL_VARIANT'
class SqlValueMetaclass(tds_base.CommonEqualityMixin):
pass
class BaseTypeSerializer(tds_base.CommonEqualityMixin):
""" Base type for TDS data types.
All TDS types should derive from it.
In addition actual types should provide the following:
- type - class variable storing type identifier
"""
type = 0
def __init__(self, precision=None, scale=None, size=None):
self._precision = precision
self._scale = scale
self._size = size
@property
def precision(self):
return self._precision
@property
def scale(self):
return self._scale
@property
def size(self):
return self._size
def get_typeid(self):
""" Returns type identifier of type. """
return self.type
@classmethod
def from_stream(cls, r):
""" Class method that reads and returns a type instance.
:param r: An instance of :class:`_TdsReader` to read type from.
Should be implemented in actual types.
"""
raise NotImplementedError
def write_info(self, w):
""" Writes type info into w stream.
:param w: An instance of :class:`_TdsWriter` to write into.
Should be symmetrical to from_stream method.
Should be implemented in actual types.
"""
raise NotImplementedError
def write(self, w, value):
""" Writes type's value into stream
:param w: An instance of :class:`_TdsWriter` to write into.
:param value: A value to be stored, should be compatible with the type
Should be implemented in actual types.
"""
raise NotImplementedError
def read(self, r):
""" Reads value from the stream.
:param r: An instance of :class:`_TdsReader` to read value from.
:return: A read value.
Should be implemented in actual types.
"""
raise NotImplementedError
def set_chunk_handler(self, chunk_handler):
raise ValueError("Column type does not support chunk handler")
class BasePrimitiveTypeSerializer(BaseTypeSerializer):
""" Base type for primitive TDS data types.
Primitive type is a fixed size type with no type arguments.
All primitive TDS types should derive from it.
In addition actual types should provide the following:
- type - class variable storing type identifier
- declaration - class variable storing name of sql type
- instance - class variable storing instance of class
"""
def write(self, w, value):
raise NotImplementedError
def read(self, r):
raise NotImplementedError
instance = None
@classmethod
def from_stream(cls, r):
return cls.instance
def write_info(self, w):
pass
class BaseTypeSerializerN(BaseTypeSerializer):
""" Base type for nullable TDS data types.
All nullable TDS types should derive from it.
In addition actual types should provide the following:
- type - class variable storing type identifier
- subtypes - class variable storing dict {subtype_size: subtype_instance}
"""
subtypes = {}
def __init__(self, size):
super(BaseTypeSerializerN, self).__init__(size=size)
assert size in self.subtypes
self._current_subtype = self.subtypes[size]
def get_typeid(self):
return self._current_subtype.get_typeid()
@classmethod
def from_stream(cls, r):
size = r.get_byte()
if size not in cls.subtypes:
raise tds_base.InterfaceError('Invalid %s size' % cls.type, size)
return cls(size)
def write_info(self, w):
w.put_byte(self.size)
def read(self, r):
size = r.get_byte()
if size == 0:
return None
if size not in self.subtypes:
raise r.session.bad_stream('Invalid %s size' % self.type, size)
return self.subtypes[size].read(r)
def write(self, w, val):
if val is None:
w.put_byte(0)
return
w.put_byte(self.size)
self._current_subtype.write(w, val)
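# Wire-format note for the nullable serializers above: a value is sent as a
# one-byte size prefix followed by the fixed-size payload, and a size of 0
# means NULL. For example (a sketch, not captured traffic), an INTN column
# of size 4 carries the value 1 as b'\x04\x01\x00\x00\x00' and NULL as a
# bare b'\x00'.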
class BitType(SqlTypeMetaclass):
type = tds_base.SYBBITN
def get_declaration(self):
return 'BIT'
class TinyIntType(SqlTypeMetaclass):
type = tds_base.SYBINTN
size = 1
def get_declaration(self):
return 'TINYINT'
class SmallIntType(SqlTypeMetaclass):
type = tds_base.SYBINTN
size = 2
def get_declaration(self):
return 'SMALLINT'
class IntType(SqlTypeMetaclass):
type = tds_base.SYBINTN
size = 4
def get_declaration(self):
return 'INT'
class BigIntType(SqlTypeMetaclass):
type = tds_base.SYBINTN
size = 8
def get_declaration(self):
return 'BIGINT'
class RealType(SqlTypeMetaclass):
def get_declaration(self):
return 'REAL'
class FloatType(SqlTypeMetaclass):
def get_declaration(self):
return 'FLOAT'
class BitSerializer(BasePrimitiveTypeSerializer):
type = tds_base.SYBBIT
declaration = 'BIT'
def write(self, w, value):
w.put_byte(1 if value else 0)
def read(self, r):
return bool(r.get_byte())
BitSerializer.instance = BitSerializer()
class BitNSerializer(BaseTypeSerializerN):
type = tds_base.SYBBITN
subtypes = {1: BitSerializer.instance}
def __init__(self, typ):
super(BitNSerializer, self).__init__(size=1)
self._typ = typ
def __repr__(self):
return 'BitNSerializer({})'.format(self._typ)
BitNSerializer.instance = BitNSerializer(BitType())
class TinyIntSerializer(BasePrimitiveTypeSerializer):
type = tds_base.SYBINT1
declaration = 'TINYINT'
def write(self, w, val):
w.put_byte(val)
def read(self, r):
return r.get_byte()
TinyIntSerializer.instance = TinyIntSerializer()
class SmallIntSerializer(BasePrimitiveTypeSerializer):
type = tds_base.SYBINT2
declaration = 'SMALLINT'
def write(self, w, val):
w.put_smallint(val)
def read(self, r):
return r.get_smallint()
SmallIntSerializer.instance = SmallIntSerializer()
class IntSerializer(BasePrimitiveTypeSerializer):
type = tds_base.SYBINT4
declaration = 'INT'
def write(self, w, val):
w.put_int(val)
def read(self, r):
return r.get_int()
IntSerializer.instance = IntSerializer()
class BigIntSerializer(BasePrimitiveTypeSerializer):
type = tds_base.SYBINT8
declaration = 'BIGINT'
def write(self, w, val):
w.put_int8(val)
def read(self, r):
return r.get_int8()
BigIntSerializer.instance = BigIntSerializer()
class IntNSerializer(BaseTypeSerializerN):
type = tds_base.SYBINTN
subtypes = {
1: TinyIntSerializer.instance,
2: SmallIntSerializer.instance,
4: IntSerializer.instance,
8: BigIntSerializer.instance,
}
type_by_size = {
1: TinyIntType(),
2: SmallIntType(),
4: IntType(),
8: BigIntType(),
}
def __init__(self, typ):
super(IntNSerializer, self).__init__(size=typ.size)
self._typ = typ
@classmethod
def from_stream(cls, r):
size = r.get_byte()
if size not in cls.subtypes:
raise tds_base.InterfaceError('Invalid %s size' % cls.type, size)
return cls(cls.type_by_size[size])
def __repr__(self):
return 'IntN({})'.format(self.size)
class RealSerializer(BasePrimitiveTypeSerializer):
type = tds_base.SYBREAL
declaration = 'REAL'
def write(self, w, val):
w.pack(_flt4_struct, val)
def read(self, r):
return r.unpack(_flt4_struct)[0]
RealSerializer.instance = RealSerializer()
class FloatSerializer(BasePrimitiveTypeSerializer):
type = tds_base.SYBFLT8
declaration = 'FLOAT'
def write(self, w, val):
w.pack(_flt8_struct, val)
def read(self, r):
return r.unpack(_flt8_struct)[0]
FloatSerializer.instance = FloatSerializer()
class FloatNSerializer(BaseTypeSerializerN):
type = tds_base.SYBFLTN
subtypes = {
4: RealSerializer.instance,
8: FloatSerializer.instance,
}
class VarChar(SqlValueMetaclass):
def __init__(self, val, collation=raw_collation):
self._val = val
self._collation = collation
@property
def collation(self):
return self._collation
@property
def val(self):
return self._val
def __str__(self):
return self._val
class VarChar70Serializer(BaseTypeSerializer):
type = tds_base.XSYBVARCHAR
def __init__(self, size, collation=raw_collation, codec=None):
super(VarChar70Serializer, self).__init__(size=size)
self._collation = collation
if codec:
self._codec = codec
else:
self._codec = collation.get_codec()
@classmethod
def from_stream(cls, r):
size = r.get_smallint()
return cls(size, codec=r.session.conn.server_codec)
def write_info(self, w):
w.put_smallint(self.size)
def write(self, w, val):
if val is None:
w.put_smallint(-1)
else:
if w._tds._tds._login.bytes_to_unicode:
val = tds_base.force_unicode(val)
if isinstance(val, six.text_type):
val, _ = self._codec.encode(val)
w.put_smallint(len(val))
w.write(val)
def read(self, r):
size = r.get_smallint()
if size < 0:
return None
if r._session._tds._login.bytes_to_unicode:
return r.read_str(size, self._codec)
else:
return tds_base.readall(r, size)
class VarChar71Serializer(VarChar70Serializer):
@classmethod
def from_stream(cls, r):
size = r.get_smallint()
collation = r.get_collation()
return cls(size, collation)
def write_info(self, w):
super(VarChar71Serializer, self).write_info(w)
w.put_collation(self._collation)
class VarChar72Serializer(VarChar71Serializer):
@classmethod
def from_stream(cls, r):
size = r.get_usmallint()
collation = r.get_collation()
if size == 0xffff:
return VarCharMaxSerializer(collation)
return cls(size, collation)
class VarCharMaxSerializer(VarChar72Serializer):
def __init__(self, collation=raw_collation):
super(VarChar72Serializer, self).__init__(0, collation)
self._chunk_handler = None
def write_info(self, w):
w.put_usmallint(tds_base.PLP_MARKER)
w.put_collation(self._collation)
def write(self, w, val):
if val is None:
w.put_uint8(tds_base.PLP_NULL)
else:
if w._tds._tds._login.bytes_to_unicode:
val = tds_base.force_unicode(val)
if isinstance(val, six.text_type):
val, _ = self._codec.encode(val)
# Putting the actual length here causes an error when bulk inserting:
#
# While reading current row from host, a premature end-of-message
# was encountered--an incoming data stream was interrupted when
# the server expected to see more data. The host program may have
# terminated. Ensure that you are using a supported client
# application programming interface (API).
#
# See https://github.com/tediousjs/tedious/issues/197
# It is not known why this happens, but Microsoft's bcp tool
# uses PLP_UNKNOWN for varchar(max) as well.
w.put_uint8(tds_base.PLP_UNKNOWN)
if len(val) > 0:
w.put_uint(len(val))
w.write(val)
w.put_uint(0)
def read(self, r):
login = r._session._tds._login
r = PlpReader(r)
if r.is_null():
return None
if self._chunk_handler is None:
if login.bytes_to_unicode:
self._chunk_handler = _DefaultChunkedHandler(StringIO())
else:
self._chunk_handler = _DefaultChunkedHandler(BytesIO())
if login.bytes_to_unicode:
for chunk in tds_base.iterdecode(r.chunks(), self._codec):
self._chunk_handler.add_chunk(chunk)
else:
for chunk in r.chunks():
self._chunk_handler.add_chunk(chunk)
return self._chunk_handler.end()
def set_chunk_handler(self, chunk_handler):
self._chunk_handler = chunk_handler
class NVarChar70Serializer(BaseTypeSerializer):
type = tds_base.XSYBNVARCHAR
def __init__(self, size, collation=raw_collation):
super(NVarChar70Serializer, self).__init__(size=size)
self._collation = collation
@classmethod
def from_stream(cls, r):
size = r.get_usmallint()
return cls(size // 2)
def write_info(self, w):
w.put_usmallint(self.size * 2)
def write(self, w, val):
if val is None:
w.put_usmallint(0xffff)
else:
if isinstance(val, bytes):
val = tds_base.force_unicode(val)
buf, _ = ucs2_codec.encode(val)
l = len(buf)
w.put_usmallint(l)
w.write(buf)
def read(self, r):
size = r.get_usmallint()
if size == 0xffff:
return None
return r.read_str(size, ucs2_codec)
class NVarChar71Serializer(NVarChar70Serializer):
@classmethod
def from_stream(cls, r):
size = r.get_usmallint()
collation = r.get_collation()
return cls(size // 2, collation)
def write_info(self, w):
super(NVarChar71Serializer, self).write_info(w)
w.put_collation(self._collation)
class NVarChar72Serializer(NVarChar71Serializer):
@classmethod
def from_stream(cls, r):
size = r.get_usmallint()
collation = r.get_collation()
if size == 0xffff:
return NVarCharMaxSerializer(collation=collation)
return cls(size // 2, collation=collation)
class NVarCharMaxSerializer(NVarChar72Serializer):
def __init__(self, collation=raw_collation):
super(NVarCharMaxSerializer, self).__init__(size=-1, collation=collation)
self._chunk_handler = _DefaultChunkedHandler(StringIO())
def __repr__(self):
return 'NVarCharMax(s={},c={})'.format(self.size, repr(self._collation))
def get_typeid(self):
return tds_base.SYBNTEXT
def write_info(self, w):
w.put_usmallint(tds_base.PLP_MARKER)
w.put_collation(self._collation)
def write(self, w, val):
if val is None:
w.put_uint8(tds_base.PLP_NULL)
else:
if isinstance(val, bytes):
val = tds_base.force_unicode(val)
val, _ = ucs2_codec.encode(val)
# Putting the actual length here causes an error when bulk inserting:
#
# While reading current row from host, a premature end-of-message
# was encountered--an incoming data stream was interrupted when
# the server expected to see more data. The host program may have
# terminated. Ensure that you are using a supported client
# application programming interface (API).
#
# See https://github.com/tediousjs/tedious/issues/197
# It is not known why this happens, but Microsoft's bcp tool
# uses PLP_UNKNOWN for nvarchar(max) as well.
w.put_uint8(tds_base.PLP_UNKNOWN)
if len(val) > 0:
w.put_uint(len(val))
w.write(val)
w.put_uint(0)
def read(self, r):
r = PlpReader(r)
if r.is_null():
return None
for chunk in tds_base.iterdecode(r.chunks(), ucs2_codec):
self._chunk_handler.add_chunk(chunk)
return self._chunk_handler.end()
def set_chunk_handler(self, chunk_handler):
self._chunk_handler = chunk_handler
class XmlSerializer(NVarCharMaxSerializer):
type = tds_base.SYBMSXML
declaration = 'XML'
def __init__(self, schema=None):
super(XmlSerializer, self).__init__()
self._schema = schema or {}
def __repr__(self):
return 'XmlSerializer(schema={})'.format(repr(self._schema))
def get_typeid(self):
return self.type
@classmethod
def from_stream(cls, r):
has_schema = r.get_byte()
schema = {}
if has_schema:
schema['dbname'] = r.read_ucs2(r.get_byte())
schema['owner'] = r.read_ucs2(r.get_byte())
schema['collection'] = r.read_ucs2(r.get_smallint())
return cls(schema)
def write_info(self, w):
if self._schema:
w.put_byte(1)
w.put_byte(len(self._schema['dbname']))
w.write_ucs2(self._schema['dbname'])
w.put_byte(len(self._schema['owner']))
w.write_ucs2(self._schema['owner'])
w.put_usmallint(len(self._schema['collection']))
w.write_ucs2(self._schema['collection'])
else:
w.put_byte(0)
class Text70Serializer(BaseTypeSerializer):
type = tds_base.SYBTEXT
declaration = 'TEXT'
def __init__(self, size=0, table_name='', collation=raw_collation, codec=None):
super(Text70Serializer, self).__init__(size=size)
self._table_name = table_name
self._collation = collation
if codec:
self._codec = codec
else:
self._codec = collation.get_codec()
self._chunk_handler = None
def __repr__(self):
return 'Text70(size={},table_name={},codec={})'.format(self.size, self._table_name, self._codec)
@classmethod
def from_stream(cls, r):
size = r.get_int()
table_name = r.read_ucs2(r.get_smallint())
return cls(size, table_name, codec=r.session.conn.server_codec)
def write_info(self, w):
w.put_int(self.size)
def write(self, w, val):
if val is None:
w.put_int(-1)
else:
if w._tds._tds._login.bytes_to_unicode:
val = tds_base.force_unicode(val)
if isinstance(val, six.text_type):
val, _ = self._codec.encode(val)
w.put_int(len(val))
w.write(val)
def read(self, r):
size = r.get_byte()
if size == 0:
return None
tds_base.readall(r, size) # textptr
tds_base.readall(r, 8) # timestamp
colsize = r.get_int()
if self._chunk_handler is None:
if r._session._tds._login.bytes_to_unicode:
self._chunk_handler = _DefaultChunkedHandler(StringIO())
else:
self._chunk_handler = _DefaultChunkedHandler(BytesIO())
if r._session._tds._login.bytes_to_unicode:
for chunk in tds_base.iterdecode(read_chunks(r, colsize), self._codec):
self._chunk_handler.add_chunk(chunk)
else:
for chunk in read_chunks(r, colsize):
self._chunk_handler.add_chunk(chunk)
return self._chunk_handler.end()
def set_chunk_handler(self, chunk_handler):
self._chunk_handler = chunk_handler
class Text71Serializer(Text70Serializer):
def __repr__(self):
return 'Text71(size={}, table_name={}, collation={})'.format(
self.size, self._table_name, repr(self._collation)
)
@classmethod
def from_stream(cls, r):
size = r.get_int()
collation = r.get_collation()
table_name = r.read_ucs2(r.get_smallint())
return cls(size, table_name, collation)
def write_info(self, w):
w.put_int(self.size)
w.put_collation(self._collation)
class Text72Serializer(Text71Serializer):
def __init__(self, size=0, table_name_parts=(), collation=raw_collation):
super(Text72Serializer, self).__init__(size=size, table_name='.'.join(table_name_parts), collation=collation)
self._table_name_parts = table_name_parts
@classmethod
def from_stream(cls, r):
size = r.get_int()
collation = r.get_collation()
num_parts = r.get_byte()
parts = []
for _ in range(num_parts):
parts.append(r.read_ucs2(r.get_smallint()))
return cls(size, parts, collation)
class NText70Serializer(BaseTypeSerializer):
type = tds_base.SYBNTEXT
declaration = 'NTEXT'
def __init__(self, size=0, table_name='', collation=raw_collation):
super(NText70Serializer, self).__init__(size=size)
self._collation = collation
self._table_name = table_name
self._chunk_handler = _DefaultChunkedHandler(StringIO())
def __repr__(self):
return 'NText70(size={}, table_name={})'.format(self.size, self._table_name)
@classmethod
def from_stream(cls, r):
size = r.get_int()
table_name = r.read_ucs2(r.get_smallint())
return cls(size, table_name)
def read(self, r):
textptr_size = r.get_byte()
if textptr_size == 0:
return None
tds_base.readall(r, textptr_size) # textptr
tds_base.readall(r, 8) # timestamp
colsize = r.get_int()
for chunk in tds_base.iterdecode(read_chunks(r, colsize), ucs2_codec):
self._chunk_handler.add_chunk(chunk)
return self._chunk_handler.end()
def write_info(self, w):
w.put_int(self.size * 2)
def write(self, w, val):
if val is None:
w.put_int(-1)
else:
w.put_int(len(val) * 2)
w.write_ucs2(val)
def set_chunk_handler(self, chunk_handler):
self._chunk_handler = chunk_handler
class NText71Serializer(NText70Serializer):
def __repr__(self):
return 'NText71(size={}, table_name={}, collation={})'.format(self.size,
self._table_name,
repr(self._collation))
@classmethod
def from_stream(cls, r):
size = r.get_int()
collation = r.get_collation()
table_name = r.read_ucs2(r.get_smallint())
return cls(size, table_name, collation)
def write_info(self, w):
w.put_int(self.size)
w.put_collation(self._collation)
class NText72Serializer(NText71Serializer):
def __init__(self, size=0, table_name_parts=(), collation=raw_collation):
super(NText72Serializer, self).__init__(size=size, collation=collation)
self._table_name_parts = table_name_parts
def __repr__(self):
return 'NText72Serializer(s={},table_name={},coll={})'.format(
self.size, self._table_name_parts, self._collation)
@classmethod
def from_stream(cls, r):
size = r.get_int()
collation = r.get_collation()
num_parts = r.get_byte()
parts = []
for _ in range(num_parts):
parts.append(r.read_ucs2(r.get_smallint()))
return cls(size, parts, collation)
class Binary(bytes, SqlValueMetaclass):
def __repr__(self):
return 'Binary({0})'.format(super(Binary, self).__repr__())
class VarBinarySerializer(BaseTypeSerializer):
type = tds_base.XSYBVARBINARY
def __init__(self, size):
super(VarBinarySerializer, self).__init__(size=size)
def __repr__(self):
return 'VarBinary({})'.format(self.size)
@classmethod
def from_stream(cls, r):
size = r.get_usmallint()
return cls(size)
def write_info(self, w):
w.put_usmallint(self.size)
def write(self, w, val):
if val is None:
w.put_usmallint(0xffff)
else:
w.put_usmallint(len(val))
w.write(val)
def read(self, r):
size = r.get_usmallint()
if size == 0xffff:
return None
return tds_base.readall(r, size)
class VarBinarySerializer72(VarBinarySerializer):
def __repr__(self):
return 'VarBinary72({})'.format(self.size)
@classmethod
def from_stream(cls, r):
size = r.get_usmallint()
if size == 0xffff:
return VarBinarySerializerMax()
return cls(size)
class VarBinarySerializerMax(VarBinarySerializer):
def __init__(self):
super(VarBinarySerializerMax, self).__init__(0)
self._chunk_handler = _DefaultChunkedHandler(BytesIO())
def __repr__(self):
return 'VarBinaryMax()'
def write_info(self, w):
w.put_usmallint(tds_base.PLP_MARKER)
def write(self, w, val):
if val is None:
w.put_uint8(tds_base.PLP_NULL)
else:
w.put_uint8(len(val))
if val:
w.put_uint(len(val))
w.write(val)
w.put_uint(0)
def read(self, r):
r = PlpReader(r)
if r.is_null():
return None
for chunk in r.chunks():
self._chunk_handler.add_chunk(chunk)
return self._chunk_handler.end()
def set_chunk_handler(self, chunk_handler):
self._chunk_handler = chunk_handler
class UDT72Serializer(BaseTypeSerializer):
# Data type definition stream used for UDT_INFO in TYPE_INFO
# https://msdn.microsoft.com/en-us/library/a57df60e-d0a6-4e7e-a2e5-ccacd277c673/
def __init__(self, max_byte_size, db_name, schema_name, type_name,
assembly_qualified_name):
self.max_byte_size = max_byte_size
self.db_name = db_name
self.schema_name = schema_name
self.type_name = type_name
self.assembly_qualified_name = assembly_qualified_name
super(UDT72Serializer, self).__init__()
def __repr__(self):
return ('UDT72Serializer(max_byte_size={}, db_name={}, '
'schema_name={}, type_name={}, '
'assembly_qualified_name={})'.format(
*map(repr, (
self.max_byte_size, self.db_name, self.schema_name,
self.type_name, self.assembly_qualified_name)))
)
@classmethod
def from_stream(cls, r):
# MAX_BYTE_SIZE
max_byte_size = r.get_usmallint()
assert max_byte_size == 0xffff or 1 < max_byte_size < 8000
# DB_NAME -- B_VARCHAR
db_name = r.read_ucs2(r.get_byte())
# SCHEMA_NAME -- B_VARCHAR
schema_name = r.read_ucs2(r.get_byte())
# TYPE_NAME -- B_VARCHAR
type_name = r.read_ucs2(r.get_byte())
# UDT_METADATA --
# a US_VARCHAR (2 bytes length prefix)
# containing ASSEMBLY_QUALIFIED_NAME
assembly_qualified_name = r.read_ucs2(r.get_smallint())
return cls(max_byte_size, db_name, schema_name, type_name,
assembly_qualified_name)
def read(self, r):
r = PlpReader(r)
if r.is_null():
return None
return b''.join(r.chunks())
class UDT72SerializerMax(UDT72Serializer):
def __init__(self, *args, **kwargs):
super(UDT72SerializerMax, self).__init__(0, *args, **kwargs)
class Image70Serializer(BaseTypeSerializer):
type = tds_base.SYBIMAGE
declaration = 'IMAGE'
def __init__(self, size=0, table_name=''):
super(Image70Serializer, self).__init__(size=size)
self._table_name = table_name
self._chunk_handler = _DefaultChunkedHandler(BytesIO())
def __repr__(self):
return 'Image70(tn={},s={})'.format(repr(self._table_name), self.size)
@classmethod
def from_stream(cls, r):
size = r.get_int()
table_name = r.read_ucs2(r.get_smallint())
return cls(size, table_name)
def read(self, r):
size = r.get_byte()
if size == 16: # Jeff's hack
tds_base.readall(r, 16) # textptr
tds_base.readall(r, 8) # timestamp
colsize = r.get_int()
for chunk in read_chunks(r, colsize):
self._chunk_handler.add_chunk(chunk)
return self._chunk_handler.end()
else:
return None
def write(self, w, val):
if val is None:
w.put_int(-1)
return
w.put_int(len(val))
w.write(val)
def write_info(self, w):
w.put_int(self.size)
def set_chunk_handler(self, chunk_handler):
self._chunk_handler = chunk_handler
class Image72Serializer(Image70Serializer):
def __init__(self, size=0, parts=()):
super(Image72Serializer, self).__init__(size=size, table_name='.'.join(parts))
self._parts = parts
def __repr__(self):
return 'Image72(p={},s={})'.format(self._parts, self.size)
@classmethod
def from_stream(cls, r):
size = r.get_int()
num_parts = r.get_byte()
parts = []
for _ in range(num_parts):
parts.append(r.read_ucs2(r.get_usmallint()))
return Image72Serializer(size, parts)
_datetime_base_date = datetime.datetime(1900, 1, 1)
class SmallDateTimeType(SqlTypeMetaclass):
def get_declaration(self):
return 'SMALLDATETIME'
class DateTimeType(SqlTypeMetaclass):
def get_declaration(self):
return 'DATETIME'
class SmallDateTime(SqlValueMetaclass):
"""Corresponds to MSSQL smalldatetime"""
def __init__(self, days, minutes):
"""
@param days: Days since 1900-01-01
@param minutes: Minutes since 00:00:00
"""
self._days = days
self._minutes = minutes
@property
def days(self):
return self._days
@property
def minutes(self):
return self._minutes
def to_pydatetime(self):
return _datetime_base_date + datetime.timedelta(days=self._days, minutes=self._minutes)
@classmethod
def from_pydatetime(cls, dt):
days = (dt - _datetime_base_date).days
minutes = dt.hour * 60 + dt.minute
return cls(days=days, minutes=minutes)
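# Illustrative round trip (days since 1900-01-01 plus minutes since
# midnight; seconds and below are dropped by this representation):
#
#   sdt = SmallDateTime.from_pydatetime(datetime.datetime(1900, 1, 2, 1, 30))
#   # sdt.days == 1, sdt.minutes == 90
#   sdt.to_pydatetime()  # -> datetime.datetime(1900, 1, 2, 1, 30)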
class BaseDateTimeSerializer(BaseTypeSerializer):
def write(self, w, value):
raise NotImplementedError
def write_info(self, w):
raise NotImplementedError
def read(self, r):
raise NotImplementedError
@classmethod
def from_stream(cls, r):
raise NotImplementedError
class SmallDateTimeSerializer(BasePrimitiveTypeSerializer, BaseDateTimeSerializer):
type = tds_base.SYBDATETIME4
declaration = 'SMALLDATETIME'
_struct = struct.Struct('<HH')
def write(self, w, val):
if val.tzinfo:
if not w.session.use_tz:
raise tds_base.DataError('Timezone-aware datetime is used without specifying use_tz')
val = val.astimezone(w.session.use_tz).replace(tzinfo=None)
dt = SmallDateTime.from_pydatetime(val)
w.pack(self._struct, dt.days, dt.minutes)
def read(self, r):
days, minutes = r.unpack(self._struct)
dt = SmallDateTime(days=days, minutes=minutes)
tzinfo = None
if r.session.tzinfo_factory is not None:
tzinfo = r.session.tzinfo_factory(0)
return dt.to_pydatetime().replace(tzinfo=tzinfo)
SmallDateTimeSerializer.instance = SmallDateTimeSerializer()
class DateTime(SqlValueMetaclass):
"""Corresponds to MSSQL datetime"""
MIN_PYDATETIME = datetime.datetime(1753, 1, 1, 0, 0, 0)
MAX_PYDATETIME = datetime.datetime(9999, 12, 31, 23, 59, 59, 997000)
def __init__(self, days, time_part):
"""
@param days: Days since 1900-01-01
@param time_part: Number of 1/300 of seconds since 00:00:00
"""
self._days = days
self._time_part = time_part
@property
def days(self):
return self._days
@property
def time_part(self):
return self._time_part
def to_pydatetime(self):
ms = int(round(self._time_part % 300 * 10 / 3.0))
secs = self._time_part // 300
return _datetime_base_date + datetime.timedelta(days=self._days, seconds=secs, milliseconds=ms)
@classmethod
def from_pydatetime(cls, dt):
if not (cls.MIN_PYDATETIME <= dt <= cls.MAX_PYDATETIME):
raise tds_base.DataError('Datetime is out of range')
days = (dt - _datetime_base_date).days
ms = dt.microsecond // 1000
tm = (dt.hour * 60 * 60 + dt.minute * 60 + dt.second) * 300 + int(round(ms * 3 / 10.0))
return cls(days=days, time_part=tm)
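# The time part is stored in 1/300-second ticks, hence the rounding above.
# Illustrative example:
#
#   dt = DateTime.from_pydatetime(datetime.datetime(1900, 1, 1, 0, 0, 1))
#   # dt.days == 0, dt.time_part == 300 (one second == 300 ticks)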
class DateTimeSerializer(BasePrimitiveTypeSerializer, BaseDateTimeSerializer):
type = tds_base.SYBDATETIME
declaration = 'DATETIME'
_struct = struct.Struct('<ll')
def write(self, w, val):
if val.tzinfo:
if not w.session.use_tz:
raise tds_base.DataError('Timezone-aware datetime is used without specifying use_tz')
val = val.astimezone(w.session.use_tz).replace(tzinfo=None)
w.write(self.encode(val))
def read(self, r):
days, t = r.unpack(self._struct)
tzinfo = None
if r.session.tzinfo_factory is not None:
tzinfo = r.session.tzinfo_factory(0)
return _applytz(self.decode(days, t), tzinfo)
@classmethod
def encode(cls, value):
if type(value) == datetime.date:
value = datetime.datetime.combine(value, datetime.time(0, 0, 0))
dt = DateTime.from_pydatetime(value)
return cls._struct.pack(dt.days, dt.time_part)
@classmethod
def decode(cls, days, time_part):
dt = DateTime(days=days, time_part=time_part)
return dt.to_pydatetime()
DateTimeSerializer.instance = DateTimeSerializer()
class DateTimeNSerializer(BaseTypeSerializerN, BaseDateTimeSerializer):
type = tds_base.SYBDATETIMN
subtypes = {
4: SmallDateTimeSerializer.instance,
8: DateTimeSerializer.instance,
}
_datetime2_base_date = datetime.datetime(1, 1, 1)
class DateType(SqlTypeMetaclass):
type = tds_base.SYBMSDATE
def get_declaration(self):
return "DATE"
class Date(SqlValueMetaclass):
MIN_PYDATE = datetime.date(1, 1, 1)
MAX_PYDATE = datetime.date(9999, 12, 31)
def __init__(self, days):
"""
Creates sql date object
@param days: Days since 0001-01-01
"""
self._days = days
@property
def days(self):
return self._days
def to_pydate(self):
"""
Converts sql date to Python date
@return: Python date
"""
return (_datetime2_base_date + datetime.timedelta(days=self._days)).date()
@classmethod
def from_pydate(cls, pydate):
"""
Creates sql date object from Python date object.
@param pydate: Python date
@return: sql date
"""
return cls(days=(datetime.datetime.combine(pydate, datetime.time(0, 0, 0)) - _datetime2_base_date).days)
class TimeType(SqlTypeMetaclass):
type = tds_base.SYBMSTIME
def __init__(self, precision=7):
self._precision = precision
@property
def precision(self):
return self._precision
def get_declaration(self):
return 'TIME({0})'.format(self.precision)
class Time(SqlValueMetaclass):
def __init__(self, nsec):
"""
Creates sql time object.
Maximum precision which sql server supports is 100 nanoseconds.
Values more precise than 100 nanoseconds will be truncated.
@param nsec: Nanoseconds from 00:00:00
"""
self._nsec = nsec
@property
def nsec(self):
return self._nsec
def to_pytime(self):
"""
Converts sql time object into Python's time object
this will truncate nanoseconds to microseconds
@return: naive time
"""
nanoseconds = self._nsec
hours = nanoseconds // 1000000000 // 60 // 60
nanoseconds -= hours * 60 * 60 * 1000000000
minutes = nanoseconds // 1000000000 // 60
nanoseconds -= minutes * 60 * 1000000000
seconds = nanoseconds // 1000000000
nanoseconds -= seconds * 1000000000
return datetime.time(hours, minutes, seconds, nanoseconds // 1000)
@classmethod
def from_pytime(cls, pytime):
"""
Converts Python time object to sql time object
ignoring timezone
@param pytime: Python time object
@return: sql time object
"""
secs = pytime.hour * 60 * 60 + pytime.minute * 60 + pytime.second
nsec = secs * 10 ** 9 + pytime.microsecond * 1000
return cls(nsec=nsec)
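# Illustrative example (precision finer than 100 nanoseconds would be
# truncated on the server side; this class stores whole nanoseconds):
#
#   t = Time.from_pytime(datetime.time(0, 0, 1, 500))
#   # t.nsec == 1 * 10**9 + 500 * 1000 == 1000500000
#   t.to_pytime()  # -> datetime.time(0, 0, 1, 500)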
class DateTime2Type(SqlTypeMetaclass):
type = tds_base.SYBMSDATETIME2
def __init__(self, precision=7):
self._precision = precision
@property
def precision(self):
return self._precision
def get_declaration(self):
return 'DATETIME2({0})'.format(self.precision)
class DateTime2(SqlValueMetaclass):
type = tds_base.SYBMSDATETIME2
def __init__(self, date, time):
"""
Creates datetime2 object
@param date: sql date object
@param time: sql time object
"""
self._date = date
self._time = time
@property
def date(self):
return self._date
@property
def time(self):
return self._time
def to_pydatetime(self):
"""
Converts datetime2 object into Python's datetime.datetime object
@return: naive datetime.datetime
"""
return datetime.datetime.combine(self._date.to_pydate(), self._time.to_pytime())
@classmethod
def from_pydatetime(cls, pydatetime):
"""
Creates sql datetime2 object from Python datetime object
ignoring timezone
@param pydatetime: Python datetime object
@return: sql datetime2 object
"""
return cls(date=Date.from_pydate(pydatetime.date()),
time=Time.from_pytime(pydatetime.time()))
class DateTimeOffsetType(SqlTypeMetaclass):
type = tds_base.SYBMSDATETIMEOFFSET
def __init__(self, precision=7):
self._precision = precision
@property
def precision(self):
return self._precision
def get_declaration(self):
return 'DATETIMEOFFSET({0})'.format(self.precision)
class DateTimeOffset(SqlValueMetaclass):
def __init__(self, date, time, offset):
"""
Creates datetimeoffset object
@param date: sql date object in UTC
@param time: sql time object in UTC
@param offset: time zone offset in minutes
"""
self._date = date
self._time = time
self._offset = offset
def to_pydatetime(self):
"""
Converts datetimeoffset object into Python's datetime.datetime object
@return: time zone aware datetime.datetime
"""
dt = datetime.datetime.combine(self._date.to_pydate(), self._time.to_pytime())
from .tz import FixedOffsetTimezone
return dt.replace(tzinfo=_utc).astimezone(FixedOffsetTimezone(self._offset))
class BaseDateTime73Serializer(BaseTypeSerializer):
def write(self, w, value):
raise NotImplementedError
def write_info(self, w):
raise NotImplementedError
def read(self, r):
raise NotImplementedError
@classmethod
def from_stream(cls, r):
raise NotImplementedError
_precision_to_len = {
0: 3,
1: 3,
2: 3,
3: 4,
4: 4,
5: 5,
6: 5,
7: 5,
}
def _write_time(self, w, t, prec):
val = t.nsec // (10 ** (9 - prec))
w.write(struct.pack('<Q', val)[:self._precision_to_len[prec]])
@staticmethod
def _read_time(r, size, prec):
time_buf = tds_base.readall(r, size)
val = _decode_num(time_buf)
val *= 10 ** (7 - prec)
nanoseconds = val * 100
return Time(nsec=nanoseconds)
@staticmethod
def _write_date(w, value):
days = value.days
buf = struct.pack('<l', days)[:3]
w.write(buf)
@staticmethod
def _read_date(r):
days = _decode_num(tds_base.readall(r, 3))
return Date(days=days)
class MsDateSerializer(BasePrimitiveTypeSerializer, BaseDateTime73Serializer):
type = tds_base.SYBMSDATE
declaration = 'DATE'
def __init__(self, typ):
super(MsDateSerializer, self).__init__()
self._typ = typ
@classmethod
def from_stream(cls, r):
return cls(DateType())
def write(self, w, value):
if value is None:
w.put_byte(0)
else:
w.put_byte(3)
self._write_date(w, Date.from_pydate(value))
def read_fixed(self, r):
return self._read_date(r).to_pydate()
def read(self, r):
size = r.get_byte()
if size == 0:
return None
return self._read_date(r).to_pydate()
class MsTimeSerializer(BaseDateTime73Serializer):
type = tds_base.SYBMSTIME
def __init__(self, typ):
super(MsTimeSerializer, self).__init__(precision=typ.precision, size=self._precision_to_len[typ.precision])
self._typ = typ
@classmethod
def read_type(cls, r):
prec = r.get_byte()
return TimeType(precision=prec)
@classmethod
def from_stream(cls, r):
return cls(cls.read_type(r))
def write_info(self, w):
w.put_byte(self._typ.precision)
def write(self, w, value):
if value is None:
w.put_byte(0)
else:
if value.tzinfo:
if not w.session.use_tz:
raise tds_base.DataError('Timezone-aware datetime is used without specifying use_tz')
value = value.astimezone(w.session.use_tz).replace(tzinfo=None)
w.put_byte(self.size)
self._write_time(w, Time.from_pytime(value), self._typ.precision)
def read_fixed(self, r, size):
res = self._read_time(r, size, self._typ.precision).to_pytime()
if r.session.tzinfo_factory is not None:
tzinfo = r.session.tzinfo_factory(0)
res = res.replace(tzinfo=tzinfo)
return res
def read(self, r):
size = r.get_byte()
if size == 0:
return None
return self.read_fixed(r, size)
class DateTime2Serializer(BaseDateTime73Serializer):
type = tds_base.SYBMSDATETIME2
def __init__(self, typ):
super(DateTime2Serializer, self).__init__(precision=typ.precision,
size=self._precision_to_len[typ.precision] + 3)
self._typ = typ
@classmethod
def from_stream(cls, r):
prec = r.get_byte()
return cls(DateTime2Type(precision=prec))
def write_info(self, w):
w.put_byte(self._typ.precision)
def write(self, w, value):
if value is None:
w.put_byte(0)
else:
if value.tzinfo:
if not w.session.use_tz:
raise tds_base.DataError('Timezone-aware datetime is used without specifying use_tz')
value = value.astimezone(w.session.use_tz).replace(tzinfo=None)
w.put_byte(self.size)
self._write_time(w, Time.from_pytime(value), self._typ.precision)
self._write_date(w, Date.from_pydate(value))
def read_fixed(self, r, size):
time = self._read_time(r, size - 3, self._typ.precision)
date = self._read_date(r)
dt = DateTime2(date=date, time=time)
res = dt.to_pydatetime()
if r.session.tzinfo_factory is not None:
tzinfo = r.session.tzinfo_factory(0)
res = res.replace(tzinfo=tzinfo)
return res
def read(self, r):
size = r.get_byte()
if size == 0:
return None
return self.read_fixed(r, size)
class DateTimeOffsetSerializer(BaseDateTime73Serializer):
type = tds_base.SYBMSDATETIMEOFFSET
def __init__(self, typ):
super(DateTimeOffsetSerializer, self).__init__(precision=typ.precision,
size=self._precision_to_len[typ.precision] + 5)
self._typ = typ
@classmethod
def from_stream(cls, r):
prec = r.get_byte()
return cls(DateTimeOffsetType(precision=prec))
def write_info(self, w):
w.put_byte(self._typ.precision)
def write(self, w, value):
if value is None:
w.put_byte(0)
else:
utcoffset = value.utcoffset()
value = value.astimezone(_utc).replace(tzinfo=None)
w.put_byte(self.size)
self._write_time(w, Time.from_pytime(value), self._typ.precision)
self._write_date(w, Date.from_pydate(value))
w.put_smallint(int(tds_base.total_seconds(utcoffset)) // 60)
def read_fixed(self, r, size):
time = self._read_time(r, size - 5, self._typ.precision)
date = self._read_date(r)
offset = r.get_smallint()
dt = DateTimeOffset(date=date, time=time, offset=offset)
return dt.to_pydatetime()
def read(self, r):
size = r.get_byte()
if size == 0:
return None
return self.read_fixed(r, size)
class MsDecimalSerializer(BaseTypeSerializer):
type = tds_base.SYBDECIMAL
_max_size = 17
_bytes_per_prec = [
#
# precision can't be 0, but using a value > 0 ensures we don't
# crash (core dump) if, due to some bug, it is 0...
#
1,
5, 5, 5, 5, 5, 5, 5, 5, 5,
9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
13, 13, 13, 13, 13, 13, 13, 13, 13,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
]
_info_struct = struct.Struct('BBB')
def __init__(self, precision=18, scale=0):
if precision > 38:
raise tds_base.DataError('Precision of decimal value is out of range')
super(MsDecimalSerializer, self).__init__(precision=precision,
scale=scale,
size=self._bytes_per_prec[precision])
def __repr__(self):
return 'MsDecimal(scale={}, prec={})'.format(self.scale, self.precision)
@classmethod
def from_value(cls, value):
sql_type = DecimalType.from_value(value)
return cls(scale=sql_type.scale, precision=sql_type.precision)
@classmethod
def from_stream(cls, r):
size, prec, scale = r.unpack(cls._info_struct)
return cls(scale=scale, precision=prec)
def write_info(self, w):
w.pack(self._info_struct, self.size, self.precision, self.scale)
def write(self, w, value):
with decimal.localcontext() as context:
context.prec = 38
if value is None:
w.put_byte(0)
return
if not isinstance(value, decimal.Decimal):
value = decimal.Decimal(value)
value = value.normalize()
scale = self.scale
size = self.size
w.put_byte(size)
val = value
positive = 1 if val > 0 else 0
w.put_byte(positive) # sign
if not positive:
val *= -1
size -= 1
val *= 10 ** scale
for i in range(size):
w.put_byte(int(val % 256))
val //= 256
assert val == 0
def _decode(self, positive, buf):
val = _decode_num(buf)
val = decimal.Decimal(val)
with decimal.localcontext() as ctx:
ctx.prec = 38
if not positive:
val *= -1
val /= 10 ** self._scale
return val
def read_fixed(self, r, size):
positive = r.get_byte()
buf = tds_base.readall(r, size - 1)
return self._decode(positive, buf)
def read(self, r):
size = r.get_byte()
if size <= 0:
return None
return self.read_fixed(r, size)
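# Worked example of the wire format implemented above (an illustration, not
# quoted from the spec): Decimal('1.5') sent with scale=1 and precision <= 9
# uses size 5: one sign byte 0x01 (positive) followed by the magnitude
# 15 == 1.5 * 10**1 as 4 little-endian bytes: 0x0F 0x00 0x00 0x00.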
class Money4Serializer(BasePrimitiveTypeSerializer):
type = tds_base.SYBMONEY4
declaration = 'SMALLMONEY'
def read(self, r):
return decimal.Decimal(r.get_int()) / 10000
def write(self, w, val):
val = int(val * 10000)
w.put_int(val)
Money4Serializer.instance = Money4Serializer()
class Money8Serializer(BasePrimitiveTypeSerializer):
type = tds_base.SYBMONEY
declaration = 'MONEY'
_struct = struct.Struct('<lL')
def read(self, r):
hi, lo = r.unpack(self._struct)
val = hi * (2 ** 32) + lo
return decimal.Decimal(val) / 10000
def write(self, w, val):
# convert to an integer count of 1/10000 units first so that the
# floor division below is exact for negative amounts as well
val = int(val * 10000)
hi = val // (2 ** 32)
lo = val % (2 ** 32)
w.pack(self._struct, hi, lo)
Money8Serializer.instance = Money8Serializer()
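# Illustration: MONEY keeps the amount scaled by 10000 in a 64-bit integer
# split into a signed high half and an unsigned low half, so Decimal('1.23')
# travels as 12300, i.e. hi=0x00000000, lo=0x0000300C.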
class MoneyNSerializer(BaseTypeSerializerN):
type = tds_base.SYBMONEYN
subtypes = {
4: Money4Serializer.instance,
8: Money8Serializer.instance,
}
class MsUniqueSerializer(BaseTypeSerializer):
type = tds_base.SYBUNIQUE
declaration = 'UNIQUEIDENTIFIER'
instance = None
def __repr__(self):
return 'MsUniqueSerializer()'
@classmethod
def from_stream(cls, r):
size = r.get_byte()
if size != 16:
raise tds_base.InterfaceError('Invalid size of UNIQUEIDENTIFIER field')
return cls.instance
def write_info(self, w):
w.put_byte(16)
def write(self, w, value):
if value is None:
w.put_byte(0)
else:
w.put_byte(16)
w.write(value.bytes_le)
@staticmethod
def read_fixed(r, size):
return uuid.UUID(bytes_le=tds_base.readall(r, size))
def read(self, r):
size = r.get_byte()
if size == 0:
return None
if size != 16:
raise tds_base.InterfaceError('Invalid size of UNIQUEIDENTIFIER field')
return self.read_fixed(r, size)
MsUniqueSerializer.instance = MsUniqueSerializer()
def _variant_read_str(r, size):
collation = r.get_collation()
r.get_usmallint()
return r.read_str(size, collation.get_codec())
def _variant_read_nstr(r, size):
r.get_collation()
r.get_usmallint()
return r.read_str(size, ucs2_codec)
def _variant_read_decimal(r, size):
prec, scale = r.unpack(VariantSerializer.decimal_info_struct)
return MsDecimalSerializer(precision=prec, scale=scale).read_fixed(r, size)
def _variant_read_binary(r, size):
r.get_usmallint()
return tds_base.readall(r, size)
class VariantSerializer(BaseTypeSerializer):
type = tds_base.SYBVARIANT
declaration = 'SQL_VARIANT'
decimal_info_struct = struct.Struct('BB')
_type_map = {
tds_base.GUIDTYPE: lambda r, size: MsUniqueSerializer.instance.read_fixed(r, size),
tds_base.BITTYPE: lambda r, size: BitSerializer.instance.read(r),
tds_base.INT1TYPE: lambda r, size: TinyIntSerializer.instance.read(r),
tds_base.INT2TYPE: lambda r, size: SmallIntSerializer.instance.read(r),
tds_base.INT4TYPE: lambda r, size: IntSerializer.instance.read(r),
tds_base.INT8TYPE: lambda r, size: BigIntSerializer.instance.read(r),
tds_base.DATETIMETYPE: lambda r, size: DateTimeSerializer.instance.read(r),
tds_base.DATETIM4TYPE: lambda r, size: SmallDateTimeSerializer.instance.read(r),
tds_base.FLT4TYPE: lambda r, size: RealSerializer.instance.read(r),
tds_base.FLT8TYPE: lambda r, size: FloatSerializer.instance.read(r),
tds_base.MONEYTYPE: lambda r, size: Money8Serializer.instance.read(r),
tds_base.MONEY4TYPE: lambda r, size: Money4Serializer.instance.read(r),
tds_base.DATENTYPE: lambda r, size: MsDateSerializer(DateType()).read_fixed(r),
tds_base.TIMENTYPE: lambda r, size: MsTimeSerializer(TimeType(precision=r.get_byte())).read_fixed(r, size),
tds_base.DATETIME2NTYPE: lambda r, size: DateTime2Serializer(
DateTime2Type(precision=r.get_byte())).read_fixed(r, size),
tds_base.DATETIMEOFFSETNTYPE: lambda r, size: DateTimeOffsetSerializer(
DateTimeOffsetType(precision=r.get_byte())).read_fixed(r, size),
tds_base.BIGVARBINTYPE: _variant_read_binary,
tds_base.BIGBINARYTYPE: _variant_read_binary,
tds_base.NUMERICNTYPE: _variant_read_decimal,
tds_base.DECIMALNTYPE: _variant_read_decimal,
tds_base.BIGVARCHRTYPE: _variant_read_str,
tds_base.BIGCHARTYPE: _variant_read_str,
tds_base.NVARCHARTYPE: _variant_read_nstr,
tds_base.NCHARTYPE: _variant_read_nstr,
}
@classmethod
def from_stream(cls, r):
size = r.get_int()
return VariantSerializer(size)
def write_info(self, w):
w.put_int(self.size)
def read(self, r):
size = r.get_int()
if size == 0:
return None
type_id = r.get_byte()
prop_bytes = r.get_byte()
type_factory = self._type_map.get(type_id)
if not type_factory:
r.session.bad_stream('Variant type invalid', type_id)
return type_factory(r, size - prop_bytes - 2)
def write(self, w, val):
if val is None:
w.put_int(0)
return
raise NotImplementedError
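# Reading illustration (a sketch): an INT inside SQL_VARIANT arrives as
# total size 6, then type_id=INT4TYPE and prop_bytes=0, which leaves
# 6 - 0 - 2 = 4 bytes of data for IntSerializer to consume.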
class TableType(SqlTypeMetaclass):
"""
Used to serialize table valued parameters
spec: https://msdn.microsoft.com/en-us/library/dd304813.aspx
"""
def __init__(self, typ_schema, typ_name, columns):
"""
@param typ_schema: Schema where TVP type defined
@param typ_name: Name of TVP type
@param columns: List of column types
"""
if len(typ_schema) > 128:
raise ValueError("Schema part of TVP name should be no longer than 128 characters")
if len(typ_name) > 128:
raise ValueError("Name part of TVP name should be no longer than 128 characters")
if columns is not None:
if len(columns) > 1024:
raise ValueError("TVP cannot have more than 1024 columns")
if len(columns) < 1:
raise ValueError("TVP must have at least one column")
self._typ_dbname = '' # dbname should always be empty string for TVP according to spec
self._typ_schema = typ_schema
self._typ_name = typ_name
self._columns = columns
def __repr__(self):
return 'TableType(s={},n={},cols={})'.format(
self._typ_schema, self._typ_name, repr(self._columns)
)
def get_declaration(self):
assert not self._typ_dbname
if self._typ_schema:
full_name = '{}.{}'.format(self._typ_schema, self._typ_name)
else:
full_name = self._typ_name
return '{} READONLY'.format(full_name)
@property
def typ_schema(self):
return self._typ_schema
@property
def typ_name(self):
return self._typ_name
@property
def columns(self):
return self._columns
class TableValuedParam(SqlValueMetaclass):
"""
Used to represent a value of table-valued parameter
"""
def __init__(self, type_name=None, columns=None, rows=None):
# parsing type name
self._typ_schema = ''
self._typ_name = ''
if type_name:
parts = type_name.split('.')
if len(parts) > 2:
raise ValueError('Type name should consist of at most 2 parts, e.g. dbo.MyType')
self._typ_name = parts[-1]
if len(parts) > 1:
self._typ_schema = parts[0]
self._columns = columns
self._rows = rows
@property
def typ_name(self):
return self._typ_name
@property
def typ_schema(self):
return self._typ_schema
@property
def columns(self):
return self._columns
@property
def rows(self):
return self._rows
def is_null(self):
return self._rows is None
def peek_row(self):
try:
rows = iter(self._rows)
except TypeError:
raise tds_base.DataError('rows should be iterable')
try:
row = next(rows)
except StopIteration:
# no rows
raise tds_base.DataError("Cannot infer columns from rows for TVP because there are no rows")
else:
# put row back
self._rows = itertools.chain([row], rows)
return row
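# Usage sketch (the type name, procedure and values below are illustrative,
# not part of this module):
#   tvp = TableValuedParam(type_name='dbo.MyType', rows=[(1, 'one')])
#   cur.execute('exec MyProc %s', (tvp,))
# when columns are not given, column types are inferred from the first
# row obtained via peek_row().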
class TableSerializer(BaseTypeSerializer):
"""
Used to serialize table valued parameters
spec: https://msdn.microsoft.com/en-us/library/dd304813.aspx
"""
type = tds_base.TVPTYPE
def read(self, r):
""" According to spec TDS does not support output TVP values """
raise NotImplementedError
@classmethod
def from_stream(cls, r):
""" According to spec TDS does not support output TVP values """
raise NotImplementedError
def __init__(self, table_type, columns_serializers):
super(TableSerializer, self).__init__()
self._table_type = table_type
self._columns_serializers = columns_serializers
@property
def table_type(self):
return self._table_type
def __repr__(self):
return 'TableSerializer(t={},c={})'.format(
repr(self._table_type), repr(self._columns_serializers)
)
def write_info(self, w):
"""
Writes TVP_TYPENAME structure
spec: https://msdn.microsoft.com/en-us/library/dd302994.aspx
@param w: TdsWriter
@return:
"""
w.write_b_varchar("") # db_name, should be empty
w.write_b_varchar(self._table_type.typ_schema)
w.write_b_varchar(self._table_type.typ_name)
def write(self, w, val):
"""
Writes remaining part of TVP_TYPE_INFO structure, resuming from TVP_COLMETADATA
specs:
https://msdn.microsoft.com/en-us/library/dd302994.aspx
https://msdn.microsoft.com/en-us/library/dd305261.aspx
https://msdn.microsoft.com/en-us/library/dd303230.aspx
@param w: TdsWriter
@param val: TableValuedParam or None
@return:
"""
if val.is_null():
w.put_usmallint(tds_base.TVP_NULL_TOKEN)
else:
columns = self._table_type.columns
w.put_usmallint(len(columns))
for i, column in enumerate(columns):
w.put_uint(column.column_usertype)
w.put_usmallint(column.flags)
# TYPE_INFO structure: https://msdn.microsoft.com/en-us/library/dd358284.aspx
serializer = self._columns_serializers[i]
type_id = serializer.type
w.put_byte(type_id)
serializer.write_info(w)
w.write_b_varchar('') # ColName, must be empty in TVP according to spec
# here can optionally send TVP_ORDER_UNIQUE and TVP_COLUMN_ORDERING
# https://msdn.microsoft.com/en-us/library/dd305261.aspx
# terminating optional metadata
w.put_byte(tds_base.TVP_END_TOKEN)
# now sending rows using TVP_ROW
# https://msdn.microsoft.com/en-us/library/dd305261.aspx
if val.rows:
for row in val.rows:
w.put_byte(tds_base.TVP_ROW_TOKEN)
for i, col in enumerate(self._table_type.columns):
if not col.flags & tds_base.TVP_COLUMN_DEFAULT_FLAG:
self._columns_serializers[i].write(w, row[i])
# terminating rows
w.put_byte(tds_base.TVP_END_TOKEN)
_type_map = {
tds_base.SYBINT1: TinyIntSerializer,
tds_base.SYBINT2: SmallIntSerializer,
tds_base.SYBINT4: IntSerializer,
tds_base.SYBINT8: BigIntSerializer,
tds_base.SYBINTN: IntNSerializer,
tds_base.SYBBIT: BitSerializer,
tds_base.SYBBITN: BitNSerializer,
tds_base.SYBREAL: RealSerializer,
tds_base.SYBFLT8: FloatSerializer,
tds_base.SYBFLTN: FloatNSerializer,
tds_base.SYBMONEY4: Money4Serializer,
tds_base.SYBMONEY: Money8Serializer,
tds_base.SYBMONEYN: MoneyNSerializer,
tds_base.XSYBCHAR: VarChar70Serializer,
tds_base.XSYBVARCHAR: VarChar70Serializer,
tds_base.XSYBNCHAR: NVarChar70Serializer,
tds_base.XSYBNVARCHAR: NVarChar70Serializer,
tds_base.SYBTEXT: Text70Serializer,
tds_base.SYBNTEXT: NText70Serializer,
tds_base.SYBMSXML: XmlSerializer,
tds_base.XSYBBINARY: VarBinarySerializer,
tds_base.XSYBVARBINARY: VarBinarySerializer,
tds_base.SYBIMAGE: Image70Serializer,
tds_base.SYBNUMERIC: MsDecimalSerializer,
tds_base.SYBDECIMAL: MsDecimalSerializer,
tds_base.SYBVARIANT: VariantSerializer,
tds_base.SYBMSDATE: MsDateSerializer,
tds_base.SYBMSTIME: MsTimeSerializer,
tds_base.SYBMSDATETIME2: DateTime2Serializer,
tds_base.SYBMSDATETIMEOFFSET: DateTimeOffsetSerializer,
tds_base.SYBDATETIME4: SmallDateTimeSerializer,
tds_base.SYBDATETIME: DateTimeSerializer,
tds_base.SYBDATETIMN: DateTimeNSerializer,
tds_base.SYBUNIQUE: MsUniqueSerializer,
}
_type_map71 = _type_map.copy()
_type_map71.update({
tds_base.XSYBCHAR: VarChar71Serializer,
tds_base.XSYBNCHAR: NVarChar71Serializer,
tds_base.XSYBVARCHAR: VarChar71Serializer,
tds_base.XSYBNVARCHAR: NVarChar71Serializer,
tds_base.SYBTEXT: Text71Serializer,
tds_base.SYBNTEXT: NText71Serializer,
})
_type_map72 = _type_map.copy()
_type_map72.update({
tds_base.XSYBCHAR: VarChar72Serializer,
tds_base.XSYBNCHAR: NVarChar72Serializer,
tds_base.XSYBVARCHAR: VarChar72Serializer,
tds_base.XSYBNVARCHAR: NVarChar72Serializer,
tds_base.SYBTEXT: Text72Serializer,
tds_base.SYBNTEXT: NText72Serializer,
tds_base.XSYBBINARY: VarBinarySerializer72,
tds_base.XSYBVARBINARY: VarBinarySerializer72,
tds_base.SYBIMAGE: Image72Serializer,
tds_base.UDTTYPE: UDT72Serializer,
})
_type_map73 = _type_map72.copy()
_type_map73.update({
tds_base.TVPTYPE: TableSerializer,
})
def sql_type_by_declaration(declaration):
return _declarations_parser.parse(declaration)
class SerializerFactory(object):
"""
Factory class for TDS data types
"""
def __init__(self, tds_ver):
self._tds_ver = tds_ver
if self._tds_ver >= tds_base.TDS73:
self._type_map = _type_map73
elif self._tds_ver >= tds_base.TDS72:
self._type_map = _type_map72
elif self._tds_ver >= tds_base.TDS71:
self._type_map = _type_map71
else:
self._type_map = _type_map
def get_type_serializer(self, tds_type_id):
type_class = self._type_map.get(tds_type_id)
if not type_class:
raise tds_base.InterfaceError('Invalid type id {}'.format(tds_type_id))
return type_class
def long_binary_type(self):
if self._tds_ver >= tds_base.TDS72:
return VarBinaryMaxType()
else:
return ImageType()
def long_varchar_type(self):
if self._tds_ver >= tds_base.TDS72:
return VarCharMaxType()
else:
return TextType()
def long_string_type(self):
if self._tds_ver >= tds_base.TDS72:
return NVarCharMaxType()
else:
return NTextType()
def datetime(self, precision):
if self._tds_ver >= tds_base.TDS72:
return DateTime2Type(precision=precision)
else:
return DateTimeType()
def has_datetime_with_tz(self):
return self._tds_ver >= tds_base.TDS72
def datetime_with_tz(self, precision):
if self._tds_ver >= tds_base.TDS72:
return DateTimeOffsetType(precision=precision)
else:
raise tds_base.DataError('Given TDS version does not support DATETIMEOFFSET type')
def date(self):
if self._tds_ver >= tds_base.TDS72:
return DateType()
else:
return DateTimeType()
def time(self, precision):
if self._tds_ver >= tds_base.TDS72:
return TimeType(precision=precision)
else:
raise tds_base.DataError('Given TDS version does not support TIME type')
def serializer_by_declaration(self, declaration, connection):
sql_type = sql_type_by_declaration(declaration)
return self.serializer_by_type(sql_type=sql_type, collation=connection.collation)
def serializer_by_type(self, sql_type, collation=raw_collation):
typ = sql_type
if isinstance(typ, BitType):
return BitNSerializer(typ)
elif isinstance(typ, TinyIntType):
return IntNSerializer(typ)
elif isinstance(typ, SmallIntType):
return IntNSerializer(typ)
elif isinstance(typ, IntType):
return IntNSerializer(typ)
elif isinstance(typ, BigIntType):
return IntNSerializer(typ)
elif isinstance(typ, RealType):
return FloatNSerializer(size=4)
elif isinstance(typ, FloatType):
return FloatNSerializer(size=8)
elif isinstance(typ, SmallMoneyType):
return self._type_map[tds_base.SYBMONEYN](size=4)
elif isinstance(typ, MoneyType):
return self._type_map[tds_base.SYBMONEYN](size=8)
elif isinstance(typ, CharType):
return self._type_map[tds_base.XSYBCHAR](size=typ.size, collation=collation)
elif isinstance(typ, VarCharType):
return self._type_map[tds_base.XSYBVARCHAR](size=typ.size, collation=collation)
elif isinstance(typ, VarCharMaxType):
return VarCharMaxSerializer(collation=collation)
elif isinstance(typ, NCharType):
return self._type_map[tds_base.XSYBNCHAR](size=typ.size, collation=collation)
elif isinstance(typ, NVarCharType):
return self._type_map[tds_base.XSYBNVARCHAR](size=typ.size, collation=collation)
elif isinstance(typ, NVarCharMaxType):
return NVarCharMaxSerializer(collation=collation)
elif isinstance(typ, TextType):
return self._type_map[tds_base.SYBTEXT](collation=collation)
elif isinstance(typ, NTextType):
return self._type_map[tds_base.SYBNTEXT](collation=collation)
elif isinstance(typ, XmlType):
return self._type_map[tds_base.SYBMSXML]()
elif isinstance(typ, BinaryType):
return self._type_map[tds_base.XSYBBINARY]()
elif isinstance(typ, VarBinaryType):
return self._type_map[tds_base.XSYBVARBINARY](size=typ.size)
elif isinstance(typ, VarBinaryMaxType):
return VarBinarySerializerMax()
elif isinstance(typ, ImageType):
return self._type_map[tds_base.SYBIMAGE]()
elif isinstance(typ, DecimalType):
return self._type_map[tds_base.SYBDECIMAL](scale=typ.scale, precision=typ.precision)
elif isinstance(typ, VariantType):
return self._type_map[tds_base.SYBVARIANT](size=0)
elif isinstance(typ, SmallDateTimeType):
return self._type_map[tds_base.SYBDATETIMN](size=4)
elif isinstance(typ, DateTimeType):
return self._type_map[tds_base.SYBDATETIMN](size=8)
elif isinstance(typ, DateType):
return self._type_map[tds_base.SYBMSDATE](typ)
elif isinstance(typ, TimeType):
return self._type_map[tds_base.SYBMSTIME](typ)
elif isinstance(typ, DateTime2Type):
return self._type_map[tds_base.SYBMSDATETIME2](typ)
elif isinstance(typ, DateTimeOffsetType):
return self._type_map[tds_base.SYBMSDATETIMEOFFSET](typ)
elif isinstance(typ, UniqueIdentifierType):
return self._type_map[tds_base.SYBUNIQUE]()
elif isinstance(typ, TableType):
columns_serializers = None
if typ.columns is not None:
columns_serializers = [self.serializer_by_type(col.type) for col in typ.columns]
return TableSerializer(table_type=typ, columns_serializers=columns_serializers)
else:
raise ValueError('Cannot map type {} to serializer.'.format(typ))
class DeclarationsParser(object):
def __init__(self):
declaration_parsers = [
('bit', BitType),
('tinyint', TinyIntType),
('smallint', SmallIntType),
('(?:int|integer)', IntType),
('bigint', BigIntType),
('real', RealType),
('(?:float|double precision)', FloatType),
('(?:char|character)', CharType),
(r'(?:char|character)\((\d+)\)', lambda size_str: CharType(size=int(size_str))),
(r'(?:varchar|char(?:|acter)\s+varying)', VarCharType),
(r'(?:varchar|char(?:|acter)\s+varying)\((\d+)\)', lambda size_str: VarCharType(size=int(size_str))),
(r'varchar\(max\)', VarCharMaxType),
(r'(?:nchar|national\s+(?:char|character))', NCharType),
(r'(?:nchar|national\s+(?:char|character))\((\d+)\)', lambda size_str: NCharType(size=int(size_str))),
(r'(?:nvarchar|national\s+(?:char|character)\s+varying)', NVarCharType),
(r'(?:nvarchar|national\s+(?:char|character)\s+varying)\((\d+)\)',
lambda size_str: NVarCharType(size=int(size_str))),
(r'nvarchar\(max\)', NVarCharMaxType),
('xml', XmlType),
('text', TextType),
(r'(?:ntext|national\s+text)', NTextType),
('binary', BinaryType),
(r'binary\((\d+)\)', lambda size_str: BinaryType(size=int(size_str))),
('(?:varbinary|binary varying)', VarBinaryType),
(r'(?:varbinary|binary varying)\((\d+)\)', lambda size_str: VarBinaryType(size=int(size_str))),
(r'varbinary\(max\)', VarBinaryMaxType),
('image', ImageType),
('smalldatetime', SmallDateTimeType),
('datetime', DateTimeType),
('date', DateType),
(r'time', TimeType),
(r'time\((\d+)\)', lambda precision_str: TimeType(precision=int(precision_str))),
('datetime2', DateTime2Type),
(r'datetime2\((\d+)\)', lambda precision_str: DateTime2Type(precision=int(precision_str))),
('datetimeoffset', DateTimeOffsetType),
(r'datetimeoffset\((\d+)\)',
lambda precision_str: DateTimeOffsetType(precision=int(precision_str))),
('(?:decimal|dec|numeric)', DecimalType),
(r'(?:decimal|dec|numeric)\((\d+)\)',
lambda precision_str: DecimalType(precision=int(precision_str))),
(r'(?:decimal|dec|numeric)\((\d+), (\d+)\)',
lambda precision_str, scale_str: DecimalType(precision=int(precision_str), scale=int(scale_str))),
('smallmoney', SmallMoneyType),
('money', MoneyType),
('uniqueidentifier', UniqueIdentifierType),
('sql_variant', VariantType),
]
self._compiled = [(re.compile(r'^' + regex + '$', re.IGNORECASE), constructor)
for regex, constructor in declaration_parsers]
def parse(self, declaration):
"""
Parse sql type declaration, e.g. varchar(10) and return instance of corresponding type class,
e.g. VarCharType(10)
@param declaration: Sql declaration to parse, e.g. varchar(10)
@return: instance of SqlTypeMetaclass
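Example (illustrative):
parse('decimal(10, 2)') -> DecimalType(precision=10, scale=2)
parse('nvarchar(max)') -> NVarCharMaxType()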
"""
declaration = declaration.strip()
for regex, constructor in self._compiled:
m = regex.match(declaration)
if m:
return constructor(*m.groups())
raise ValueError('Unable to parse type declaration', declaration)
_declarations_parser = DeclarationsParser()
class TdsTypeInferrer(object):
def __init__(self, type_factory, collation=None, bytes_to_unicode=False, allow_tz=False):
"""
Class used to do TDS type inference
:param type_factory: Instance of TypeFactory
:param collation: Collation to use for strings
:param bytes_to_unicode: Treat bytes type as unicode string
:param allow_tz: Allow usage of DATETIMEOFFSET type
"""
self._type_factory = type_factory
self._collation = collation
self._bytes_to_unicode = bytes_to_unicode
self._allow_tz = allow_tz
def from_value(self, value):
""" Function infers TDS type from Python value.
:param value: value from which to infer TDS type
:return: An instance of subclass of :class:`BaseType`
"""
if value is None:
sql_type = NVarCharType(size=1)
else:
sql_type = self._from_class_value(value, type(value))
return sql_type
def from_class(self, cls):
""" Function infers TDS type from Python class.
:param cls: Class from which to infer type
:return: An instance of subclass of :class:`BaseType`
"""
return self._from_class_value(None, cls)
def _from_class_value(self, value, value_type):
type_factory = self._type_factory
bytes_to_unicode = self._bytes_to_unicode
allow_tz = self._allow_tz
if issubclass(value_type, bool):
return BitType()
elif issubclass(value_type, six.integer_types):
if value is None:
return IntType()
if -2 ** 31 <= value <= 2 ** 31 - 1:
return IntType()
elif -2 ** 63 <= value <= 2 ** 63 - 1:
return BigIntType()
elif -10 ** 38 + 1 <= value <= 10 ** 38 - 1:
return DecimalType(precision=38)
else:
return VarCharMaxType()
elif issubclass(value_type, float):
return FloatType()
elif issubclass(value_type, Binary):
if value is None or len(value) <= 8000:
return VarBinaryType(size=8000)
else:
return type_factory.long_binary_type()
elif issubclass(value_type, six.binary_type):
if bytes_to_unicode:
return type_factory.long_string_type()
else:
return type_factory.long_varchar_type()
elif issubclass(value_type, six.string_types):
return type_factory.long_string_type()
elif issubclass(value_type, datetime.datetime):
if value and value.tzinfo and allow_tz:
return type_factory.datetime_with_tz(precision=6)
else:
return type_factory.datetime(precision=6)
elif issubclass(value_type, datetime.date):
return type_factory.date()
elif issubclass(value_type, datetime.time):
return type_factory.time(precision=6)
elif issubclass(value_type, decimal.Decimal):
if value is None:
return DecimalType()
else:
return DecimalType.from_value(value)
elif issubclass(value_type, uuid.UUID):
return UniqueIdentifierType()
elif issubclass(value_type, TableValuedParam):
columns = value.columns
rows = value.rows
if columns is None:
# trying to auto detect columns using data from first row
if rows is None:
# rows are not present too, this means
# entire tvp has value of NULL
pass
else:
# use first row to infer types of columns
row = value.peek_row()
columns = []
try:
cell_iter = iter(row)
except TypeError:
raise tds_base.DataError('Each row in table should be an iterable')
for cell in cell_iter:
if isinstance(cell, TableValuedParam):
raise tds_base.DataError('TVP type cannot have nested TVP types')
col_type = self.from_value(cell)
col = tds_base.Column(type=col_type)
columns.append(col)
return TableType(typ_schema=value.typ_schema, typ_name=value.typ_name, columns=columns)
else:
raise tds_base.DataError('Cannot infer TDS type from Python value: {!r}'.format(value))
# --- end of src/pytds/tds_types.py ---
from collections import deque
import datetime
import errno
import keyword
import os
import re
import six
import socket
import uuid
import warnings
import weakref
import logging
from six.moves import xrange
from pytds.tds_types import NVarCharType
from . import lcid
import pytds.tz
from .tds import (
_TdsSocket, tds7_get_instances,
_create_exception_by_message,
output, default
)
from . import tds_base
from .tds_base import (
Error, LoginError, DatabaseError, ProgrammingError,
IntegrityError, DataError, InternalError,
InterfaceError, TimeoutError, OperationalError,
NotSupportedError, Warning, ClosedConnectionError,
Column,
PreLoginEnc)
from .tds_types import (
TableValuedParam, Binary
)
from .tds_base import (
ROWID, DECIMAL, STRING, BINARY, NUMBER, DATETIME, INTEGER, REAL, XML
)
from . import tls
__author__ = 'Mikhail Denisenko <[email protected]>'
__version__ = '1.11.1'
logger = logging.getLogger(__name__)
def _ver_to_int(ver):
res = ver.split('.')
if len(res) < 2:
logger.warning('Invalid version {}, it should have at least 2 parts separated by "."'.format(ver))
return 0
maj, minor = res[0], res[1]
return (int(maj) << 24) + (int(minor) << 16)
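# Worked example: _ver_to_int('1.11.1') == (1 << 24) + (11 << 16) == 0x010B0000;
# the patch part is intentionally ignored.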
intversion = _ver_to_int(__version__)
#: Compliant with DB SIG 2.0
apilevel = '2.0'
#: Module may be shared, but not connections
threadsafety = 1
#: This module uses extended python format codes
paramstyle = 'pyformat'
class _TdsLogin:
pass
def tuple_row_strategy(column_names):
""" Tuple row strategy, rows returned as tuples, default
"""
return tuple
def list_row_strategy(column_names):
""" List row strategy, rows returned as lists
"""
return list
def dict_row_strategy(column_names):
""" Dict row strategy, rows returned as dictionaries
"""
# replace empty column names with indices
column_names = [(name or idx) for idx, name in enumerate(column_names)]
def row_factory(row):
return dict(zip(column_names, row))
return row_factory
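# Usage sketch: a row strategy is handed to connect() and called once per
# result set with the list of column names; the factory it returns is then
# applied to every row. Connection details below are placeholders:
#   conn = connect(dsn='myserver', database='mydb', user='u', password='p',
#                  row_strategy=dict_row_strategy)
#   # rows now come back as dictionaries keyed by column name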
def is_valid_identifier(name):
return name and re.match("^[_A-Za-z][_a-zA-Z0-9]*$", name) and not keyword.iskeyword(name)
def namedtuple_row_strategy(column_names):
""" Namedtuple row strategy, rows returned as named tuples
Column names that are not valid Python identifiers will be replaced
with col<number>_
"""
import collections
# replace empty column names with placeholders
column_names = [name if is_valid_identifier(name) else 'col%s_' % idx for idx, name in enumerate(column_names)]
row_class = collections.namedtuple('Row', column_names)
def row_factory(row):
return row_class(*row)
return row_factory
def recordtype_row_strategy(column_names):
""" Recordtype row strategy, rows returned as recordtypes
Column names that are not valid Python identifiers will be replaced
with col<number>_
"""
try:
from namedlist import namedlist as recordtype # optional dependency
except ImportError:
from recordtype import recordtype # optional dependency
# replace empty column names with placeholders
column_names = [name if is_valid_identifier(name) else 'col%s_' % idx for idx, name in enumerate(column_names)]
recordtype_row_class = recordtype('Row', column_names)
# custom extension class that supports indexing
class Row(recordtype_row_class):
def __getitem__(self, index):
if isinstance(index, slice):
return tuple(getattr(self, x) for x in self.__slots__[index])
return getattr(self, self.__slots__[index])
def __setitem__(self, index, value):
setattr(self, self.__slots__[index], value)
def row_factory(row):
return Row(*row)
return row_factory
class _ConnectionPool(object):
def __init__(self, max_pool_size=100, min_pool_size=0):
self._max_pool_size = max_pool_size
self._pool = {}
def add(self, key, conn):
self._pool.setdefault(key, []).append(conn)
def take(self, key):
l = self._pool.get(key, [])
if len(l) > 0:
return l.pop()
else:
return None
_connection_pool = _ConnectionPool()
class Connection(object):
"""Connection object, this object should be created by calling :func:`connect`"""
def __init__(self):
self._closed = False
self._conn = None
self._isolation_level = 0
self._autocommit = True
self._row_strategy = tuple_row_strategy
self._login = None
self._use_tz = None
self._tzinfo_factory = None
self._key = None
self._pooling = False
@property
def as_dict(self):
"""
Instructs all cursors this connection creates to return results
as a dictionary rather than a tuple.
"""
return self._row_strategy == dict_row_strategy
@as_dict.setter
def as_dict(self, value):
if value:
self._row_strategy = dict_row_strategy
else:
self._row_strategy = tuple_row_strategy
@property
def autocommit_state(self):
"""
An alias for `autocommit`, provided for compatibility with pymssql
"""
return self._autocommit
def set_autocommit(self, value):
""" An alias for `autocommit`, provided for compatibility with ADO dbapi
"""
self.autocommit = value
@property
def autocommit(self):
"""
The current state of autocommit on the connection.
"""
return self._autocommit
@autocommit.setter
def autocommit(self, value):
if self._autocommit != value:
if value:
if self._conn.tds72_transaction:
self._main_cursor._rollback(cont=False)
else:
self._main_cursor._begin_tran(isolation_level=self._isolation_level)
self._autocommit = value
@property
def isolation_level(self):
"""Isolation level for transactions,
for possible values see :ref:`isolation-level-constants`
.. seealso:: `SET TRANSACTION ISOLATION LEVEL`__ in MSSQL documentation
.. __: http://msdn.microsoft.com/en-us/library/ms173763.aspx
"""
return self._isolation_level
@isolation_level.setter
def isolation_level(self, level):
self._isolation_level = level
def _assert_open(self):
if self._closed:
raise Error('Connection closed')
if not self._conn or not self._conn.is_connected():
self._open()
def _trancount(self):
with self.cursor() as cur:
cur.execute('select @@trancount')
return cur.fetchone()[0]
@property
def tds_version(self):
"""
Version of tds protocol that is being used by this connection
"""
self._assert_open()
return self._conn.tds_version
@property
def product_version(self):
"""
Version of the MSSQL server
"""
self._assert_open()
return self._conn.product_version
@property
def mars_enabled(self):
""" Whether MARS is enabled or not on connection
"""
return self._conn.mars_enabled
def _connect(self, host, port, instance, timeout, sock=None):
login = self._login
try:
login.server_name = host
login.instance_name = instance
port = _resolve_instance_port(
host,
port,
instance,
timeout=timeout)
if not sock:
logger.info('Opening socket to %s:%d', host, port)
sock = socket.create_connection((host, port), timeout)
except Exception as e:
raise LoginError("Cannot connect to server '{0}': {1}".format(host, e), e)
sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
# the spec recommends a default keep alive interval of 30 seconds:
# https://msdn.microsoft.com/en-us/library/dd341108.aspx
# note that SO_KEEPALIVE is an on/off flag; the interval itself is OS-controlled
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
sock.settimeout(timeout)
conn = _TdsSocket(self._use_tz)
self._conn = conn
try:
route = conn.login(login, sock, self._tzinfo_factory)
if route is not None:
# rerouted to different server
sock.close()
self._connect(host=route['server'],
port=route['port'],
instance=instance,
timeout=timeout)
return
if conn.mars_enabled:
cursor = _MarsCursor(
self,
conn.create_session(self._tzinfo_factory),
self._tzinfo_factory)
else:
cursor = Cursor(
self,
conn.main_session,
self._tzinfo_factory)
self._active_cursor = self._main_cursor = cursor
if not self._autocommit:
cursor._session.begin_tran(isolation_level=self._isolation_level)
sock.settimeout(login.query_timeout)
except:
sock.close()
raise
def _try_open(self, timeout, sock=None):
if self._pooling:
res = _connection_pool.take(self._key)
if res is not None:
self._conn, sess = res
if self._conn.mars_enabled:
cursor = _MarsCursor(
self,
sess,
self._tzinfo_factory)
else:
cursor = Cursor(
self,
sess,
self._tzinfo_factory)
self._active_cursor = self._main_cursor = cursor
cursor.callproc('sp_reset_connection')
return
login = self._login
host, port, instance = login.servers[0]
self._connect(host=host, port=port, instance=instance, timeout=timeout, sock=sock)
def _open(self, sock=None):
import time
self._conn = None
self._dirty = False
login = self._login
connect_timeout = login.connect_timeout
# using retry algorithm specified in
# http://msdn.microsoft.com/en-us/library/ms175484.aspx
retry_time = 0.08 * connect_timeout
retry_delay = 0.2
last_error = None
end_time = time.time() + connect_timeout
while True:
for _ in xrange(len(login.servers)):
try:
self._try_open(timeout=retry_time, sock=sock)
return
except OperationalError as e:
last_error = e
# if there is more than one message, it means
# that the login itself was successful, as in the
# case when the database is not accessible
# mssql returns 2 messages:
# 1) Cannot open database "<dbname>" requested by the login. The login failed.
# 2) Login failed for user '<username>'
# in this case we want to retry
if self._conn is not None and len(self._conn.main_session.messages) <= 1:
# for the following error messages we don't retry
# because if the password is incorrect and we
# retry multiple times this can cause account
# to be locked
if e.msg_no in (
18456, # login failed
18486, # account is locked
18487, # password expired
18488, # password should be changed
18452, # login from untrusted domain
):
raise
if time.time() > end_time:
raise last_error
login.servers.rotate(-1)
time.sleep(retry_delay)
retry_time += 0.08 * connect_timeout
retry_delay = min(1, retry_delay * 2)
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def commit(self):
"""
Commit transaction which is currently in progress.
"""
self._assert_open()
if self._autocommit:
return
if not self._conn.tds72_transaction:
return
self._main_cursor._commit(cont=True, isolation_level=self._isolation_level)
def cursor(self):
"""
Return cursor object that can be used to make queries and fetch
results from the database.
"""
self._assert_open()
if self.mars_enabled:
in_tran = self._conn.tds72_transaction
if in_tran and self._dirty:
try:
return _MarsCursor(self,
self._conn.create_session(self._tzinfo_factory),
self._tzinfo_factory)
except (socket.error, OSError) as e:
self._conn.close()
raise
else:
try:
return _MarsCursor(self,
self._conn.create_session(self._tzinfo_factory),
self._tzinfo_factory)
except (socket.error, OSError) as e:
if e.errno not in (errno.EPIPE, errno.ECONNRESET):
raise
self._conn.close()
except ClosedConnectionError:
pass
self._assert_open()
return _MarsCursor(self,
self._conn.create_session(self._tzinfo_factory),
self._tzinfo_factory)
else:
return Cursor(self,
self._conn.main_session,
self._tzinfo_factory)
def rollback(self):
"""
Roll back transaction which is currently in progress.
"""
try:
if self._autocommit:
return
if not self._conn or not self._conn.is_connected():
return
if not self._conn.tds72_transaction:
return
self._main_cursor._rollback(cont=True,
isolation_level=self._isolation_level)
except socket.error as e:
if e.errno in (errno.ENETRESET, errno.ECONNRESET, errno.EPIPE):
return
self._conn.close()
raise
except ClosedConnectionError:
pass
def close(self):
""" Close connection to an MS SQL Server.
This function tries to close the connection and free all memory used.
It can be called more than once in a row. No exception is raised in
this case.
"""
if self._conn:
if self._pooling:
_connection_pool.add(self._key, (self._conn, self._main_cursor._session))
else:
self._conn.close()
self._active_cursor = None
self._main_cursor = None
self._conn = None
self._closed = True
def _try_activate_cursor(self, cursor):
if cursor is not self._active_cursor:
session = self._active_cursor._session
if session.in_cancel:
session.process_cancel()
if session.state == tds_base.TDS_PENDING:
raise InterfaceError('Results are still pending on connection')
self._active_cursor = cursor
class Cursor(six.Iterator):
"""
This class represents a database cursor, which is used to issue queries
and fetch results from a database connection.
"""
def __init__(self, conn, session, tzinfo_factory):
self._conn = weakref.ref(conn)
self.arraysize = 1
self._session = session
self._tzinfo_factory = tzinfo_factory
def _assert_open(self):
conn = self._conn
if conn is not None:
conn = conn()
if not conn:
raise InterfaceError('Cursor is closed')
conn._assert_open()
self._session = conn._conn._main_session
return conn
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __iter__(self):
"""
Return self to make cursors compatible with Python iteration
protocol.
"""
return self
def _setup_row_factory(self):
self._row_factory = None
conn = self._conn()
if self._session.res_info:
column_names = [col[0] for col in self._session.res_info.description]
self._row_factory = conn._row_strategy(column_names)
def _callproc(self, procname, parameters):
self._ensure_transaction()
results = list(parameters)
parameters = self._session._convert_params(parameters)
self._exec_with_retry(lambda: self._session.submit_rpc(procname, parameters, 0))
self._session.process_rpc()
for key, param in self._session.output_params.items():
results[key] = param.value
self._setup_row_factory()
return results
def get_proc_outputs(self):
"""
If stored procedure has result sets and OUTPUT parameters use this method
after you processed all result sets to get values of OUTPUT parameters.
:return: A list of output parameter values.
"""
self._session.complete_rpc()
results = [None] * len(self._session.output_params)
for key, param in self._session.output_params.items():
results[key] = param.value
return results
def callproc(self, procname, parameters=()):
"""
Call a stored procedure with the given name.
:param procname: The name of the procedure to call
:type procname: str
:keyword parameters: The optional parameters for the procedure
:type parameters: sequence
Note: If stored procedure has OUTPUT parameters and result sets this
method will not return values for OUTPUT parameters, you should
call get_proc_outputs to get values for OUTPUT parameters.
"""
conn = self._assert_open()
conn._try_activate_cursor(self)
return self._callproc(procname, parameters)
@property
def return_value(self):
""" Alias to :func:`get_proc_return_status`
"""
return self.get_proc_return_status()
@property
def connection(self):
""" Provides link back to :class:`Connection` of this cursor
"""
return self._conn()
@property
def spid(self):
""" MSSQL Server's SPID (session id)
"""
return self._session._spid
def _get_tzinfo_factory(self):
return self._tzinfo_factory
def _set_tzinfo_factory(self, tzinfo_factory):
self._tzinfo_factory = self._session.tzinfo_factory = tzinfo_factory
tzinfo_factory = property(_get_tzinfo_factory, _set_tzinfo_factory)
def get_proc_return_status(self):
""" Last stored proc result
"""
if self._session is None:
return None
if not self._session.has_status:
self._session.find_return_status()
return self._session.ret_status if self._session.has_status else None
def cancel(self):
""" Cancel current statement
"""
conn = self._assert_open()
conn._try_activate_cursor(self)
self._session.cancel_if_pending()
def close(self):
"""
Closes the cursor. The cursor is unusable from this point.
"""
conn = self._conn
if conn is not None:
conn = conn()
if conn is not None:
if self is conn._active_cursor:
conn._active_cursor = conn._main_cursor
self._session = None
self._conn = None
def _exec_with_retry(self, fun):
conn = self._assert_open()
in_tran = conn._conn.tds72_transaction
if in_tran and conn._dirty:
conn._dirty = True
try:
return fun()
except socket.error as e:
if e.errno not in (errno.ECONNRESET, errno.EPIPE):
raise
conn._conn.close()
else:
conn._dirty = True
try:
return fun()
except socket.error as e:
if e.errno not in (errno.ECONNRESET, errno.EPIPE):
raise
conn._conn.close()
except ClosedConnectionError:
pass
# in case of connection reset try again
conn = self._assert_open()
return fun()
def _ensure_transaction(self):
conn = self._conn()
if not conn._autocommit and not conn._conn.tds72_transaction:
conn._main_cursor._begin_tran(isolation_level=conn._isolation_level)
def _execute(self, operation, params):
self._ensure_transaction()
operation = six.text_type(operation)
if params:
named_params = {}
if isinstance(params, (list, tuple)):
names = []
pid = 1
for val in params:
if val is None:
names.append('NULL')
else:
name = '@P{0}'.format(pid)
names.append(name)
named_params[name] = val
pid += 1
if len(names) == 1:
operation = operation % names[0]
else:
operation = operation % tuple(names)
elif isinstance(params, dict):
# prepend names with @
rename = {}
for name, value in params.items():
if value is None:
rename[name] = 'NULL'
else:
mssql_name = '@{0}'.format(name)
rename[name] = mssql_name
named_params[mssql_name] = value
operation = operation % rename
if named_params:
named_params = self._session._convert_params(named_params)
param_definition = u','.join(
u'{0} {1}'.format(p.column_name, p.type.get_declaration())
for p in named_params)
self._exec_with_retry(lambda: self._session.submit_rpc(
tds_base.SP_EXECUTESQL,
[self._session.make_param('', operation), self._session.make_param('', param_definition)] + named_params,
0))
else:
self._exec_with_retry(lambda: self._session.submit_plain_query(operation))
else:
self._exec_with_retry(lambda: self._session.submit_plain_query(operation))
self._session.find_result_or_done()
self._setup_row_factory()
def execute(self, operation, params=()):
""" Execute the query
:param operation: SQL statement
:type operation: str
"""
conn = self._assert_open()
conn._try_activate_cursor(self)
self._execute(operation, params)
# for compatibility with pyodbc
return self
def _begin_tran(self, isolation_level):
conn = self._assert_open()
conn._try_activate_cursor(self)
self._session.begin_tran(isolation_level=isolation_level)
def _commit(self, cont, isolation_level=0):
conn = self._assert_open()
conn._try_activate_cursor(self)
self._session.commit(cont=cont, isolation_level=isolation_level)
conn._dirty = False
def _rollback(self, cont, isolation_level=0):
conn = self._assert_open()
conn._try_activate_cursor(self)
self._session.rollback(cont=cont, isolation_level=isolation_level)
conn._dirty = False
def executemany(self, operation, params_seq):
counts = []
for params in params_seq:
self.execute(operation, params)
if self._session.rows_affected != -1:
counts.append(self._session.rows_affected)
if counts:
self._session.rows_affected = sum(counts)
def execute_scalar(self, query_string, params=None):
"""
This method sends a query to the MS SQL Server to which this object
instance is connected, then returns first column of first row from
result. An exception is raised on failure. If there are pending
results or rows prior to executing this command, they are silently
discarded.
This method accepts Python formatting. Please see :meth:`execute`
for details.
This method is useful if you want just a single value, as in:
``conn.execute_scalar('SELECT COUNT(*) FROM employees')``
This method works in the same way as ``next(iter(cursor))[0]``.
Remaining rows, if any, can still be iterated after calling this
method.
"""
self.execute(query_string, params)
row = self.fetchone()
if not row:
return None
return row[0]
def nextset(self):
""" Move to next recordset in batch statement, all rows of current recordset are
discarded if present.
:returns: true if successful or ``None`` when there are no more recordsets
"""
res = self._session.next_set()
self._setup_row_factory()
return res
@property
def rowcount(self):
""" Number of rows affected by previous statement
:returns: -1 if this information was not supplied by MSSQL server
"""
if self._session is None:
return -1
return self._session.rows_affected
@property
def description(self):
""" Cursor description, see http://legacy.python.org/dev/peps/pep-0249/#description
"""
if self._session is None:
return None
res = self._session.res_info
if res:
return res.description
else:
return None
def set_stream(self, column_idx, stream):
if len(self._session.res_info.columns) <= column_idx or column_idx < 0:
raise ValueError('Invalid value for column_idx')
self._session.res_info.columns[column_idx].serializer.set_chunk_handler(pytds.tds_types._StreamChunkedHandler(stream))
@property
def messages(self):
""" Messages generated by server, see http://legacy.python.org/dev/peps/pep-0249/#cursor-messages
"""
if self._session:
result = []
for msg in self._session.messages:
ex = _create_exception_by_message(msg)
result.append((type(ex), ex))
return result
else:
return None
@property
def native_description(self):
""" todo document
"""
if self._session is None:
return None
res = self._session.res_info
if res:
return res.native_descr
else:
return None
def fetchone(self):
""" Fetches next row, or ``None`` if there are no more rows
"""
row = self._session.fetchone()
if row:
return self._row_factory(row)
def fetchmany(self, size=None):
""" Fetches next multiple rows
:param size: Maximum number of rows to return, default value is cursor.arraysize
:returns: List of rows
"""
if size is None:
size = self.arraysize
rows = []
for _ in xrange(size):
row = self.fetchone()
if not row:
break
rows.append(row)
return rows
def fetchall(self):
""" Fetches all remaining rows
"""
return list(self)
def __next__(self):
row = self.fetchone()
if row is None:
raise StopIteration
return row
@staticmethod
def setinputsizes(sizes=None):
"""
This method does nothing, as permitted by DB-API specification.
"""
pass
@staticmethod
def setoutputsize(size=None, column=0):
"""
This method does nothing, as permitted by DB-API specification.
"""
pass
def copy_to(self, file=None, table_or_view=None, sep='\t', columns=None,
check_constraints=False, fire_triggers=False, keep_nulls=False,
kb_per_batch=None, rows_per_batch=None, order=None, tablock=False,
schema=None, null_string=None, data=None):
""" *Experimental*. Efficiently load data to database from file using ``BULK INSERT`` operation
:param file: Source file-like object, should be in csv format. Specify
either this or data, not both.
:param table_or_view: Destination table or view in the database
:type table_or_view: str
Optional parameters:
:keyword sep: Separator used in csv file
:type sep: str
:keyword columns: List of Column objects or column names in target
table to insert to. SQL Server will do some conversions, so these
may not have to match the actual table definition exactly.
If not provided will insert into all columns assuming nvarchar(4000)
NULL for all columns.
If only the column name is provided, the type is assumed to be
nvarchar(4000) NULL.
If rows are given with file, you cannot specify non-string data
types.
If rows are given with data, the values must be a type supported by
the serializer for the column in tds_types.
:type columns: list
:keyword check_constraints: Check table constraints for incoming data
:type check_constraints: bool
:keyword fire_triggers: Enable or disable triggers for table
:type fire_triggers: bool
:keyword keep_nulls: If enabled null values inserted as-is, instead of
inserting default value for column
:type keep_nulls: bool
:keyword kb_per_batch: Kilobytes per batch can be used to optimize performance, see MSSQL
server documentation for details
:type kb_per_batch: int
:keyword rows_per_batch: Rows per batch can be used to optimize performance, see MSSQL
server documentation for details
:type rows_per_batch: int
:keyword order: The ordering of the data in source table. List of columns with ASC or DESC suffix.
E.g. ``['order_id ASC', 'name DESC']``
Can be used to optimize performance, see MSSQL server documentation for details
:type order: list
:keyword tablock: Enable or disable table lock for the duration of bulk load
:keyword schema: Name of schema for table or view, if not specified default schema will be used
:keyword null_string: String that should be interpreted as a NULL when
reading the CSV file. Has no meaning if using data instead of file.
:keyword data: The data to insert as an iterable of rows, which are
iterables of values. Specify either this or file, not both.
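Example (a minimal sketch; the table and file names are placeholders):
with open('data.csv') as f:
cur.copy_to(file=f, table_or_view='MyTable', sep=',', columns=['id', 'name'])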
"""
conn = self._conn()
rows = None
if data is None:
import csv
reader = csv.reader(file, delimiter=sep)
if null_string is not None:
def _convert_null_strings(csv_reader):
for row in csv_reader:
yield [r if r != null_string else None for r in row]
reader = _convert_null_strings(reader)
rows = reader
else:
rows = data
obj_name = tds_base.tds_quote_id(table_or_view)
if schema:
obj_name = '{0}.{1}'.format(tds_base.tds_quote_id(schema), obj_name)
if columns:
metadata = []
for column in columns:
if isinstance(column, Column):
metadata.append(column)
else:
metadata.append(Column(name=column, type=NVarCharType(size=4000), flags=Column.fNullable))
else:
self.execute('select top 1 * from {} where 1<>1'.format(obj_name))
metadata = [Column(name=col[0], type=NVarCharType(size=4000), flags=Column.fNullable if col[6] else 0)
for col in self.description]
col_defs = ','.join('{0} {1}'.format(tds_base.tds_quote_id(col.column_name), col.type.get_declaration())
for col in metadata)
with_opts = []
if check_constraints:
with_opts.append('CHECK_CONSTRAINTS')
if fire_triggers:
with_opts.append('FIRE_TRIGGERS')
if keep_nulls:
with_opts.append('KEEP_NULLS')
if kb_per_batch:
with_opts.append('KILOBYTES_PER_BATCH = {0}'.format(kb_per_batch))
if rows_per_batch:
with_opts.append('ROWS_PER_BATCH = {0}'.format(rows_per_batch))
if order:
with_opts.append('ORDER({0})'.format(','.join(order)))
if tablock:
with_opts.append('TABLOCK')
with_part = ''
if with_opts:
with_part = 'WITH ({0})'.format(','.join(with_opts))
operation = 'INSERT BULK {0}({1}) {2}'.format(obj_name, col_defs, with_part)
self.execute(operation)
self._session.submit_bulk(metadata, rows)
self._session.process_simple_request()
class _MarsCursor(Cursor):
def _assert_open(self):
conn = self._conn
if conn is not None:
conn = conn()
if not conn:
raise InterfaceError('Cursor is closed')
conn._assert_open()
if not self._session.is_connected():
self._session = conn._conn.create_session(self._tzinfo_factory)
return conn
@property
def spid(self):
# not thread safe for connection
conn = self._assert_open()
dirty = conn._dirty
spid = self.execute_scalar('select @@SPID')
conn._dirty = dirty
return spid
def cancel(self):
self._assert_open()
self._session.cancel_if_pending()
def close(self):
"""
Closes the cursor. The cursor is unusable from this point.
"""
if self._session is not None:
try:
self._session.close()
self._session = None
except socket.error as e:
if e.errno != errno.ECONNRESET:
raise
def execute(self, operation, params=()):
self._assert_open()
self._execute(operation, params)
# for compatibility with pyodbc
return self
def callproc(self, procname, parameters=()):
"""
Call a stored procedure with the given name.
:param procname: The name of the procedure to call
:type procname: str
:keyword parameters: The optional parameters for the procedure
:type parameters: sequence
"""
self._assert_open()
return self._callproc(procname, parameters)
def _begin_tran(self, isolation_level):
self._assert_open()
self._session.begin_tran(isolation_level=isolation_level)
def _commit(self, cont, isolation_level=0):
conn = self._assert_open()
self._session.commit(cont=cont, isolation_level=isolation_level)
conn._dirty = False
def _rollback(self, cont, isolation_level=0):
conn = self._assert_open()
self._session.rollback(cont=cont, isolation_level=isolation_level)
conn._dirty = False
def _resolve_instance_port(server, port, instance, timeout=5):
if instance and not port:
logger.info('querying %s for list of instances', server)
instances = tds7_get_instances(server, timeout=timeout)
if instance not in instances:
raise LoginError("Instance {0} not found on server {1}".format(instance, server))
instdict = instances[instance]
if 'tcp' not in instdict:
raise LoginError("Instance {0} doen't have tcp connections enabled".format(instance))
port = int(instdict['tcp'])
return port or 1433
def _parse_server(server):
instance = ""
if "\\" in server:
server, instance = server.split("\\")
# support MS methods of connecting locally
if server in (".", "(local)"):
server = "localhost"
return server, instance.upper()
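# Examples (illustrative):
#   _parse_server(r'myhost\SQLEXPRESS') -> ('myhost', 'SQLEXPRESS')
#   _parse_server('(local)') -> ('localhost', '')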
# map to servers deques, used to store active/passive servers
# between calls to connect function
# deques are used because they can be rotated
_servers_deques = {}
def _get_servers_deque(servers, database):
""" Returns deque of servers for given tuple of servers and
database name.
This deque have active server at the begining, if first server
is not accessible at the moment the deque will be rotated,
second server will be moved to the first position, thirt to the
second position etc, and previously first server will be moved
to the last position.
This allows to remember last successful server between calls
to connect function.
"""
key = (servers, database)
if key not in _servers_deques:
_servers_deques[key] = deque(servers)
return _servers_deques[key]
def _parse_connection_string(connstr):
"""
MSSQL style connection string parser
Returns normalized dictionary of connection string parameters
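Example (illustrative):
'Data Source=myserver;Initial Catalog=mydb'
-> {'data_source': 'myserver', 'initial_catalog': 'mydb'}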
"""
res = {}
for item in connstr.split(';'):
item = item.strip()
if not item:
continue
key, value = item.split('=', 1)
key = key.strip().lower().replace(' ', '_')
value = value.strip()
res[key] = value
return res
def connect(dsn=None, database=None, user=None, password=None, timeout=None,
login_timeout=15, as_dict=None,
appname=None, port=None, tds_version=tds_base.TDS74,
autocommit=False,
blocksize=4096, use_mars=False, auth=None, readonly=False,
load_balancer=None, use_tz=None, bytes_to_unicode=True,
row_strategy=None, failover_partner=None, server=None,
cafile=None, sock=None, validate_host=True,
enc_login_only=False, disable_connect_retry=False,
pooling=False,
use_sso=False,
):
"""
Opens connection to the database
:keyword dsn: SQL server host and instance: <host>[\\<instance>]
:type dsn: string
:keyword failover_partner: secondary database host, used if primary is not accessible
:type failover_partner: string
:keyword database: the database to initially connect to
:type database: string
:keyword user: database user to connect as
:type user: string
:keyword password: user's password
:type password: string
:keyword timeout: query timeout in seconds, default 0 (no timeout)
:type timeout: int
:keyword login_timeout: timeout for connection and login in seconds, default 15
:type login_timeout: int
:keyword as_dict: whether rows should be returned as dictionaries instead of tuples.
:type as_dict: boolean
:keyword appname: Set the application name to use for the connection
:type appname: string
:keyword port: the TCP port to use to connect to the server
:type port: int
:keyword tds_version: Maximum TDS version to use, should only be used for testing
:type tds_version: int
:keyword autocommit: Enable or disable database level autocommit
:type autocommit: bool
:keyword blocksize: Size of block for the TDS protocol, usually should not be used
:type blocksize: int
:keyword use_mars: Enable or disable MARS
:type use_mars: bool
:keyword auth: An instance of authentication method class, e.g. Ntlm or Sspi
:keyword readonly: Enables read-only mode for the connection, only supported by MSSQL 2012 and newer,
earlier versions will ignore this parameter
:type readonly: bool
:keyword load_balancer: An instance of a load balancer class to use; if not provided, no load balancer is used
:keyword use_tz: Provides a timezone for naive database times; if not provided, dates and times are returned
in naive format
:keyword bytes_to_unicode: If true single byte database strings will be converted to unicode Python strings,
otherwise will return strings as ``bytes`` without conversion.
:type bytes_to_unicode: bool
:keyword row_strategy: strategy used to create rows, determines type of returned rows, can be custom or one of:
:func:`tuple_row_strategy`, :func:`list_row_strategy`, :func:`dict_row_strategy`,
:func:`namedtuple_row_strategy`, :func:`recordtype_row_strategy`
:type row_strategy: function of list of column names returning row factory
:keyword cafile: Name of the file containing trusted CAs in PEM format, if provided will enable TLS
:type cafile: str
:keyword validate_host: Host name validation during TLS connection is enabled by default; if you disable it you
will be vulnerable to MitM attacks.
:type validate_host: bool
:keyword enc_login_only: Allows you to scope TLS encryption only to an authentication portion. This means that
anyone who can observe traffic on your network will be able to see all your SQL requests and potentially modify
them.
:type enc_login_only: bool
:keyword use_sso: Enables SSO login, e.g. Kerberos using SSPI on Windows and kerberos package on other platforms.
Cannot be used together with auth parameter.
:returns: An instance of :class:`Connection`
"""
if use_sso and auth:
raise ValueError('use_sso cannot be used with auth parameter defined')
login = _TdsLogin()
login.client_host_name = socket.gethostname()[:128]
login.library = "Python TDS Library"
login.user_name = user or ''
login.password = password or ''
login.app_name = appname or 'pytds'
login.port = port
login.language = '' # use database default
login.attach_db_file = ''
login.tds_version = tds_version
if tds_version < tds_base.TDS70:
raise ValueError('This TDS version is not supported')
login.database = database or ''
login.bulk_copy = False
login.client_lcid = lcid.LANGID_ENGLISH_US
login.use_mars = use_mars
login.pid = os.getpid()
login.change_password = ''
login.client_id = uuid.getnode() # client mac address
login.cafile = cafile
login.validate_host = validate_host
login.enc_login_only = enc_login_only
if cafile:
if not tls.OPENSSL_AVAILABLE:
raise ValueError("You are trying to use encryption but pyOpenSSL does not work, you probably "
"need to install it first")
login.tls_ctx = tls.create_context(cafile)
if login.enc_login_only:
login.enc_flag = PreLoginEnc.ENCRYPT_OFF
else:
login.enc_flag = PreLoginEnc.ENCRYPT_ON
else:
login.tls_ctx = None
login.enc_flag = PreLoginEnc.ENCRYPT_NOT_SUP
if use_tz:
login.client_tz = use_tz
else:
login.client_tz = pytds.tz.local
# that will set:
# ANSI_DEFAULTS to ON,
# IMPLICIT_TRANSACTIONS to OFF,
# TEXTSIZE to 0x7FFFFFFF (2GB) (TDS 7.2 and below), TEXTSIZE to infinite (introduced in TDS 7.3),
# and ROWCOUNT to infinite
login.option_flag2 = tds_base.TDS_ODBC_ON
login.connect_timeout = login_timeout
login.query_timeout = timeout
login.blocksize = blocksize
login.readonly = readonly
login.load_balancer = load_balancer
login.bytes_to_unicode = bytes_to_unicode
if server and dsn:
raise ValueError("Both server and dsn shouldn't be specified")
if server:
warnings.warn("server parameter is deprecated, use dsn instead", DeprecationWarning)
dsn = server
if load_balancer and failover_partner:
raise ValueError("Both load_balancer and failover_partner shoudln't be specified")
if load_balancer:
servers = [(srv, None) for srv in load_balancer.choose()]
else:
servers = [(dsn or 'localhost', port)]
if failover_partner:
servers.append((failover_partner, port))
parsed_servers = []
for srv, port in servers:
host, instance = _parse_server(srv)
if instance and port:
raise ValueError("Both instance and port shouldn't be specified")
parsed_servers.append((host, port, instance))
if use_sso:
spn = "MSSQLSvc@{}:{}".format(parsed_servers[0][0], parsed_servers[0][1])
from . import login as pytds_login
try:
login.auth = pytds_login.SspiAuth(spn=spn)
except ImportError:
login.auth = pytds_login.KerberosAuth(spn)
else:
login.auth = auth
login.servers = _get_servers_deque(tuple(parsed_servers), database)
# unique connection identifier used to pool connection
key = (
dsn,
login.user_name,
login.app_name,
login.tds_version,
login.database,
login.client_lcid,
login.use_mars,
login.cafile,
login.blocksize,
login.readonly,
login.bytes_to_unicode,
login.auth,
login.client_tz,
autocommit,
)
conn = Connection()
conn._use_tz = use_tz
conn._autocommit = autocommit
conn._login = login
conn._pooling = pooling
conn._key = key
assert row_strategy is None or as_dict is None,\
'Both row_startegy and as_dict were specified, you should use either one or another'
if as_dict is not None:
conn.as_dict = as_dict
elif row_strategy is not None:
conn._row_strategy = row_strategy
else:
conn._row_strategy = tuple_row_strategy # default row strategy
conn._isolation_level = 0
conn._dirty = False
from .tz import FixedOffsetTimezone
conn._tzinfo_factory = None if use_tz is None else FixedOffsetTimezone
if disable_connect_retry:
conn._try_open(timeout=login.connect_timeout, sock=sock)
else:
conn._open(sock=sock)
return conn
def Date(year, month, day):
return datetime.date(year, month, day)
def DateFromTicks(ticks):
return datetime.date.fromtimestamp(ticks)
def Time(hour, minute, second, microsecond=0, tzinfo=None):
return datetime.time(hour, minute, second, microsecond, tzinfo)
def TimeFromTicks(ticks):
import time
return Time(*time.localtime(ticks)[3:6])
def Timestamp(year, month, day, hour, minute, second, microseconds=0, tzinfo=None):
return datetime.datetime(year, month, day, hour, minute, second, microseconds, tzinfo)
def TimestampFromTicks(ticks):
return datetime.datetime.fromtimestamp(ticks) | zato-ext-python-tds | /zato-ext-python-tds-1.11.1.tar.gz/zato-ext-python-tds-1.11.1/src/pytds/__init__.py | __init__.py |
__docformat__ = "restructuredtext en"
__all__ = [
"LANGID_AFRIKAANS", "LANGID_ALBANIAN", "LANGID_AMHARIC", "LANGID_ARABIC",
"LANGID_ARABIC_ALGERIA", "LANGID_ARABIC_BAHRAIN", "LANGID_ARABIC_EGYPT",
"LANGID_ARABIC_IRAQ", "LANGID_ARABIC_JORDAN", "LANGID_ARABIC_KUWAIT",
"LANGID_ARABIC_LEBANON", "LANGID_ARABIC_LIBYA", "LANGID_ARABIC_MOROCCO",
"LANGID_ARABIC_OMAN", "LANGID_ARABIC_QATAR", "LANGID_ARABIC_SYRIA",
"LANGID_ARABIC_TUNISIA", "LANGID_ARABIC_UAE", "LANGID_ARABIC_YEMEN",
"LANGID_ARMENIAN", "LANGID_ASSAMESE", "LANGID_AZERI_CYRILLIC",
"LANGID_AZERI_LATIN", "LANGID_BASQUE", "LANGID_BELGIAN_DUTCH",
"LANGID_BELGIAN_FRENCH", "LANGID_BENGALI", "LANGID_BULGARIAN",
"LANGID_BURMESE", "LANGID_BYELORUSSIAN", "LANGID_CATALAN",
"LANGID_CHEROKEE", "LANGID_CHINESE_HONG_KONG_SAR",
"LANGID_CHINESE_MACAO_SAR", "LANGID_CHINESE_SINGAPORE", "LANGID_CROATIAN",
"LANGID_CZECH", "LANGID_DANISH", "LANGID_DIVEHI", "LANGID_DUTCH",
"LANGID_EDO", "LANGID_ENGLISH_AUS", "LANGID_ENGLISH_BELIZE",
"LANGID_ENGLISH_CANADIAN", "LANGID_ENGLISH_CARIBBEAN",
"LANGID_ENGLISH_INDONESIA", "LANGID_ENGLISH_IRELAND",
"LANGID_ENGLISH_JAMAICA", "LANGID_ENGLISH_NEW_ZEALAND",
"LANGID_ENGLISH_PHILIPPINES", "LANGID_ENGLISH_SOUTH_AFRICA",
"LANGID_ENGLISH_TRINIDAD_TOBAGO", "LANGID_ENGLISH_UK", "LANGID_ENGLISH_US",
"LANGID_ENGLISH_ZIMBABWE", "LANGID_ESTONIAN", "LANGID_FAEROESE",
"LANGID_FILIPINO", "LANGID_FINNISH", "LANGID_FRENCH",
"LANGID_FRENCH_CAMEROON", "LANGID_FRENCH_CANADIAN",
"LANGID_FRENCH_CONGO_D_R_C", "LANGID_FRENCH_COTED_IVOIRE",
"LANGID_FRENCH_HAITI", "LANGID_FRENCH_LUXEMBOURG", "LANGID_FRENCH_MALI",
"LANGID_FRENCH_MONACO", "LANGID_FRENCH_MOROCCO", "LANGID_FRENCH_REUNION",
"LANGID_FRENCH_SENEGAL", "LANGID_FRENCH_WEST_INDIES",
"LANGID_FRISIAN_NETHERLANDS", "LANGID_FULFULDE", "LANGID_GAELIC_IRELAND",
"LANGID_GAELIC_SCOTLAND", "LANGID_GALICIAN", "LANGID_GEORGIAN",
"LANGID_GERMAN", "LANGID_GERMAN_AUSTRIA", "LANGID_GERMAN_LIECHTENSTEIN",
"LANGID_GERMAN_LUXEMBOURG", "LANGID_GREEK", "LANGID_GUARANI",
"LANGID_GUJARATI", "LANGID_HAUSA", "LANGID_HAWAIIAN", "LANGID_HEBREW",
"LANGID_HINDI", "LANGID_HUNGARIAN", "LANGID_IBIBIO", "LANGID_ICELANDIC",
"LANGID_IGBO", "LANGID_INDONESIAN", "LANGID_INUKTITUT", "LANGID_ITALIAN",
"LANGID_JAPANESE", "LANGID_KANNADA", "LANGID_KANURI", "LANGID_KASHMIRI",
"LANGID_KAZAKH", "LANGID_KHMER", "LANGID_KIRGHIZ", "LANGID_KONKANI",
"LANGID_KOREAN", "LANGID_KYRGYZ", "LANGID_LANGUAGE_NONE", "LANGID_LAO",
"LANGID_LATIN", "LANGID_LATVIAN", "LANGID_LITHUANIAN",
"LANGID_MACEDONIAN_FYROM", "LANGID_MALAYALAM", "LANGID_MALAYSIAN",
"LANGID_MALAY_BRUNEI_DARUSSALAM", "LANGID_MALTESE", "LANGID_MANIPURI",
"LANGID_MARATHI", "LANGID_MEXICAN_SPANISH", "LANGID_MONGOLIAN",
"LANGID_NEPALI", "LANGID_NORWEGIAN_BOKMOL", "LANGID_NORWEGIAN_NYNORSK",
"LANGID_NO_PROOFING", "LANGID_ORIYA", "LANGID_OROMO", "LANGID_PASHTO",
"LANGID_PERSIAN", "LANGID_POLISH", "LANGID_PORTUGUESE",
"LANGID_PORTUGUESE_BRAZIL", "LANGID_PUNJABI", "LANGID_RHAETO_ROMANIC",
"LANGID_ROMANIAN", "LANGID_ROMANIAN_MOLDOVA", "LANGID_RUSSIAN",
"LANGID_RUSSIAN_MOLDOVA", "LANGID_SAMI_LAPPISH", "LANGID_SANSKRIT",
"LANGID_SERBIAN_CYRILLIC", "LANGID_SERBIAN_LATIN", "LANGID_SESOTHO",
"LANGID_SIMPLIFIED_CHINESE", "LANGID_SINDHI", "LANGID_SINDHI_PAKISTAN",
"LANGID_SINHALESE", "LANGID_SLOVAK", "LANGID_SLOVENIAN", "LANGID_SOMALI",
"LANGID_SORBIAN", "LANGID_SPANISH", "LANGID_SPANISH_ARGENTINA",
"LANGID_SPANISH_BOLIVIA", "LANGID_SPANISH_CHILE",
"LANGID_SPANISH_COLOMBIA", "LANGID_SPANISH_COSTA_RICA",
"LANGID_SPANISH_DOMINICAN_REPUBLIC", "LANGID_SPANISH_ECUADOR",
"LANGID_SPANISH_EL_SALVADOR", "LANGID_SPANISH_GUATEMALA",
"LANGID_SPANISH_HONDURAS", "LANGID_SPANISH_MODERN_SORT",
"LANGID_SPANISH_NICARAGUA", "LANGID_SPANISH_PANAMA",
"LANGID_SPANISH_PARAGUAY", "LANGID_SPANISH_PERU",
"LANGID_SPANISH_PUERTO_RICO", "LANGID_SPANISH_URUGUAY",
"LANGID_SPANISH_VENEZUELA", "LANGID_SUTU", "LANGID_SWAHILI",
"LANGID_SWEDISH", "LANGID_SWEDISH_FINLAND", "LANGID_SWISS_FRENCH",
"LANGID_SWISS_GERMAN", "LANGID_SWISS_ITALIAN", "LANGID_SYRIAC",
"LANGID_TAJIK", "LANGID_TAMAZIGHT", "LANGID_TAMAZIGHT_LATIN",
"LANGID_TAMIL", "LANGID_TATAR", "LANGID_TELUGU", "LANGID_THAI",
"LANGID_TIBETAN", "LANGID_TIGRIGNA_ERITREA", "LANGID_TIGRIGNA_ETHIOPIC",
"LANGID_TRADITIONAL_CHINESE", "LANGID_TSONGA", "LANGID_TSWANA",
"LANGID_TURKISH", "LANGID_TURKMEN", "LANGID_UKRAINIAN", "LANGID_URDU",
"LANGID_UZBEK_CYRILLIC", "LANGID_UZBEK_LATIN", "LANGID_VENDA",
"LANGID_VIETNAMESE", "LANGID_WELSH", "LANGID_XHOSA", "LANGID_YI",
"LANGID_YIDDISH", "LANGID_YORUBA", "LANGID_ZULU",
"lang_id_names"
]
LANGID_AFRIKAANS = 1078
LANGID_ALBANIAN = 1052
LANGID_AMHARIC = 1118
LANGID_ARABIC = 1025
LANGID_ARABIC_ALGERIA = 5121
LANGID_ARABIC_BAHRAIN = 15361
LANGID_ARABIC_EGYPT = 3073
LANGID_ARABIC_IRAQ = 2049
LANGID_ARABIC_JORDAN = 11265
LANGID_ARABIC_KUWAIT = 13313
LANGID_ARABIC_LEBANON = 12289
LANGID_ARABIC_LIBYA = 4097
LANGID_ARABIC_MOROCCO = 6145
LANGID_ARABIC_OMAN = 8193
LANGID_ARABIC_QATAR = 16385
LANGID_ARABIC_SYRIA = 10241
LANGID_ARABIC_TUNISIA = 7169
LANGID_ARABIC_UAE = 14337
LANGID_ARABIC_YEMEN = 9217
LANGID_ARMENIAN = 1067
LANGID_ASSAMESE = 1101
LANGID_AZERI_CYRILLIC = 2092
LANGID_AZERI_LATIN = 1068
LANGID_BASQUE = 1069
LANGID_BELGIAN_DUTCH = 2067
LANGID_BELGIAN_FRENCH = 2060
LANGID_BENGALI = 1093
LANGID_BULGARIAN = 1026
LANGID_BURMESE = 1109
LANGID_BYELORUSSIAN = 1059
LANGID_CATALAN = 1027
LANGID_CHEROKEE = 1116
LANGID_CHINESE_HONG_KONG_SAR = 3076
LANGID_CHINESE_MACAO_SAR = 5124
LANGID_CHINESE_SINGAPORE = 4100
LANGID_CROATIAN = 1050
LANGID_CZECH = 1029
LANGID_DANISH = 1030
LANGID_DIVEHI = 1125
LANGID_DUTCH = 1043
LANGID_EDO = 1126
LANGID_ENGLISH_AUS = 3081
LANGID_ENGLISH_BELIZE = 10249
LANGID_ENGLISH_CANADIAN = 4105
LANGID_ENGLISH_CARIBBEAN = 9225
LANGID_ENGLISH_INDONESIA = 14345
LANGID_ENGLISH_IRELAND = 6153
LANGID_ENGLISH_JAMAICA = 8201
LANGID_ENGLISH_NEW_ZEALAND = 5129
LANGID_ENGLISH_PHILIPPINES = 13321
LANGID_ENGLISH_SOUTH_AFRICA = 7177
LANGID_ENGLISH_TRINIDAD_TOBAGO = 11273
LANGID_ENGLISH_UK = 2057
LANGID_ENGLISH_US = 1033
LANGID_ENGLISH_ZIMBABWE = 12297
LANGID_ESTONIAN = 1061
LANGID_FAEROESE = 1080
LANGID_FILIPINO = 1124
LANGID_FINNISH = 1035
LANGID_FRENCH = 1036
LANGID_FRENCH_CAMEROON = 11276
LANGID_FRENCH_CANADIAN = 3084
LANGID_FRENCH_CONGO_D_R_C = 9228
LANGID_FRENCH_COTED_IVOIRE = 12300
LANGID_FRENCH_HAITI = 15372
LANGID_FRENCH_LUXEMBOURG = 5132
LANGID_FRENCH_MALI = 13324
LANGID_FRENCH_MONACO = 6156
LANGID_FRENCH_MOROCCO = 14348
LANGID_FRENCH_REUNION = 8204
LANGID_FRENCH_SENEGAL = 10252
LANGID_FRENCH_WEST_INDIES = 7180
LANGID_FRISIAN_NETHERLANDS = 1122
LANGID_FULFULDE = 1127
LANGID_GAELIC_IRELAND = 2108
LANGID_GAELIC_SCOTLAND = 1084
LANGID_GALICIAN = 1110
LANGID_GEORGIAN = 1079
LANGID_GERMAN = 1031
LANGID_GERMAN_AUSTRIA = 3079
LANGID_GERMAN_LIECHTENSTEIN = 5127
LANGID_GERMAN_LUXEMBOURG = 4103
LANGID_GREEK = 1032
LANGID_GUARANI = 1140
LANGID_GUJARATI = 1095
LANGID_HAUSA = 1128
LANGID_HAWAIIAN = 1141
LANGID_HEBREW = 1037
LANGID_HINDI = 1081
LANGID_HUNGARIAN = 1038
LANGID_IBIBIO = 1129
LANGID_ICELANDIC = 1039
LANGID_IGBO = 1136
LANGID_INDONESIAN = 1057
LANGID_INUKTITUT = 1117
LANGID_ITALIAN = 1040
LANGID_JAPANESE = 1041
LANGID_KANNADA = 1099
LANGID_KANURI = 1137
LANGID_KASHMIRI = 1120
LANGID_KAZAKH = 1087
LANGID_KHMER = 1107
LANGID_KIRGHIZ = 1088
LANGID_KONKANI = 1111
LANGID_KOREAN = 1042
LANGID_KYRGYZ = 1088
LANGID_LANGUAGE_NONE = 0
LANGID_LAO = 1108
LANGID_LATIN = 1142
LANGID_LATVIAN = 1062
LANGID_LITHUANIAN = 1063
LANGID_MACEDONIAN_FYROM = 1071
LANGID_MALAYALAM = 1100
LANGID_MALAY_BRUNEI_DARUSSALAM = 2110
LANGID_MALAYSIAN = 1086
LANGID_MALTESE = 1082
LANGID_MANIPURI = 1112
LANGID_MARATHI = 1102
LANGID_MEXICAN_SPANISH = 2058
LANGID_MONGOLIAN = 1104
LANGID_NEPALI = 1121
LANGID_NO_PROOFING = 1024
LANGID_NORWEGIAN_BOKMOL = 1044
LANGID_NORWEGIAN_NYNORSK = 2068
LANGID_ORIYA = 1096
LANGID_OROMO = 1138
LANGID_PASHTO = 1123
LANGID_PERSIAN = 1065
LANGID_POLISH = 1045
LANGID_PORTUGUESE = 2070
LANGID_PORTUGUESE_BRAZIL = 1046
LANGID_PUNJABI = 1094
LANGID_RHAETO_ROMANIC = 1047
LANGID_ROMANIAN = 1048
LANGID_ROMANIAN_MOLDOVA = 2072
LANGID_RUSSIAN = 1049
LANGID_RUSSIAN_MOLDOVA = 2073
LANGID_SAMI_LAPPISH = 1083
LANGID_SANSKRIT = 1103
LANGID_SERBIAN_CYRILLIC = 3098
LANGID_SERBIAN_LATIN = 2074
LANGID_SESOTHO = 1072
LANGID_SIMPLIFIED_CHINESE = 2052
LANGID_SINDHI = 1113
LANGID_SINDHI_PAKISTAN = 2137
LANGID_SINHALESE = 1115
LANGID_SLOVAK = 1051
LANGID_SLOVENIAN = 1060
LANGID_SOMALI = 1143
LANGID_SORBIAN = 1070
LANGID_SPANISH = 1034
LANGID_SPANISH_ARGENTINA = 11274
LANGID_SPANISH_BOLIVIA = 16394
LANGID_SPANISH_CHILE = 13322
LANGID_SPANISH_COLOMBIA = 9226
LANGID_SPANISH_COSTA_RICA = 5130
LANGID_SPANISH_DOMINICAN_REPUBLIC = 7178
LANGID_SPANISH_ECUADOR = 12298
LANGID_SPANISH_EL_SALVADOR = 17418
LANGID_SPANISH_GUATEMALA = 4106
LANGID_SPANISH_HONDURAS = 18442
LANGID_SPANISH_MODERN_SORT = 3082
LANGID_SPANISH_NICARAGUA = 19466
LANGID_SPANISH_PANAMA = 6154
LANGID_SPANISH_PARAGUAY = 15370
LANGID_SPANISH_PERU = 10250
LANGID_SPANISH_PUERTO_RICO = 20490
LANGID_SPANISH_URUGUAY = 14346
LANGID_SPANISH_VENEZUELA = 8202
LANGID_SUTU = 1072
LANGID_SWAHILI = 1089
LANGID_SWEDISH = 1053
LANGID_SWEDISH_FINLAND = 2077
LANGID_SWISS_FRENCH = 4108
LANGID_SWISS_GERMAN = 2055
LANGID_SWISS_ITALIAN = 2064
LANGID_SYRIAC = 1114
LANGID_TAJIK = 1064
LANGID_TAMAZIGHT = 1119
LANGID_TAMAZIGHT_LATIN = 2143
LANGID_TAMIL = 1097
LANGID_TATAR = 1092
LANGID_TELUGU = 1098
LANGID_THAI = 1054
LANGID_TIBETAN = 1105
LANGID_TIGRIGNA_ERITREA = 2163
LANGID_TIGRIGNA_ETHIOPIC = 1139
LANGID_TRADITIONAL_CHINESE = 1028
LANGID_TSONGA = 1073
LANGID_TSWANA = 1074
LANGID_TURKISH = 1055
LANGID_TURKMEN = 1090
LANGID_UKRAINIAN = 1058
LANGID_URDU = 1056
LANGID_UZBEK_CYRILLIC = 2115
LANGID_UZBEK_LATIN = 1091
LANGID_VENDA = 1075
LANGID_VIETNAMESE = 1066
LANGID_WELSH = 1106
LANGID_XHOSA = 1076
LANGID_YI = 1144
LANGID_YIDDISH = 1085
LANGID_YORUBA = 1130
LANGID_ZULU = 1077
lang_id_names = {
LANGID_AFRIKAANS: "African",
LANGID_ALBANIAN: "Albanian",
LANGID_AMHARIC: "Amharic",
LANGID_ARABIC: "Arabic",
LANGID_ARABIC_ALGERIA: "Arabic Algerian",
LANGID_ARABIC_BAHRAIN: "Arabic Bahraini",
LANGID_ARABIC_EGYPT: "Arabic Egyptian",
LANGID_ARABIC_IRAQ: "Arabic Iraqi",
LANGID_ARABIC_JORDAN: "Arabic Jordanian",
LANGID_ARABIC_KUWAIT: "Arabic Kuwaiti",
LANGID_ARABIC_LEBANON: "Arabic Lebanese",
LANGID_ARABIC_LIBYA: "Arabic Libyan",
LANGID_ARABIC_MOROCCO: "Arabic Moroccan",
LANGID_ARABIC_OMAN: "Arabic Omani",
LANGID_ARABIC_QATAR: "Arabic Qatari",
LANGID_ARABIC_SYRIA: "Arabic Syrian",
LANGID_ARABIC_TUNISIA: "Arabic Tunisian",
LANGID_ARABIC_UAE: "Arabic United Arab Emirates",
LANGID_ARABIC_YEMEN: "Arabic Yemeni",
LANGID_ARMENIAN: "Armenian",
LANGID_ASSAMESE: "Assamese",
LANGID_AZERI_CYRILLIC: "Azeri Cyrillic",
LANGID_AZERI_LATIN: "Azeri Latin",
LANGID_BASQUE: "Basque",
LANGID_BELGIAN_DUTCH: "Belgian Dutch",
LANGID_BELGIAN_FRENCH: "Belgian French",
LANGID_BENGALI: "Bengali",
LANGID_BULGARIAN: "Bulgarian",
LANGID_BURMESE: "Burmese",
LANGID_BYELORUSSIAN: "Byelorussian",
LANGID_CATALAN: "Catalan",
LANGID_CHEROKEE: "Cherokee",
LANGID_CHINESE_HONG_KONG_SAR: "Chinese Hong Kong SAR",
LANGID_CHINESE_MACAO_SAR: "Chinese Macao SAR",
LANGID_CHINESE_SINGAPORE: "Chinese Singapore",
LANGID_CROATIAN: "Croatian",
LANGID_CZECH: "Czech",
LANGID_DANISH: "Danish",
LANGID_DIVEHI: "Divehi",
LANGID_DUTCH: "Dutch",
LANGID_EDO: "Edo",
LANGID_ENGLISH_AUS: "Australian English",
LANGID_ENGLISH_BELIZE: "Belize English",
LANGID_ENGLISH_CANADIAN: "Canadian English",
LANGID_ENGLISH_CARIBBEAN: "Caribbean English",
LANGID_ENGLISH_INDONESIA: "Indonesian English",
LANGID_ENGLISH_IRELAND: "Irish English",
LANGID_ENGLISH_JAMAICA: "Jamaican English",
LANGID_ENGLISH_NEW_ZEALAND: "New Zealand English",
LANGID_ENGLISH_PHILIPPINES: "Filipino English",
LANGID_ENGLISH_SOUTH_AFRICA: "South African English",
LANGID_ENGLISH_TRINIDAD_TOBAGO: "Tobago Trinidad English",
LANGID_ENGLISH_UK: "United Kingdom English",
LANGID_ENGLISH_US: "United States English",
LANGID_ENGLISH_ZIMBABWE: "Zimbabwe English",
LANGID_ESTONIAN: "Estonian",
LANGID_FAEROESE: "Faeroese",
LANGID_FILIPINO: "Filipino",
LANGID_FINNISH: "Finnish",
LANGID_FRENCH: "French",
LANGID_FRENCH_CAMEROON: "French Cameroon",
LANGID_FRENCH_CANADIAN: "French Canadian",
LANGID_FRENCH_CONGO_D_R_C: "French (Congo (DRC))",
LANGID_FRENCH_COTED_IVOIRE: "French Cote d'Ivoire",
LANGID_FRENCH_HAITI: "French Haiti",
LANGID_FRENCH_LUXEMBOURG: "French Luxembourg",
LANGID_FRENCH_MALI: "French Mali",
LANGID_FRENCH_MONACO: "French Monaco",
LANGID_FRENCH_MOROCCO: "French Morocco",
LANGID_FRENCH_REUNION: "French Reunion",
LANGID_FRENCH_SENEGAL: "French Senegal",
LANGID_FRENCH_WEST_INDIES: "French West Indies",
LANGID_FRISIAN_NETHERLANDS: "Frisian Netherlands",
LANGID_FULFULDE: "Fulfulde",
LANGID_GAELIC_IRELAND: "Gaelic Irish",
LANGID_GAELIC_SCOTLAND: "Gaelic Scottish",
LANGID_GALICIAN: "Galician",
LANGID_GEORGIAN: "Georgian",
LANGID_GERMAN: "German",
LANGID_GERMAN_AUSTRIA: "German Austrian",
LANGID_GERMAN_LIECHTENSTEIN: "German Liechtenstein",
LANGID_GERMAN_LUXEMBOURG: "German Luxembourg",
LANGID_GREEK: "Greek",
LANGID_GUARANI: "Guarani",
LANGID_GUJARATI: "Gujarati",
LANGID_HAUSA: "Hausa",
LANGID_HAWAIIAN: "Hawaiian",
LANGID_HEBREW: "Hebrew",
LANGID_HINDI: "Hindi",
LANGID_HUNGARIAN: "Hungarian",
LANGID_IBIBIO: "Ibibio",
LANGID_ICELANDIC: "Icelandic",
LANGID_IGBO: "Igbo",
LANGID_INDONESIAN: "Indonesian",
LANGID_INUKTITUT: "Inuktitut",
LANGID_ITALIAN: "Italian",
LANGID_JAPANESE: "Japanese",
LANGID_KANNADA: "Kannada",
LANGID_KANURI: "Kanuri",
LANGID_KASHMIRI: "Kashmiri",
LANGID_KAZAKH: "Kazakh",
LANGID_KHMER: "Khmer",
LANGID_KIRGHIZ: "Kirghiz",
LANGID_KONKANI: "Konkani",
LANGID_KOREAN: "Korean",
LANGID_KYRGYZ: "Kyrgyz",
LANGID_LANGUAGE_NONE: "No specified",
LANGID_LAO: "Lao",
LANGID_LATIN: "Latin",
LANGID_LATVIAN: "Latvian",
LANGID_LITHUANIAN: "Lithuanian",
LANGID_MACEDONIAN_FYROM: "Macedonian (FYROM)",
LANGID_MALAYALAM: "Malayalam",
LANGID_MALAY_BRUNEI_DARUSSALAM: "Malay Brunei Darussalam",
LANGID_MALAYSIAN: "Malaysian",
LANGID_MALTESE: "Maltese",
LANGID_MANIPURI: "Manipuri",
LANGID_MARATHI: "Marathi",
LANGID_MEXICAN_SPANISH: "Mexican Spanish",
LANGID_MONGOLIAN: "Mongolian",
LANGID_NEPALI: "Nepali",
LANGID_NO_PROOFING: "Disables proofing",
LANGID_NORWEGIAN_BOKMOL: "Norwegian Bokmol",
LANGID_NORWEGIAN_NYNORSK: "Norwegian Nynorsk",
LANGID_ORIYA: "Oriya",
LANGID_OROMO: "Oromo",
LANGID_PASHTO: "Pashto",
LANGID_PERSIAN: "Persian",
LANGID_POLISH: "Polish",
LANGID_PORTUGUESE: "Portuguese",
LANGID_PORTUGUESE_BRAZIL: "Portuguese (Brazil)",
LANGID_PUNJABI: "Punjabi",
LANGID_RHAETO_ROMANIC: "Rhaeto Romanic",
LANGID_ROMANIAN: "Romanian",
LANGID_ROMANIAN_MOLDOVA: "Romanian Moldova",
LANGID_RUSSIAN: "Russian",
LANGID_RUSSIAN_MOLDOVA: "Russian Moldova",
LANGID_SAMI_LAPPISH: "Sami Lappish",
LANGID_SANSKRIT: "Sanskrit",
LANGID_SERBIAN_CYRILLIC: "Serbian Cyrillic",
LANGID_SERBIAN_LATIN: "Serbian Latin",
LANGID_SESOTHO: "Sesotho",
LANGID_SIMPLIFIED_CHINESE: "Simplified Chinese",
LANGID_SINDHI: "Sindhi",
LANGID_SINDHI_PAKISTAN: "Sindhi (Pakistan)",
LANGID_SINHALESE: "Sinhalese",
LANGID_SLOVAK: "Slovakian",
LANGID_SLOVENIAN: "Slovenian",
LANGID_SOMALI: "Somali",
LANGID_SORBIAN: "Sorbian",
LANGID_SPANISH: "Spanish",
LANGID_SPANISH_ARGENTINA: "Spanish Argentina",
LANGID_SPANISH_BOLIVIA: "Spanish Bolivian",
LANGID_SPANISH_CHILE: "Spanish Chilean",
LANGID_SPANISH_COLOMBIA: "Spanish Colombian",
LANGID_SPANISH_COSTA_RICA: "Spanish Costa Rican",
LANGID_SPANISH_DOMINICAN_REPUBLIC: "Spanish Dominican Republic",
LANGID_SPANISH_ECUADOR: "Spanish Ecuadorian",
LANGID_SPANISH_EL_SALVADOR: "Spanish El Salvadorian",
LANGID_SPANISH_GUATEMALA: "Spanish Guatemala",
LANGID_SPANISH_HONDURAS: "Spanish Honduran",
LANGID_SPANISH_MODERN_SORT: "Spanish Modern Sort",
LANGID_SPANISH_NICARAGUA: "Spanish Nicaraguan",
LANGID_SPANISH_PANAMA: "Spanish Panamanian",
LANGID_SPANISH_PARAGUAY: "Spanish Paraguayan",
LANGID_SPANISH_PERU: "Spanish Peruvian",
LANGID_SPANISH_PUERTO_RICO: "Spanish Puerto Rican",
LANGID_SPANISH_URUGUAY: "Spanish Uruguayan",
LANGID_SPANISH_VENEZUELA: "Spanish Venezuelan",
LANGID_SUTU: "Sutu",
LANGID_SWAHILI: "Swahili",
LANGID_SWEDISH: "Swedish",
LANGID_SWEDISH_FINLAND: "Swedish Finnish",
LANGID_SWISS_FRENCH: "Swiss French",
LANGID_SWISS_GERMAN: "Swiss German",
LANGID_SWISS_ITALIAN: "Swiss Italian",
LANGID_SYRIAC: "Syriac",
LANGID_TAJIK: "Tajik",
LANGID_TAMAZIGHT: "Tamazight",
LANGID_TAMAZIGHT_LATIN: "Tamazight Latin",
LANGID_TAMIL: "Tamil",
LANGID_TATAR: "Tatar",
LANGID_TELUGU: "Telugu",
LANGID_THAI: "Thai",
LANGID_TIBETAN: "Tibetan",
LANGID_TIGRIGNA_ERITREA: "Tigrigna Eritrea",
LANGID_TIGRIGNA_ETHIOPIC: "Tigrigna Ethiopic",
LANGID_TRADITIONAL_CHINESE: "Traditional Chinese",
LANGID_TSONGA: "Tsonga",
LANGID_TSWANA: "Tswana",
LANGID_TURKISH: "Turkish",
LANGID_TURKMEN: "Turkmen",
LANGID_UKRAINIAN: "Ukrainian",
LANGID_URDU: "Urdu",
LANGID_UZBEK_CYRILLIC: "Uzbek Cyrillic",
LANGID_UZBEK_LATIN: "Uzbek Latin",
LANGID_VENDA: "Venda",
LANGID_VIETNAMESE: "Vietnamese",
LANGID_WELSH: "Welsh",
LANGID_XHOSA: "Xhosa",
LANGID_YI: "Yi",
LANGID_YIDDISH: "Yiddish",
LANGID_YORUBA: "Yoruba",
LANGID_ZULU: "Zulu"
} | zato-ext-python-tds | /zato-ext-python-tds-1.11.1.tar.gz/zato-ext-python-tds-1.11.1/src/pytds/lcid.py | lcid.py |
import codecs
import struct
TDS_CHARSET_ISO_8859_1 = 1
TDS_CHARSET_CP1251 = 2
TDS_CHARSET_CP1252 = 3
TDS_CHARSET_UCS_2LE = 4
TDS_CHARSET_UNICODE = 5
ucs2_codec = codecs.lookup('utf_16_le')
def sortid2charset(sort_id):
sql_collate = sort_id
#
# The table from the MSQLServer reference "Windows Collation Designators"
# and from " NLS Information for Microsoft Windows XP"
#
if sql_collate in (
30, # SQL_Latin1_General_CP437_BIN
31, # SQL_Latin1_General_CP437_CS_AS
32, # SQL_Latin1_General_CP437_CI_AS
33, # SQL_Latin1_General_Pref_CP437_CI_AS
34): # SQL_Latin1_General_CP437_CI_AI
return 'CP437'
elif sql_collate in (
40, # SQL_Latin1_General_CP850_BIN
41, # SQL_Latin1_General_CP850_CS_AS
42, # SQL_Latin1_General_CP850_CI_AS
43, # SQL_Latin1_General_Pref_CP850_CI_AS
44, # SQL_Latin1_General_CP850_CI_AI
49, # SQL_1xCompat_CP850_CI_AS
55, # SQL_AltDiction_CP850_CS_AS
56, # SQL_AltDiction_Pref_CP850_CI_AS
57, # SQL_AltDiction_CP850_CI_AI
58, # SQL_Scandinavian_Pref_CP850_CI_AS
59, # SQL_Scandinavian_CP850_CS_AS
60, # SQL_Scandinavian_CP850_CI_AS
61): # SQL_AltDiction_CP850_CI_AS
return 'CP850'
elif sql_collate in (
80, # SQL_Latin1_General_1250_BIN
81, # SQL_Latin1_General_CP1250_CS_AS
82, # SQL_Latin1_General_Cp1250_CI_AS_KI_WI
83, # SQL_Czech_Cp1250_CS_AS_KI_WI
84, # SQL_Czech_Cp1250_CI_AS_KI_WI
85, # SQL_Hungarian_Cp1250_CS_AS_KI_WI
86, # SQL_Hungarian_Cp1250_CI_AS_KI_WI
87, # SQL_Polish_Cp1250_CS_AS_KI_WI
88, # SQL_Polish_Cp1250_CI_AS_KI_WI
89, # SQL_Romanian_Cp1250_CS_AS_KI_WI
90, # SQL_Romanian_Cp1250_CI_AS_KI_WI
91, # SQL_Croatian_Cp1250_CS_AS_KI_WI
92, # SQL_Croatian_Cp1250_CI_AS_KI_WI
93, # SQL_Slovak_Cp1250_CS_AS_KI_WI
94, # SQL_Slovak_Cp1250_CI_AS_KI_WI
95, # SQL_Slovenian_Cp1250_CS_AS_KI_WI
96, # SQL_Slovenian_Cp1250_CI_AS_KI_WI
):
return 'CP1250'
elif sql_collate in (
104, # SQL_Latin1_General_1251_BIN
105, # SQL_Latin1_General_CP1251_CS_AS
106, # SQL_Latin1_General_CP1251_CI_AS
107, # SQL_Ukrainian_Cp1251_CS_AS_KI_WI
108, # SQL_Ukrainian_Cp1251_CI_AS_KI_WI
):
return 'CP1251'
elif sql_collate in (
51, # SQL_Latin1_General_Cp1_CS_AS_KI_WI
52, # SQL_Latin1_General_Cp1_CI_AS_KI_WI
53, # SQL_Latin1_General_Pref_Cp1_CI_AS_KI_WI
54, # SQL_Latin1_General_Cp1_CI_AI_KI_WI
183, # SQL_Danish_Pref_Cp1_CI_AS_KI_WI
184, # SQL_SwedishPhone_Pref_Cp1_CI_AS_KI_WI
185, # SQL_SwedishStd_Pref_Cp1_CI_AS_KI_WI
186, # SQL_Icelandic_Pref_Cp1_CI_AS_KI_WI
):
return 'CP1252'
elif sql_collate in (
112, # SQL_Latin1_General_1253_BIN
113, # SQL_Latin1_General_CP1253_CS_AS
114, # SQL_Latin1_General_CP1253_CI_AS
120, # SQL_MixDiction_CP1253_CS_AS
121, # SQL_AltDiction_CP1253_CS_AS
122, # SQL_AltDiction2_CP1253_CS_AS
124, # SQL_Latin1_General_CP1253_CI_AI
):
return 'CP1253'
elif sql_collate in (
128, # SQL_Latin1_General_1254_BIN
129, # SQL_Latin1_General_Cp1254_CS_AS_KI_WI
130, # SQL_Latin1_General_Cp1254_CI_AS_KI_WI
):
return 'CP1254'
elif sql_collate in (
136, # SQL_Latin1_General_1255_BIN
137, # SQL_Latin1_General_CP1255_CS_AS
138, # SQL_Latin1_General_CP1255_CI_AS
):
return 'CP1255'
elif sql_collate in (
144, # SQL_Latin1_General_1256_BIN
145, # SQL_Latin1_General_CP1256_CS_AS
146, # SQL_Latin1_General_CP1256_CI_AS
):
return 'CP1256'
elif sql_collate in (
152, # SQL_Latin1_General_1257_BIN
153, # SQL_Latin1_General_CP1257_CS_AS
154, # SQL_Latin1_General_CP1257_CI_AS
155, # SQL_Estonian_Cp1257_CS_AS_KI_WI
156, # SQL_Estonian_Cp1257_CI_AS_KI_WI
157, # SQL_Latvian_Cp1257_CS_AS_KI_WI
158, # SQL_Latvian_Cp1257_CI_AS_KI_WI
159, # SQL_Lithuanian_Cp1257_CS_AS_KI_WI
160, # SQL_Lithuanian_Cp1257_CI_AS_KI_WI
):
return 'CP1257'
else:
raise Exception("Invalid collation: 0x%X" % (sql_collate, ))
def lcid2charset(lcid):
if lcid in (0x405,
0x40e, # 0x1040e
0x415, 0x418, 0x41a, 0x41b, 0x41c, 0x424,
# 0x81a, seem wrong in XP table TODO check
0x104e):
return 'CP1250'
elif lcid in (0x402, 0x419, 0x422, 0x423, 0x42f, 0x43f,
0x440, 0x444, 0x450,
0x81a, # ??
0x82c, 0x843, 0xc1a):
return 'CP1251'
elif lcid in (0x1007, 0x1009, 0x100a, 0x100c, 0x1407,
0x1409, 0x140a, 0x140c, 0x1809, 0x180a,
0x180c, 0x1c09, 0x1c0a, 0x2009, 0x200a,
0x2409, 0x240a, 0x2809, 0x280a, 0x2c09,
0x2c0a, 0x3009, 0x300a, 0x3409, 0x340a,
0x380a, 0x3c0a, 0x400a, 0x403, 0x406,
0x407, # 0x10407
0x409, 0x40a, 0x40b, 0x40c, 0x40f, 0x410,
0x413, 0x414, 0x416, 0x41d, 0x421, 0x42d,
0x436,
0x437, # 0x10437
0x438,
# 0x439, ??? Unicode only
0x43e, 0x440a, 0x441, 0x456, 0x480a,
0x4c0a, 0x500a, 0x807, 0x809, 0x80a,
0x80c, 0x810, 0x813, 0x814, 0x816,
0x81d, 0x83e, 0xc07, 0xc09, 0xc0a, 0xc0c):
return 'CP1252'
elif lcid == 0x408:
return 'CP1253'
elif lcid in (0x41f, 0x42c, 0x443):
return 'CP1254'
elif lcid == 0x40d:
return 'CP1255'
elif lcid in (0x1001, 0x1401, 0x1801, 0x1c01, 0x2001,
0x2401, 0x2801, 0x2c01, 0x3001, 0x3401,
0x3801, 0x3c01, 0x4001, 0x401, 0x420,
0x429, 0x801, 0xc01):
return 'CP1256'
elif lcid in (0x425, 0x426, 0x427,
0x827): # ??
return 'CP1257'
elif lcid == 0x42a:
return 'CP1258'
elif lcid == 0x41e:
return 'CP874'
elif lcid == 0x411: # 0x10411
return 'CP932'
elif lcid in (0x1004,
0x804): # 0x20804
return 'CP936'
elif lcid == 0x412: # 0x10412
return 'CP949'
elif lcid in (0x1404,
0x404, # 0x30404
0xc04):
return 'CP950'
else:
return 'CP1252'
class Collation(object):
_coll_struct = struct.Struct('<LB')
wire_size = _coll_struct.size
f_ignore_case = 0x100000
f_ignore_accent = 0x200000
f_ignore_width = 0x400000
f_ignore_kana = 0x800000
f_binary = 0x1000000
f_binary2 = 0x2000000
def __init__(self, lcid, sort_id, ignore_case, ignore_accent, ignore_width, ignore_kana, binary, binary2, version):
self.lcid = lcid
self.sort_id = sort_id
self.ignore_case = ignore_case
self.ignore_accent = ignore_accent
self.ignore_width = ignore_width
self.ignore_kana = ignore_kana
self.binary = binary
self.binary2 = binary2
self.version = version
def __repr__(self):
fmt = 'Collation(lcid={0}, sort_id={1}, ignore_case={2}, ignore_accent={3}, ignore_width={4},' \
' ignore_kana={5}, binary={6}, binary2={7}, version={8})'
return fmt.format(
self.lcid,
self.sort_id,
self.ignore_case,
self.ignore_accent,
self.ignore_width,
self.ignore_kana,
self.binary,
self.binary2,
self.version
)
@classmethod
def unpack(cls, b):
lump, sort_id = cls._coll_struct.unpack_from(b)
lcid = lump & 0xfffff
ignore_case = bool(lump & cls.f_ignore_case)
ignore_accent = bool(lump & cls.f_ignore_accent)
ignore_width = bool(lump & cls.f_ignore_width)
ignore_kana = bool(lump & cls.f_ignore_kana)
binary = bool(lump & cls.f_binary)
binary2 = bool(lump & cls.f_binary2)
version = (lump & 0xf0000000) >> 26
return cls(lcid=lcid,
ignore_case=ignore_case,
ignore_accent=ignore_accent,
ignore_width=ignore_width,
ignore_kana=ignore_kana,
binary=binary,
binary2=binary2,
version=version,
sort_id=sort_id)
def pack(self):
lump = 0
lump |= self.lcid & 0xfffff
lump |= (self.version << 26) & 0xf0000000
if self.ignore_case:
lump |= self.f_ignore_case
if self.ignore_accent:
lump |= self.f_ignore_accent
if self.ignore_width:
lump |= self.f_ignore_width
if self.ignore_kana:
lump |= self.f_ignore_kana
if self.binary:
lump |= self.f_binary
if self.binary2:
lump |= self.f_binary2
return self._coll_struct.pack(lump, self.sort_id)
def get_charset(self):
if self.sort_id:
return sortid2charset(self.sort_id)
else:
return lcid2charset(self.lcid)
def get_codec(self):
return codecs.lookup(self.get_charset())
# TODO: define __repr__ and __unicode__
raw_collation = Collation(0, 0, 0, 0, 0, 0, 0, 0, 0) | zato-ext-python-tds | /zato-ext-python-tds-1.11.1.tar.gz/zato-ext-python-tds-1.11.1/src/pytds/collate.py | collate.py |
# stdlib
from traceback import format_exc
# anyjson
from anyjson import dumps, loads
# bunch
from bunch import Bunch
# gevent
from gevent import sleep, spawn, spawn_later
# Zato
from zato.common import ZatoException
from zato.server.service import Service
def _retry_failed_msg(so_far, retry_repeats, service_name, retry_seconds, orig_cid, e):
return '({}/{}) Retry failed for:[{}], retry_seconds:[{}], orig_cid:[{}], e:[{}]'.format(
so_far, retry_repeats, service_name, retry_seconds, orig_cid, format_exc(e))
def _retry_limit_reached_msg(retry_repeats, service_name, retry_seconds, orig_cid):
return '({}/{}) Retry limit reached for:[{}], retry_seconds:[{}], orig_cid:[{}]'.format(
retry_repeats, retry_repeats, service_name, retry_seconds, orig_cid)
class NeedsRetry(ZatoException):
def __init__(self, cid, inner_exc):
self.cid = cid
self.inner_exc = inner_exc
def __repr__(self):
return '<{} at {} cid:[{}] inner_exc:[{}]>'.format(
self.__class__.__name__, hex(id(self)), self.cid, format_exc(self.inner_exc) if self.inner_exc else None)
class RetryFailed(ZatoException):
def __init__(self, remaining, inner_exc):
self.remaining = remaining
self.inner_exc = inner_exc
def __repr__(self):
return '<{} at {} remaining:[{}] inner_exc:[{}]>'.format(
self.__class__.__name__, hex(id(self)), self.remaining, format_exc(self.inner_exc) if self.inner_exc else None)
class _InvokeRetry(Service):
name = 'zato.labs._invoke-retry'
def _retry(self, remaining):
try:
response = self.invoke(self.req_bunch.target, *self.req_bunch.args, **self.req_bunch.kwargs)
except Exception, e:
msg = _retry_failed_msg(
(self.req_bunch.retry_repeats-remaining)+1, self.req_bunch.retry_repeats,
self.req_bunch.target, self.req_bunch.retry_seconds, self.req_bunch.orig_cid, e)
self.logger.info(msg)
raise RetryFailed(remaining-1, e)
else:
return response
def _notify_callback(self, is_ok):
callback_request = {
'ok': is_ok,
'orig_cid': self.req_bunch.orig_cid,
'target': self.req_bunch.target,
'retry_seconds': self.req_bunch.retry_seconds,
'retry_repeats': self.req_bunch.retry_repeats,
'context': self.req_bunch.callback_context
}
self.invoke_async(self.req_bunch.callback, dumps(callback_request))
def _on_retry_finished(self, g):
""" A callback method invoked when a retry finishes. Will decide whether it should be
attempted to retry the invocation again or give up notifying the uses via callback
service if retry limit is reached.
"""
# Was there any exception caught when retrying?
e = g.exception
if e:
# Can we retry again?
if e.remaining:
g = spawn_later(self.req_bunch.retry_seconds, self._retry, e.remaining)
g.link(self._on_retry_finished)
# Reached the limit, warn users in logs, notify callback service and give up.
else:
msg = _retry_limit_reached_msg(self.req_bunch.retry_repeats,
self.req_bunch.target, self.req_bunch.retry_seconds, self.req_bunch.orig_cid)
self.logger.warn(msg)
self._notify_callback(False)
# Let the callback know it's all good
else:
self._notify_callback(True)
def handle(self):
# Convert to bunch so it's easier to read everything
self.req_bunch = Bunch(loads(self.request.payload))
# Initial retry linked to a retry callback
g = spawn(self._retry, self.req_bunch.retry_repeats)
g.link(self._on_retry_finished)
class InvokeRetry(Service):
""" Provides invoke_retry service that lets one invoke service with parametrized
retries.
"""
name = 'zato.labs.invoke-retry'
def _get_retry_settings(self, target, **kwargs):
async_fallback = kwargs.get('async_fallback')
callback = kwargs.get('callback')
callback_context = kwargs.get('callback_context')
retry_repeats = kwargs.get('retry_repeats')
retry_seconds = kwargs.get('retry_seconds')
retry_minutes = kwargs.get('retry_minutes')
if async_fallback:
items = ('callback', 'retry_repeats')
for item in items:
value = kwargs.get(item)
if not value:
msg = 'Could not invoke [{}], {}:[{}] was not given'.format(target, item, value)
self.logger.error(msg)
raise ValueError(msg)
if retry_seconds and retry_minutes:
msg = 'Could not invoke [{}], only one of retry_seconds:[{}] and retry_minutes:[{}] can be given'.format(
target, retry_seconds, retry_minutes)
self.logger.error(msg)
raise ValueError(msg)
if not(retry_seconds or retry_minutes):
msg = 'Could not invoke [{}], exactly one of retry_seconds:[{}] or retry_minutes:[{}] must be given'.format(
target, retry_seconds, retry_minutes)
self.logger.error(msg)
raise ValueError(msg)
try:
self.server.service_store.name_to_impl_name[callback]
except KeyError, e:
msg = 'Service:[{}] does not exist, e:[{}]'.format(callback, format_exc(e))
self.logger.error(msg)
raise ValueError(msg)
# Get rid of arguments our superclass doesn't understand
for item in('async_fallback', 'callback', 'callback_context', 'retry_repeats', 'retry_seconds', 'retry_minutes'):
kwargs.pop(item, True)
# Note that internally we use seconds only.
return async_fallback, callback, callback_context, retry_repeats, retry_seconds or retry_minutes * 60, kwargs
def _invoke_async_retry(self, target, retry_repeats, retry_seconds, orig_cid, callback, callback_context, args, kwargs):
# Request to invoke the background service with ..
retry_request = {
'target': target,
'retry_repeats': retry_repeats,
'retry_seconds': retry_seconds,
'orig_cid': orig_cid,
'callback': callback,
'callback_context': callback_context,
'args': args,
'kwargs': kwargs
}
return self.invoke_async(_InvokeRetry.get_name(), dumps(retry_request))
def invoke_async_retry(self, target, *args, **kwargs):
async_fallback, callback, callback_context, retry_repeats, retry_seconds, kwargs = self._get_retry_settings(target, **kwargs)
return self._invoke_async_retry(target, retry_repeats, retry_seconds, self.cid, callback, callback_context, args, kwargs)
def invoke_retry(self, target, *args, **kwargs):
async_fallback, callback, callback_context, retry_repeats, retry_seconds, kwargs = self._get_retry_settings(target, **kwargs)
# Let's invoke the service and find out if it works, maybe we don't need
# to retry anything.
try:
result = self.invoke(target, *args, **kwargs)
except Exception, e:
msg = 'Could not invoke:[{}], cid:[{}], e:[{}]'.format(target, self.cid, format_exc(e))
self.logger.warn(msg)
# How we handle the exception depends on whether the caller wants us
# to block or prefers if we retry in background.
if async_fallback:
# .. invoke the background service and return CID to the caller.
cid = self._invoke_async_retry(target, retry_repeats, retry_seconds, self.cid, callback, callback_context, args, kwargs)
raise NeedsRetry(cid, e)
# We are to block while repeating
else:
# Repeat the given number of times sleeping for as many seconds as we are told
remaining = retry_repeats
result = None
while remaining > 0:
try:
result = self.invoke(target, *args, **kwargs)
except Exception, e:
msg = _retry_failed_msg((retry_repeats-remaining)+1, retry_repeats, target, retry_seconds, self.cid, e)
self.logger.info(msg)
sleep(retry_seconds)
remaining -= 1
# OK, give up now, there's nothing more we can do
if not result:
msg = _retry_limit_reached_msg(retry_repeats, target, retry_seconds, self.cid)
self.logger.warn(msg)
raise ZatoException(None, msg)
else:
# All good, simply return the response
return result | zato-invoke-retry | /zato-invoke-retry-1.1.1.tar.gz/zato-invoke-retry-1.1.1/src/zato/invoke_retry/__init__.py | __init__.py |
#
# * Django-like Redis pagination - a drop-in replacement except for the __init__ method.
#
# * Originally part of Zato - ESB, SOA and cloud integrations in Python https://zato.io
#
# Examples below are self-contained and ready to copy'n'paste
def list_example():
""" Example list pagination.
"""
from uuid import uuid4
from redis import StrictRedis
from zato.redis_paginator import ListPaginator
conn = StrictRedis()
key = 'paginator:{}'.format(uuid4().hex)
for x in range(1, 18):
conn.rpush(key, x)
p = ListPaginator(conn, key, 6)
print(p.count) # 17
print(p.num_pages) # 3
print(p.page_range) # [1, 2, 3]
page = p.page(3)
print(page) # <Page 3 of 3>
print(page.object_list) # ['13', '14', '15', '16', '17']
conn.delete(key)
def zset_example():
""" Example sorted set pagination.
"""
from uuid import uuid4
from redis import StrictRedis
from zato.redis_paginator import ZSetPaginator
conn = StrictRedis()
key = 'paginator:{}'.format(uuid4().hex)
# 97-114 is 'a' to 'r' in ASCII
for x in range(1, 18):
conn.zadd(key, x, chr(96 + x))
p = ZSetPaginator(conn, key, 6)
print(p.count) # 17
print(p.num_pages) # 3
print(p.page_range) # [1, 2, 3]
page = p.page(3)
print(page) # <Page 3 of 3>
print(page.object_list) # ['m', 'n', 'o', 'p', 'q']
conn.delete(key)
def zset_score_min_max_example():
""" Example sorted set with min/max score pagination.
"""
from uuid import uuid4
from redis import StrictRedis
from zato.redis_paginator import ZSetPaginator
conn = StrictRedis()
key = 'paginator:{}'.format(uuid4().hex)
# 97-114 is 'a' to 'r' in ASCII
for x in range(1, 18):
conn.zadd(key, x, chr(96 + x))
p = ZSetPaginator(conn, key, 2, score_min=5, score_max=13)
print(p.count) # 9
print(p.num_pages) # 5
print(p.page_range) # [1, 2, 3, 4, 5]
page = p.page(3)
print(page) # <Page 3 of 5>
print(page.object_list) # ['i', 'j']
conn.delete(key) | zato-redis-paginator | /zato-redis-paginator-1.0.tar.gz/zato-redis-paginator-1.0/src/zato/redis_paginator/examples.py | examples.py |
#
# * Django-like Redis pagination - a drop-in replacement except for the __init__ method.
#
# * Originally part of Zato - ESB, SOA and cloud integrations in Python https://zato.io
#
# Django
from django.core.paginator import Paginator
# ##############################################################################
class _ListObjectList(object):
""" List-backed list of results to paginate.
"""
def __init__(self, conn, key, *ignored):
self.conn = conn
self.key = key
def __getslice__(self, start, stop):
return self.conn.lrange(self.key, start, stop-1)
def count(self):
return self.conn.llen(self.key)
class _ZSetObjectList(object):
""" Sorted set-backed list of results to paginate.
"""
def __init__(self, conn, key, score_min, score_max):
self.conn = conn
self.key = key
self.score_min = score_min
self.score_max = score_max
self._use_zrangebyscore = score_min != '-inf' or score_max != '+inf'
self._zrangebyscore_results = None
def _get_zrangebyscore(self):
if not self._zrangebyscore_results:
self._zrangebyscore_results = self.conn.zrangebyscore(self.key, self.score_min, self.score_max)
return self._zrangebyscore_results
def __getslice__(self, start, stop):
if self._use_zrangebyscore:
return self._get_zrangebyscore()[start:stop]
else:
return self.conn.zrange(self.key, start, stop-1)
def count(self):
if self._use_zrangebyscore:
return len(self._get_zrangebyscore())
else:
return self.conn.zcard(self.key)
# ##############################################################################
_source_type_object_list = {
'list': _ListObjectList,
'zset': _ZSetObjectList,
}
class RedisPaginator(Paginator):
""" A subclass of Django's paginator that can paginate results kept in Redis.
Data in Redis can be
1) a list,
2) sorted set or
3) a range of a sorted set's members with a score between min and max.
For 1) and 2) data won't be fetched prior to pagination
For 3) however the whole subset as specified by score_min and score_max will be fetched
locally the first time it's needed and any changes in Redis won't be reflected
in the paginator until a new one is created. This is needed because ZRANGEBYSCORE
doesn't provide means to learn how many results there are without first fetching
them so even though the command has a 'LIMIT offset count' parameter, it cannot
be used here.
conn - a connection handle to Redis (subclass of such as redis.StrictRedis)
key - Redis key where data is stored
per_page - how many results per page to return
orphans - as in Django
allow_empty_first_page - as in Django
score_min - (ignored if key is not a list) 'min' parameter to ZRANGEBYSCORE, defaults to '-inf'
score_max - (ignored if key is not a list) 'max' parameter to ZRANGEBYSCORE, defaults to '+inf'
source_type - must be either 'list' or 'zset' to indicate what datatype is kept under given key
"""
def __init__(self, conn, key, per_page, orphans=0, allow_empty_first_page=True, score_min='-inf', score_max='+inf', source_type=None):
object_list_class = _source_type_object_list[source_type]
object_list = object_list_class(conn, key, score_min, score_max)
super(RedisPaginator, self).__init__(object_list, per_page, orphans, allow_empty_first_page)
# ##############################################################################
class ListPaginator(RedisPaginator):
""" A paginator for Redis list. See parent class's docstring for details.
"""
def __init__(self, *args, **kwargs):
kwargs['source_type'] = 'list'
super(ListPaginator, self).__init__(*args, **kwargs)
class ZSetPaginator(RedisPaginator):
""" A paginator for Redis sorted sets. See parent class's docstring for details.
"""
def __init__(self, *args, **kwargs):
kwargs['source_type'] = 'zset'
super(ZSetPaginator, self).__init__(*args, **kwargs) | zato-redis-paginator | /zato-redis-paginator-1.0.tar.gz/zato-redis-paginator-1.0/src/zato/redis_paginator/__init__.py | __init__.py |
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
from logging import getLogger
# Vault
from hvac import Client as _Client
# ################################################################################################################################
logger = getLogger(__name__)
# ################################################################################################################################
class NameId(object):
""" Wraps both an attribute's name and its ID.
"""
def __init__(self, name, id):
self.name = name
self.id = id
# ################################################################################################################################
class VAULT:
class DEFAULT:
TIMEOUT = 10
URL = 'http://localhost:8200'
class HEADERS:
TOKEN_VAULT = 'HTTP_X_ZATO_VAULT_TOKEN'
TOKEN_GH = 'HTTP_X_ZATO_VAULT_TOKEN_GITHUB'
USERNAME = 'HTTP_X_ZATO_VAULT_USERNAME'
PASSWORD = 'HTTP_X_ZATO_VAULT_PASSWORD'
MOUNT_POINT = 'HTTP_X_ZATO_VAULT_MOUNT_POINT'
TOKEN_RESPONSE = 'X-Zato-Vault-Token'
TOKEN_RESPONSE_LEASE = 'X-Zato-Vault-Token-Lease-Duration'
class AUTH_METHOD:
GITHUB = NameId('GitHub', 'github')
TOKEN = NameId('Token', 'token')
USERNAME_PASSWORD = NameId('Username/password', 'username-password')
class __metaclass__(type):
def __iter__(self):
return iter((self.GITHUB, self.TOKEN, self.USERNAME_PASSWORD))
VAULT.METHOD_HEADER = {
VAULT.AUTH_METHOD.GITHUB.id: VAULT.HEADERS.TOKEN_GH,
VAULT.AUTH_METHOD.TOKEN.id: VAULT.HEADERS.TOKEN_VAULT,
VAULT.AUTH_METHOD.USERNAME_PASSWORD.id: (VAULT.HEADERS.USERNAME, VAULT.HEADERS.PASSWORD, VAULT.HEADERS.MOUNT_POINT),
}
VAULT.WEB_SOCKET = {
'github': {'secret': VAULT.HEADERS.TOKEN_GH},
'token': {'secret': VAULT.HEADERS.TOKEN_VAULT},
'username-password': {
'username': VAULT.HEADERS.USERNAME,
'secret': VAULT.HEADERS.PASSWORD,
}
}
# ################################################################################################################################
class VaultResponse(object):
""" A convenience class to hold individual attributes of responses from Vault.
"""
__slots__ = ('action', 'client_token', 'lease_duration', 'accessor', 'policies')
def __init__(self, action=None, client_token=None, lease_duration=None, accessor=None, policies=None):
self.action = action
self.client_token = client_token
self.lease_duration = lease_duration
self.accessor = accessor
self.policies = policies
def __str__(self):
attrs = []
for elem in sorted(self.__slots__):
value = getattr(self, elem)
attrs.append('{}:{}'.format(elem, value))
return '<{} at {}, {}>'.format(self.__class__.__name__, hex(id(self)), ', '.join(attrs))
@staticmethod
def from_vault(action, response, main_key='auth', token_key='client_token', has_lease_duration=True):
""" Builds a VaultResponse out of a dictionary returned from Vault.
"""
auth = response[main_key]
vr = VaultResponse(action)
vr.client_token = auth[token_key]
vr.accessor = auth['accessor']
vr.policies = auth['policies']
if has_lease_duration:
vr.lease_duration = auth['lease_duration']
return vr
# ################################################################################################################################
class Client(_Client):
""" A thin wrapper around hvac.Client providing connectivity to Vault.
"""
def __init__(self, *args, **kwargs):
super(Client, self).__init__(*args, **kwargs)
self._auth_func = {
VAULT.AUTH_METHOD.TOKEN.id: self._auth_token,
VAULT.AUTH_METHOD.USERNAME_PASSWORD.id: self._auth_username_password,
VAULT.AUTH_METHOD.GITHUB.id: self._auth_github,
}
def __str__(self):
return '<{} at {}, {}>'.format(self.__class__.__name__, hex(id(self)), self._url)
__repr__ = __str__
def ping(self):
return self.is_sealed
def _auth_token(self, client_token, _from_vault=VaultResponse.from_vault):
if not client_token:
raise ValueError('Client token missing on input')
response = self.lookup_token(client_token)
return _from_vault('auth_token', response, 'data', 'id', False)
def _auth_username_password(self, username, password, _from_vault=VaultResponse.from_vault):
return _from_vault('auth_userpass', self.auth_userpass(username, password, use_token=False))
def _auth_github(self, gh_token, _from_vault=VaultResponse.from_vault):
return _from_vault('auth_github', self.auth_github(gh_token, use_token=False))
def renew(self, client_token, _from_vault=VaultResponse.from_vault):
return _from_vault('renew', self.renew_token(client_token))
def authenticate(self, auth_method, *credentials):
return self._auth_func[auth_method](*credentials)
# ################################################################################################################################ | zato-vault-client | /zato-vault-client-1.4.tar.gz/zato-vault-client-1.4/src/zato/vault/client.py | client.py |
from __future__ import absolute_import, division, print_function, unicode_literals
"""
Copyright (C) 2017 Zato Source s.r.o. https://zato.io
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from gevent.monkey import patch_all
patch_all()
# stdlib
import logging
import subprocess
from datetime import datetime, timedelta
from json import dumps, loads
from traceback import format_exc
from uuid import uuid4
# gevent
from gevent import sleep, spawn
# six
from six import binary_type
from six.moves.http_client import OK
# ws4py
from ws4py.client.geventclient import WebSocketClient
_invalid = '_invalid.' + uuid4().hex
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')
logger = logging.getLogger('zato_ws_client')
# ################################################################################################################################
class MSG_PREFIX:
_COMMON = 'zato.ws.client.{}'
INVOKE_SERVICE = _COMMON.format('invs.{}')
SEND_AUTH = _COMMON.format('auth.{}')
SEND_RESP = _COMMON.format('resp.{}')
# ################################################################################################################################
zato_keep_alive_ping = 'zato-keep-alive-ping'
# ################################################################################################################################
class Config(object):
def __init__(self, client_name=None, client_id=None, address=None, username=None, secret=None, on_request_callback=None,
wait_time=5):
self.client_name = client_name
self.client_id = client_id
self.address = address
self.username = username
self.secret = secret
self.on_request_callback = on_request_callback
self.wait_time = wait_time
# ################################################################################################################################
class MessageToZato(object):
""" An individual message from a WebSocket client to Zato, either request or response to a previous request from Zato.
"""
action = _invalid
def __init__(self, msg_id, config, token=None):
self.config = config
self.msg_id = msg_id
self.token = token
def serialize(self, _now=datetime.utcnow):
return dumps(self.enrich({
'data': {},
'meta': {
'action': self.action,
'id': self.msg_id,
'timestamp': _now().isoformat(),
'token': self.token,
'client_id': self.config.client_id,
'client_name': self.config.client_name,
}
}))
def enrich(self, msg):
""" Implemented by subclasses that need to add extra information.
"""
return msg
# ################################################################################################################################
class AuthRequest(MessageToZato):
""" Logs a client into a WebSocket connection.
"""
action = 'create-session'
def enrich(self, msg):
msg['meta']['username'] = self.config.username
msg['meta']['secret'] = self.config.secret
return msg
# ################################################################################################################################
class ServiceInvokeRequest(MessageToZato):
""" Encapsulates information about an invocation of a Zato service.
"""
action = 'invoke-service'
def __init__(self, request_id, data, *args, **kwargs):
self.data = data
super(ServiceInvokeRequest, self).__init__(request_id, *args, **kwargs)
def enrich(self, msg):
msg['data'].update(self.data)
return msg
# ################################################################################################################################
class ResponseFromZato(object):
""" A response from Zato to a previous request by this client.
"""
__slots__ = ('id', 'timestamp', 'in_reply_to', 'status', 'is_ok', 'data', 'msg_impl')
def __init__(self):
self.id = None
self.timestamp = None
self.in_reply_to = None
self.status = None
self.is_ok = None
self.data = None
self.msg_impl = None
@staticmethod
def from_json(msg):
response = ResponseFromZato()
response.msg_impl = msg
meta = msg['meta']
response.id = meta['id']
response.timestamp = meta['timestamp']
response.in_reply_to = meta['in_reply_to']
response.status = meta['status']
response.is_ok = response.status == OK
response.data = msg.get('data')
return response
# ################################################################################################################################
class RequestFromZato(object):
""" A request from Zato to this client.
"""
__slots__ = ('id', 'timestamp', 'data', 'msg_impl')
def __init__(self):
self.id = None
self.timestamp = None
self.data = None
self.msg_impl = None
@staticmethod
def from_json(msg):
request = RequestFromZato()
request.msg_impl = msg
request.id = msg['meta']['id']
request.timestamp = msg['meta']['timestamp']
request.data = msg['data']
return request
# ################################################################################################################################
class ResponseToZato(MessageToZato):
""" A response from this client to a previous request from Zato.
"""
action = 'client-response'
def __init__(self, in_reply_to, data, *args, **kwargs):
self.in_reply_to = in_reply_to
self.data = data
super(ResponseToZato, self).__init__(*args, **kwargs)
def enrich(self, msg):
msg['meta']['in_reply_to'] = self.in_reply_to
msg['data']['response'] = self.data
return msg
# ################################################################################################################################
class _WSClient(WebSocketClient):
""" A low-level subclass of around ws4py's WebSocket client functionality.
"""
def __init__(self, on_connected_callback, on_message_callback, on_error_callback, *args, **kwargs):
self.on_connected_callback = on_connected_callback
self.on_message_callback = on_message_callback
self.on_error_callback = on_error_callback
super(_WSClient, self).__init__(*args, **kwargs)
def opened(self):
spawn(self.on_connected_callback)
def received_message(self, msg):
self.on_message_callback(msg)
def unhandled_error(self, error):
spawn(self.on_error_callback, error)
# ################################################################################################################################
class Client(object):
""" A WebSocket client that knows how to invoke Zato services.
"""
def __init__(self, config):
self.config = config
self.conn = _WSClient(self.on_connected, self.on_message, self.on_error, self.config.address)
self.keep_running = True
self.is_authenticated = False
self.auth_token = None
self.on_request_callback = self.config.on_request_callback
# Keyed by IDs of requests sent from this client to Zato
self.requests_sent = {}
# Same key as self.requests_sent but the dictionary contains responses to previously sent requests
self.responses_received = {}
# Requests initiated by Zato, keyed by their IDs
self.requests_received = {}
# ################################################################################################################################
def send(self, msg_id, msg, wait_time=2):
""" Spawns a greenlet to send a message to Zato.
"""
spawn(self._send, msg_id, msg, msg.serialize(), wait_time)
# ################################################################################################################################
def _send(self, msg_id, msg, serialized, wait_time):
""" Sends a request to Zato and waits up to wait_time or self.config.wait_time seconds for a reply.
"""
logger.info('Sending msg `%s`', serialized)
# So that it can be correlated with a future response
self.requests_sent[msg_id] = msg
# Actually send the messageas string now
self.conn.send(serialized)
# ################################################################################################################################
def _wait_for_response(self, request_id, wait_time=None, _now=datetime.utcnow, _delta=timedelta, _sleep=sleep):
""" Wait until a response arrives and return it
or return None if there is no response up to wait_time or self.config.wait_time.
"""
now = _now()
until = now + _delta(seconds=wait_time or self.config.wait_time)
while now < until:
response = self.responses_received.get(request_id)
if response:
return response
else:
_sleep(0.01)
now = _now()
# ################################################################################################################################
def authenticate(self, request_id):
""" Authenticates the client with Zato.
"""
logger.info('Authenticating as `%s` (%s %s)', self.config.username, self.config.client_name, self.config.client_id)
spawn(self.send, request_id, AuthRequest(request_id, self.config, self.auth_token))
# ################################################################################################################################
def on_connected(self):
""" Invoked upon establishing an initial connection - logs the client in with self.config's credentials
"""
logger.info('Connected to `%s` %s (%s %s)',
self.config.address,
'as `{}`'.format(self.config.username) if self.config.username else 'without credentials',
self.config.client_name, self.config.client_id)
request_id = MSG_PREFIX.SEND_AUTH.format(uuid4().hex)
self.authenticate(request_id)
response = self._wait_for_response(request_id)
if not response:
logger.warn('No response to authentication request `%s`', request_id)
else:
self.auth_token = response.data['token']
self.is_authenticated = True
del self.responses_received[request_id]
logger.info('Authenticated successfully as `%s` (%s %s)',
self.config.username, self.config.client_name, self.config.client_id)
# ################################################################################################################################
def on_message(self, msg, _uuid4=uuid4):
""" Invoked for each message received from Zato, both for responses to previous requests and for incoming requests.
"""
_msg = loads(msg.data.decode('utf-8') if isinstance(msg.data, binary_type) else msg.data)
logger.info('Received message `%s`', _msg)
in_reply_to = _msg['meta'].get('in_reply_to')
# Reply from Zato to one of our requests
if in_reply_to:
self.responses_received[in_reply_to] = ResponseFromZato.from_json(_msg)
# Request from Zato
else:
data = self.on_request_callback(RequestFromZato.from_json(_msg))
response_id = MSG_PREFIX.SEND_RESP.format(_uuid4().hex)
self.send(response_id, ResponseToZato(_msg['meta']['id'], data, response_id, self.config, self.auth_token))
# ################################################################################################################################
def on_error(self, error):
""" Invoked for each unhandled error in the lower-level ws4py library.
"""
        logger.warning('Caught error %s', error)
# ################################################################################################################################
def _run(self):
self.conn.connect()
# ################################################################################################################################
def run(self, max_wait=2):
spawn(self._run)
now = datetime.utcnow()
until = now + timedelta(seconds=max_wait)
while not self.is_authenticated:
sleep(0.01)
now = datetime.utcnow()
if now >= until:
return
# ################################################################################################################################
def stop(self):
self.keep_running = False
self.conn.close()
# ################################################################################################################################
def invoke(self, request):
if not self.is_authenticated:
raise Exception('Client is not authenticated')
request_id = MSG_PREFIX.INVOKE_SERVICE.format(uuid4().hex)
spawn(self.send, request_id, ServiceInvokeRequest(request_id, request, self.config, self.auth_token))
response = self._wait_for_response(request_id)
if not response:
            logger.warning('No response to invocation request `%s`', request_id)
else:
return response
# ################################################################################################################################
if __name__ == '__main__':
def on_request_from_zato(msg):
try:
return subprocess.check_output(msg.data['cmd'])
except Exception as e:
            return format_exc()
config = Config()
config.client_name = 'My Client'
config.client_id = '32351b3f5d16'
address = 'ws://127.0.0.1:47043/zato.ws.apitests'
config.address = address
config.username = 'user1'
config.secret = 'secret1'
config.on_request_callback = on_request_from_zato
client = Client(config)
client.run()
client.invoke({'service':'zato.ping'})
logger.info('Press Ctrl-C to quit')
try:
        x = 0
        while x < 1000 and client.keep_running:  # run for up to ~200 seconds unless stopped
            x += 1
            sleep(0.2)
except KeyboardInterrupt:
client.stop()
# ################################################################################################################################ | zato-websocket-client | /zato-websocket-client-1.6.1.tar.gz/zato-websocket-client-1.6.1/src/zato/websocket/client.py | client.py |

# zava
Parallel coordinates with Grand Tour for exploratory data visualization of massive and high-dimensional data in Python. If you want a desktop application, try [VizApp](https://github.com/oneoffcoder/vizapp).
- [GitHub](https://github.com/oneoffcoder/zava)
- [Documentation](https://zava.readthedocs.io/)
- [PyPi](https://pypi.org/project/zava/)
- [Gitter](https://gitter.im/dataflava/zava)
# Requirements
At a minimum, you will need the following packages.
- python=3.8.3
- numpy=1.19.2
- scipy=1.5.2
- pandas=0.25.3
- matplotlib=3.3.2
If you are generating animation videos, you will also need [ffmpeg](https://ffmpeg.org/) installed, as that is what matplotlib uses by default to render videos.
## Issues
You may get into situations where ffmpeg appears to hang or stall. There are a lot of issues reported with matplotlib and ffmpeg interoperability.
- [Matplotlib + ffmpeg crashes when saving MP4 file](https://github.com/spack/spack/issues/18071)
- [ffmpeg hangs when run in background](https://stackoverflow.com/questions/16523746/ffmpeg-hangs-when-run-in-background)
# Installation
## From PyPI
```bash
pip install zava
```
## From source
```bash
python setup.py install
```
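
# Usage

A minimal sketch of plotting with the Grand Tour. The `GrandTour`/`SinglePlot` names, import paths and signatures below are assumptions based on the project documentation, not a verified API; check the [documentation](https://zava.readthedocs.io/) for the authoritative usage.

```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from zava.core import GrandTour   # assumed import path
from zava.plot import SinglePlot  # assumed import path

# toy data: 100 rows across 4 numeric dimensions (hypothetical example)
df = pd.DataFrame(np.random.randn(100, 4), columns=list('abcd'))

gt = GrandTour(df)                # assumed constructor
fig, ax = plt.subplots(figsize=(10, 4))
SinglePlot(gt, ax=ax).plot()      # assumed plotting call
plt.show()
```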
# Copyright
## Software
```
Copyright 2020 One-Off Coder
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
```
## Art
Copyright 2020 Daytchia Vang
# Citation
```
@misc{oneoffcoder_zava_2020,
title={zava, Parallel Coordinates with Grand Tour for TypeScript and Python},
url={https://github.com/oneoffcoder/zava},
author={Jee Vang},
year={2020},
month={Dec}}
```
# Sponsor, Love
- [Patreon](https://www.patreon.com/vangj)
- [GitHub](https://github.com/sponsors/vangj)
| zava | /zava-0.0.2.tar.gz/zava-0.0.2/README.md | README.md |
MIT License
Copyright (c) 2021 voilalex
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
| zawarudo | /zawarudo-1.0.0.tar.gz/zawarudo-1.0.0/LICENSE.md | LICENSE.md |
import logging
from typing import Union, Dict
from zawn_orm.codec import CodecFactory
DEBUG = False
class BaseField(object):
""" 基础字段类 """
type_name = '_'
def __init__(self, **kwargs):
self.class_name = self.__class__.__name__
self.unique: bool = bool(kwargs.get('unique'))
self.index: bool = bool(kwargs.get('index'))
def __str__(self):
return f'<{self.class_name}>'
def __repr__(self):
return f'<{self.class_name}>'
def load(self, data):
""" 装载数据 """
codec = CodecFactory.get(self.type_name)
try:
return codec.on_load(data)
except Exception as e:
if DEBUG:
logging.error(f'字段值读取出错:{self.class_name}:{data}<-{type(data)}:{e}', exc_info=True)
return codec.default_value
def to_json(self, data):
""" 转为json """
codec = CodecFactory.get(self.type_name)
try:
return codec.on_to_json(data)
except Exception as e:
if DEBUG:
logging.error(f'字段值转JSON出错:{self.class_name}:{data}<-{type(data)}:{e}', exc_info=True)
return codec.default_value
def to_db(self, data):
""" 转为数据库数据 """
codec = CodecFactory.get(self.type_name)
try:
return codec.on_to_db(data)
except Exception as e:
if DEBUG:
logging.error(f'字段值转数据库格式出错:{self.class_name}:{data}<-{type(data)}:{e}', exc_info=True)
return codec.default_value
class IDField(BaseField):
""" 唯一主键字段 """
type_name = 'id'
class BooleanField(BaseField):
""" 布尔字段类 """
type_name = 'boolean'
class StringField(BaseField):
""" 字符串字段类 """
type_name = 'string'
class FloatField(BaseField):
""" 浮点数字字段类 """
type_name = 'float'
class IntegerField(BaseField):
""" 整型数字字段类 """
type_name = 'integer'
class DecimalField(BaseField):
""" 十进制数字字段类 """
type_name = 'decimal'
class DatetimeField(BaseField):
""" 日期时间字段类 """
type_name = 'datetime'
class ObjectField(BaseField):
""" 对象字段类 """
type_name = 'object'
def __init__(self, field_dict: Dict[str, BaseField], **kwargs):
super().__init__(**kwargs)
self.field_dict = field_dict
def object_transform(self, data: dict, method_name: str) -> dict:
out = {}
for codec_name, codec_object in self.field_dict.items():
value = data.get(codec_name)
if value is None:
continue
method = getattr(codec_object, method_name)
if not callable(method):
continue
out[codec_name] = method(value)
return out
def load(self, data: dict) -> dict:
return self.object_transform(data, 'load')
def to_json(self, data: dict) -> dict:
return self.object_transform(data, 'to_json')
def to_db(self, data: dict) -> dict:
return self.object_transform(data, 'to_db')
class ArrayField(BaseField):
""" 数组字段类 """
type_name = 'array'
def __init__(self, field_dict: Union[BaseField, Dict[str, BaseField]], **kwargs):
super().__init__(**kwargs)
self.field_dict = field_dict
def array_transform(self, data: list, method_name: str) -> list:
out = []
for i in data:
# 如果字段定义是字段类型,则直接转换存入,并且跳过循环
if isinstance(self.field_dict, BaseField):
method = getattr(self.field_dict, method_name)
if not callable(method):
continue
out.append(method(i))
continue
row = {}
for field_name, field_class in self.field_dict.items():
value = i.get(field_name)
if value is None:
continue
method = getattr(field_class, method_name)
if not callable(method):
continue
row[field_name] = method(value)
out.append(row)
return out
def load(self, data: list) -> list:
return self.array_transform(data, 'load')
def to_json(self, data: list) -> list:
return self.array_transform(data, 'to_json')
def to_db(self, data: list) -> list:
return self.array_transform(data, 'to_db')
field_mapping: Dict[str, BaseField] = {}
if __name__ == '__main__':
class Model(object):
@classmethod
def load_fields(cls) -> Dict[str, BaseField]:
field_dict = {}
for field_name, field_class in cls.__dict__.items():
if not isinstance(field_class, BaseField):
continue
field_dict[field_name] = field_class
return field_dict
@classmethod
def objects(cls) -> BaseField:
return ObjectField(cls.load_fields())
class SubModel(Model):
s_a = StringField()
s_b = BooleanField()
s_c = FloatField()
s_d = IntegerField()
class MainModel(Model):
a = ObjectField(SubModel.load_fields())
b = ArrayField(SubModel.load_fields())
c = DatetimeField()
d = DecimalField()
e = IntegerField()
f = IntegerField()
data = {
'a': {
's_a': 1,
's_b': 1,
's_c': 1,
's_d': 1,
's_e': 1,
},
'b': [
{
's_a': 0,
's_b': 0,
's_c': 0,
's_d': 0,
's_e': 0,
},
{
's_a': -1,
's_b': -1,
's_c': -1,
's_d': -1,
's_e': -1,
},
],
'c': '2022-08-15T16:00:30.000+0800',
'd': '2.50',
'e': '-',
}
print('')
print(data)
data_class = MainModel.objects()
print('---', '装载数据')
new_data = data_class.load(data)
print(new_data)
print('---', '输出JSON')
json_data = data_class.to_json(new_data)
print(json_data)
print('---', '输出DB')
db_data = data_class.to_db(new_data)
print(db_data) | zawn-orm | /zawn_orm-0.0.16.tar.gz/zawn_orm-0.0.16/zawn_orm/field.py | field.py |
from datetime import datetime
from decimal import Decimal
from typing import Any, Dict, Union, Type
from zawn_orm.exception import InvalidValueException
from zawn_orm.tool import datetime_format, rounding
__all__ = ['DefaultCodec', 'CodecFactory']
class DefaultCodec(object):
""" 默认字段数据类型的编解码器 """
type_name = '_'
default_value = None
def on_transform(self, data: Any) -> Any:
""" 默认转换方法 """
return data
def on_load(self, data: Any) -> Any:
""" 装载方法 """
return self.on_transform(data)
def on_to_json(self, data: Any) -> Any:
""" 输出JSON """
return self.on_transform(data)
def on_to_db(self, data: Any) -> Any:
""" 输出数据库类型 """
return self.on_transform(data)
class IDCodec(DefaultCodec):
""" 唯一主键编解码类 """
type_name = 'id'
class BooleanCodec(DefaultCodec):
""" 布尔编解码类 """
type_name = 'boolean'
def on_transform(self, data: Any) -> bool:
return bool(data)
class StringCodec(DefaultCodec):
""" 字符串编解码类 """
type_name = 'string'
def on_transform(self, data: Any) -> str:
return str(data)
class FloatCodec(DefaultCodec):
""" 浮点数字编解码类 """
type_name = 'float'
def on_transform(self, data: Union[str, int, float, Decimal]) -> float:
return float(data)
class IntegerCodec(DefaultCodec):
""" 整型数字编解码类 """
type_name = 'integer'
def on_transform(self, data: Union[str, int, float, Decimal]) -> int:
return int(data)
class DecimalCodec(DefaultCodec):
""" 十进制数字编解码类 """
type_name = 'decimal'
def on_load(self, data: Union[str, int, float, Decimal]) -> Decimal:
return rounding(Decimal(data), 4)
def on_to_json(self, data: Decimal) -> str:
return str(rounding(Decimal(data), 4))
def on_to_db(self, data: Decimal) -> float:
return float(data)
class DatetimeCodec(DefaultCodec):
""" 日期时间编解码类 """
type_name = 'datetime'
def on_load(self, data: Union[str, int, float]) -> datetime:
if isinstance(data, str):
return datetime.strptime(data, datetime_format).astimezone()
elif isinstance(data, (int, float)):
return datetime.fromtimestamp(int(data / 1000)).astimezone()
elif isinstance(data, datetime):
return data.astimezone()
raise InvalidValueException(f'{str(data)}无法转换成{self.type_name}类型')
def on_to_json(self, data: Union[datetime]) -> str:
if isinstance(data, datetime):
return data.astimezone().strftime(datetime_format)
raise InvalidValueException(f'{str(data)}无法转换成{self.type_name}类型')
def on_to_db(self, data: Union[datetime]) -> int:
if isinstance(data, datetime):
return int(data.timestamp() * 1000)
raise InvalidValueException(f'{str(data)}无法转换成{self.type_name}类型')
class CodecFactory(object):
""" 编解码器工厂 """
codec_tuple = (
DefaultCodec, IDCodec, BooleanCodec,
StringCodec, FloatCodec, IntegerCodec,
DecimalCodec, DatetimeCodec,
)
codec_mapping: Dict[str, DefaultCodec] = {c.type_name: c() for c in codec_tuple}
@classmethod
def set(cls, codec: Type[DefaultCodec], force: bool = False):
if codec.type_name in cls.codec_mapping and not force:
return
cls.codec_mapping[codec.type_name] = codec()
@classmethod
def get(cls, type_name: str) -> DefaultCodec:
return cls.codec_mapping.get(type_name, cls.codec_mapping[DefaultCodec.type_name])
if __name__ == '__main__':
test_data = (-1, 0, 1, '-', '2022-09-10T16:30:00.123456+0800', datetime.now())
for type_name, codec in CodecFactory.codec_mapping.items():
print('-' * 20)
for i in test_data:
for ii in ('on_load', 'on_to_json', 'on_to_db'):
try:
method = getattr(codec, ii)
print(type_name, i, ii, method(i))
except Exception as e:
print(type_name, i, ii, f'出错:{e}') | zawn-orm | /zawn_orm-0.0.16.tar.gz/zawn_orm-0.0.16/zawn_orm/codec.py | codec.py |
import logging
from typing import List, Dict, Tuple
from zawn_orm.exception import InvalidOperationException
from zawn_orm.tool import MakeID
operation_tuple = ('insert', 'update', 'delete', 'index')
class Query(object):
""" 数据库查询类 """
max_length: int = 1000 # 查询时最大行数限制
def __init__(self, table_name: str):
self.table_name = table_name
self.filter = {}
def set_query(self, filter: dict) -> 'Query':
""" 设置查询条件 """
self.filter = filter
return self
def get_query(self):
""" 获取查询条件 """
return self.filter
class Operation(object):
""" 数据库操作类 """
def __init__(self, table_name: str):
self.table_name = table_name
self.operation = None
self.filter = {}
self.update = {}
def set_operation(self, operation: str, filter: dict, update: dict) -> 'Operation':
""" 设置操作属性 """
if operation not in operation_tuple:
raise InvalidOperationException(f'{operation}是无效操作')
self.operation = operation
self.filter = filter
self.update = update
return self
def get_operation(self):
return self.table_name, self.operation, self.filter, self.update
class Result(object):
""" 数据库操作结果 """
def __init__(self):
self.result_dict = {}
def add(self, table_name: str, operation: str, number: int = 1) -> int:
""" 增加操作统计数 """
if table_name not in self.result_dict:
self.result_dict[table_name] = {i: 0 for i in operation_tuple}
if operation not in operation_tuple:
return 0
self.result_dict[table_name][operation] += number
return self.result_dict[table_name][operation]
def get(self) -> Dict[str, dict]:
return self.result_dict
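
# Typical flow (sketch): build Operation objects via Database.new_operation(),
# run them through Database.execute(), then read per-table counters from
# Result.get(), e.g. {'user': {'insert': 1, 'update': 0, 'delete': 0, 'index': 0}}.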
class Database(object):
""" 数据库基类 """
singleton_mapping: Dict[str, 'Database'] = {}
database_type: str = 'base'
client = None
database = None
class OperationCount(object):
insert: int = 0
update: int = 0
delete: int = 0
def result(self) -> dict:
return {
'insert': self.insert,
'update': self.update,
'delete': self.delete,
}
def __str__(self) -> str:
return str(self.result())
def __repr__(self) -> str:
return str(self.result())
def __new__(cls, *args, **kwargs):
""" 每个类有独立的单例对象 """
instance = cls.singleton_mapping.get(cls.__name__)
if instance is None:
instance = super().__new__(cls, **kwargs)
cls.singleton_mapping[cls.__name__] = instance
return instance
def __init__(self):
pass
def init(self, model_class, **kwargs):
return self
async def connect(self, uri: str) -> 'Database':
return self
async def close(self):
pass
def new_id(self, id_prefix='') -> str:
""" 获取一个新的id """
return MakeID.next(id_prefix)
def new_query(self, table_name: str) -> Query:
""" 获取一个新的查询 """
return Query(table_name)
def new_operation(self, table_name: str) -> Operation:
""" 获取一个新的操作 """
operation = Operation(table_name)
return operation
async def find_one(self, query: Query, **kwargs) -> dict:
try:
return await self.on_find_one(query, **kwargs)
except Exception as e:
logging.error(f'数据库执行查询数据单条时出错{e}', exc_info=True)
return {}
async def search(self, query: Query, **kwargs) -> Tuple[dict, list]:
try:
return await self.on_search(query, **kwargs)
except Exception as e:
logging.error(f'数据库执行查询时出错{e}', exc_info=True)
return {'skip': 0, 'limit': 0, 'total': 0}, []
async def execute(self, operation_list: List[Operation], **kwargs) -> dict:
result = {}
try:
result = await self.on_execute(operation_list, **kwargs)
except Exception as e:
logging.error(f'数据库执行变更时出错{e}', exc_info=True)
return result
async def on_find_one(self, query: Query, **kwargs) -> dict:
return {}
async def on_search(self, query: Query, **kwargs) -> Tuple[dict, list]:
return {'skip': 0, 'limit': 0, 'total': 0}, []
async def on_execute(self, operation_list: List[Operation], **kwargs) -> dict:
result = Result()
for i in operation_list:
result.add(i.table_name, i.operation, 1)
return result.get()
async def init_index(self, operation_list: List[Operation]) -> dict:
""" 初始化索引 """ | zawn-orm | /zawn_orm-0.0.16.tar.gz/zawn_orm-0.0.16/zawn_orm/database.py | database.py |
import os
import socket
import time
from decimal import Decimal, ROUND_HALF_UP
from typing import Union
datetime_format = '%Y-%m-%dT%H:%M:%S.%f%z'
def get_table_name(model_class):
    """ Derive an uppercase prefix and a snake_case table name from a model class name. """
prefix_list = []
chat_list = []
for i in model_class.__name__:
if i == i.upper():
prefix_list.append(i) # 大写前缀
chat_list.append('_')
chat_list.append(i.lower())
else:
chat_list.append(i)
if chat_list[0] == '_':
chat_list.pop(0)
return ''.join(prefix_list), ''.join(chat_list)
def get_machine_number(mod_number=65536):
    """ Generate a machine number from the hostname and the process id. """
hostname = socket.gethostname().encode('utf-8')
machine_number = os.getpid()
for i in hostname:
machine_number = ((machine_number << 8) + i) % mod_number
return machine_number
class MakeID(object):
    ''' Short-code generator. '''
sn = int(time.time()) % 65536
maps = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
length = len(maps)
mod_number = length ** 2
pid = get_machine_number(length ** 4)
@classmethod
def mod(cls, num, length):
ids = []
while num >= length:
num, y = divmod(num, length)
ids.append(cls.maps[y])
ids.append(cls.maps[num])
id = ''.join(ids[::-1])
return id
@classmethod
def next(cls, prefix='', code_length=16, size_type: str = 'long'):
cls.sn = (cls.sn + 1) % cls.mod_number
fid = cls.maps[0] * code_length
tid = f'{cls.mod(int(time.time() * 1000), cls.length):0>6}'
pid = f'{cls.mod(cls.pid, cls.length):0>4}'
sid = f'{cls.mod(cls.sn, cls.length):0>2}'
id = (fid + tid + pid + sid)[-code_length + len(prefix):]
code = prefix + id
return code
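
# Usage sketch for MakeID (illustrative output only; real codes vary by time and host):
#
#     MakeID.next('ZW')               # -> 16-char code, e.g. 'ZW00Ab3kF9Qw12Xy'
#     MakeID.next('', code_length=8)  # -> 8-char code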
def rounding(number: Union[int, float, str, Decimal], dig: int = 4):
""" 对数字进行四舍五入 """
n = Decimal(number)
dig = int(dig)
return n.quantize(
Decimal('0' if dig <= 0 else f'0.{"0" * dig}'),
rounding=ROUND_HALF_UP,
) | zawn-orm | /zawn_orm-0.0.16.tar.gz/zawn_orm-0.0.16/zawn_orm/tool.py | tool.py |
import datetime
from typing import Dict, List, Tuple, Union
from zawn_orm.database import Database, Operation
from zawn_orm.field import BaseField, IDField, IntegerField, ObjectField
from zawn_orm.state import BaseState
from zawn_orm.tool import get_table_name
class Model(object):
""" 模型基类 """
v = IntegerField() # 版本号
root_field_dict: Dict[str, ObjectField] = {}
database: Database = Database()
def __new__(cls, *args, **kwargs):
""" 每个类有独立的单例对象 """
if cls.__name__ not in model_mapping:
instance = super().__new__(cls, **kwargs)
instance.init()
model_mapping[instance.table_name] = model_mapping[cls.__name__] = instance
return model_mapping[cls.__name__]
def init(self):
self.index_flag: bool = False # 索引标识,为true则已经初始化了
self.id_field: str = getattr(self, 'id_field', 'id') # 唯一字段名称
self.id_prefix: str = getattr(self, 'id_prefix', '') # 唯一字段前缀
self.table_prefix: str = getattr(self, 'table_prefix', '') # 表名前缀
self.state = BaseState(getattr(self, 'state', None)) # 有限状态机
self.init_table_name() # 初始化模型的表名
@classmethod
def set_database(cls, database: Database):
""" 设置数据库 """
cls.database = database.init(cls)
@classmethod
def load_fields(cls) -> Dict[str, BaseField]:
""" 读取定义的字段信息,返回字段定义字典 """
field_dict = {}
for field_name, field_class in cls.__dict__.items():
if not isinstance(field_class, BaseField):
continue
field_dict[field_name] = field_class
return field_dict
def objects(self) -> BaseField:
""" 获取字段对象 """
class_name = self.__class__.__name__
if class_name not in self.root_field_dict:
field_dict = {self.id_field: IDField(unique=True), **self.load_fields()} # 默认加上唯一主键字段
Model.root_field_dict[class_name] = ObjectField(field_dict)
return Model.root_field_dict[class_name]
def init_table_name(self, table_name: str = ''):
""" 初始化模型的表名 """
if getattr(self, 'table_name', None):
return
if table_name and isinstance(table_name, str):
self.table_name = table_name
return
self.id_prefix, self.table_name = get_table_name(self.__class__)
if self.table_prefix:
self.table_name = '_'.join([self.table_prefix, self.table_name])
def load(self, data: dict) -> dict:
""" 装载数据 """
return self.objects().load(data)
def to_json(self, data: dict) -> dict:
""" 转为json """
return self.objects().to_json(data)
def to_db(self, data: dict) -> dict:
""" 转为数据库数据 """
return self.objects().to_db(data)
def new_operation(self, operation: str, filter: dict, update: dict) -> Operation:
""" 获取一个新的操作 """
return self.database.new_operation(self.table_name).set_operation(operation, filter, update)
async def find_one(self, filter: dict, raw: bool = False, **kwargs) -> dict:
query = self.database.new_query(self.table_name).set_query(filter)
data = await self.database.find_one(query, **kwargs)
if raw:
return data
return self.load(data)
async def search(self, filter: dict, raw: bool = False, max_length: int = 1000, **kwargs) -> Tuple[dict, list]:
query = self.database.new_query(self.table_name).set_query(filter)
query.max_length = max_length
meta, data = await self.database.search(query, **kwargs)
if raw:
return meta, data
data = [self.load(i) for i in data]
return meta, data
async def execute(self, operation_list: List[Operation], **kwargs) -> dict:
if not self.index_flag:
self.index_flag = True
await self.database.init_index(self.load_index())
return await self.database.execute(operation_list, **kwargs)
def load_index(self):
""" 读取索引配置 """
operation_list: List[Operation] = []
for name, field in self.load_fields().items():
if field.unique:
operation_list.append(self.new_operation('index', filter={name: 1}, update={'unique': True}))
elif field.index:
operation_list.append(self.new_operation('index', filter={name: 1}, update={}))
else:
pass
return operation_list
async def insert(
self, update: dict, return_operation: bool = False, **kwargs) -> Union[List[Operation], dict]:
""" 快捷新增 """
# 如果没有主键字段,则自动新增一个
if self.id_field is not None and self.id_field in update:
update[self.id_field] = self.database.new_id(self.id_prefix)
# 写入数据库
operation = self.new_operation('insert', {}, self.to_db(update))
if return_operation:
return [operation]
result = await self.execute([operation], **kwargs)
return result.get(self.table_name, {})
async def update(
self, filter: dict, update: dict, return_operation: bool = False, **kwargs) -> Union[List[Operation], dict]:
""" 快捷更新 """
operation = self.new_operation('update', filter, update)
if return_operation:
return [operation]
result = await self.execute([operation], **kwargs)
return result.get(self.table_name, {})
async def delete(
self, filter: dict, return_operation: bool = False, **kwargs) -> Union[List[Operation], dict]:
""" 快捷删除 """
now = datetime.datetime.now().astimezone()
op = filter.get('op', 'system')
_, data = await self.search(filter)
operation_list = [self.new_operation('delete', filter, {})]
for i in data:
i.update({
'delete_at': now,
'delete_by': op,
})
insert_operation = self.new_operation('insert', {}, i)
insert_operation.table_name = f'__delete_{self.table_name}'
operation_list.append(insert_operation)
if return_operation:
return operation_list
result = await self.execute(operation_list, **kwargs)
return result.get(self.table_name, {})
# 模型映射表
model_mapping: Dict[str, Model] = {} | zawn-orm | /zawn_orm-0.0.16.tar.gz/zawn_orm-0.0.16/zawn_orm/model.py | model.py |
import logging
import re
from datetime import datetime, timedelta
from decimal import Decimal
from typing import List, Tuple, Any, Union
from bson import codec_options, ObjectId, decimal128
from motor.core import AgnosticCollection
from motor.motor_asyncio import AsyncIOMotorClient
from pymongo import InsertOne, UpdateMany, DeleteMany, IndexModel
from zawn_orm.codec import DefaultCodec, CodecFactory
from zawn_orm.database import Database, Query, Operation
from zawn_orm.exception import InvalidOperationException, InvalidValueException
from zawn_orm.model import model_mapping
from zawn_orm.tool import datetime_format, rounding
class SearchUtils(object):
""" 列表搜索基类,模版模式 """
key_regex = re.compile('^[0-9a-zA-Z_]+$')
default_skip: int = 0
default_limit: int = 10
def __init__(self, collection):
self.collection = collection
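        # NOTE: `collection` is assumed to be a wrapper whose aggregate() is directly
        # awaitable and returns a list (a raw Motor collection returns a cursor that
        # would need `.to_list(...)` instead).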
def match(self, conditions: List[dict]):
query = [{'$match': {'$and': conditions}}]
return query
def sort_desc_by_create_time(self):
query = [{'$sort': {'create_at': -1, '_id': -1}}]
return query
def skip_limit(self, skip: int = default_skip, limit: int = default_limit):
query = []
if skip > 0:
query.append({'$skip': skip})
if limit > 0:
query.append({'$limit': limit})
return query
def unwind(self, path: str, preserve: bool = True):
query = [{'$unwind': {'path': path, 'preserveNullAndEmptyArrays': preserve}}]
return query
def join(
self, from_collection: str, local_field: str, foreign_field: str,
as_name: str, unwind_path: str = None, set_fields: dict = None,
preserve: bool = True):
query = [
{'$lookup': {
'from': from_collection,
'localField': local_field,
'foreignField': foreign_field,
'as': as_name,
}},
]
        # only add the $unwind stage when unwind_path is not None
if unwind_path is not None:
query.extend(self.unwind(unwind_path, preserve))
if set_fields:
query.extend([
{'$set': {
i: f'${as_name}.{set_fields[i]}' for i in set_fields
}},
{'$unset': [as_name]},
])
return query
    def group_by(self, fields: Union[dict, str], count: dict):
        ''' Group and aggregate statistics. '''
query = []
group_query = {'$group': {
'_id': fields,
'root': {'$mergeObjects': '$$ROOT'},
}}
group_query['$group'].update(count)
query.append(group_query)
if len(count) != 0:
query.append({'$set': {f'root.{i}': f'${i}' for i in count}})
query.append({'$replaceRoot': {'newRoot': '$root'}})
return query
def join2(self, from_collection, join_fields, set_fields):
        ''' Multi-field join ($lookup) pipeline builder.
        set_fields = {source field name: joined-collection field name}
*QueryUtils.join2(
'aid_train_result',
{'train_code': 'train_code', 'aid_user_code': 'aid_user_code'},
{
'theory_result': 'theory_result',
'trial_result': 'trial_result',
'recovery_result': 'recovery_result',
'rescue_result': 'rescue_result',
'attendance': 'attendance',
},
),
'''
let_fields = {}
conditions = []
for k in join_fields:
let_fields[k] = f'${k}'
conditions.append({'$eq': [f'${join_fields[k]}', f'$${k}']})
conditions.append({'$ne': ['$status', '-1']})
for k in set_fields:
set_fields[k] = {'$first': f'${from_collection}.{set_fields[k]}'}
pipeline = [
{'$lookup': {
'from': from_collection,
'let': let_fields,
'pipeline': [
{'$match': {'$expr': {'$and': conditions}}},
],
'as': from_collection,
}},
{'$set': set_fields},
{'$unset': [from_collection]},
]
return pipeline
    def range_query(
            self, key: str, value: Union[list, str],
            query_type: str, offset_time: timedelta = timedelta()):
        ''' Build range-search conditions.
        query_type: one of [date, datetime, number, code, status]
        '''
query = []
if query_type == 'date':
date_format = '%Y-%m-%d'
one_day = timedelta(days=1)
if isinstance(value, list) and len(value) == 2:
start = datetime.strptime(value[0], date_format) + offset_time
end = datetime.strptime(value[1], date_format) + offset_time + one_day
query.append({key: {'$gte': start, '$lt': end}})
elif isinstance(value, str):
dt = datetime.strptime(value, date_format) + offset_time
query.append({key: {'$gte': dt, '$lt': dt + one_day}})
else:
raise Exception(f'时间搜索条件 {key}:[start,end]')
        elif query_type == 'datetime':
            # local name, so we don't shadow the datetime_format imported from zawn_orm.tool
            dt_format = '%Y-%m-%dT%H:%M:%S.%fZ'
            if isinstance(value, list) and len(value) == 2:
                start = datetime.strptime(value[0], dt_format)
                end = datetime.strptime(value[1], dt_format)
                query.append({key: {'$gte': start, '$lt': end}})
            elif isinstance(value, str):
                query.append({key: datetime.strptime(value, dt_format)})
else:
raise Exception(f'时间搜索条件 {key}:[start,end]')
elif query_type == 'number':
if isinstance(value, list) and len(value) == 2:
query.append({key: {'$gte': value[0], '$lte': value[1]}})
elif isinstance(value, (int, float)):
query.append({key: value})
elif isinstance(value, str):
query.append({key: float(value)})
else:
raise Exception(f'数字搜索条件 {key}:[start,end]')
elif query_type == 'code':
if isinstance(value, list):
query.append({key: {'$in': value}})
elif isinstance(value, str):
query.append({key: value})
else:
raise Exception(f'数字搜索条件 {key}:[code1,code2,...]')
elif query_type == 'status':
if isinstance(value, list):
query.append({key: {'$in': value}})
elif isinstance(value, str):
query.append({key: value})
else:
query.append({key: {'$ne': '-1'}})
else:
raise Exception('必传query_type:[date,datetime,number,code]')
return query
def add_filter(self, raw_query: dict) -> List[dict]:
""" 加入过滤条件 """
pipeline = []
query = {'status': raw_query.pop('status', {'$nin': ['S3', 'S8']})}
for key, value in raw_query.items():
if not self.key_regex.match(key):
continue
query[key] = value
filter_or = raw_query.get('$or')
if isinstance(filter_or, list):
query['$or'] = filter_or
pipeline.append({'$match': query})
return pipeline
def add_paging(self, raw_query: dict) -> List[dict]:
""" 加入分页条件 """
pipeline = []
sort = raw_query.get('$sort')
skip = raw_query.get('$skip') or self.default_skip
limit = raw_query.get('$limit') or self.default_limit
pipeline.append({'$sort': (sort if sort else {'create_at': -1})})
if skip > 0:
pipeline.append({'$skip': skip})
if limit > 0:
pipeline.append({'$limit': limit})
return pipeline
def add_output(self, raw_query: dict) -> List[dict]:
""" 加入输出条件 """
pipeline = []
project = raw_query.get('$project')
pipeline.append({'$project': (project if project else {'_id': 0})})
return pipeline
async def find_one(self, raw_query: dict) -> Tuple[list, dict]:
""" 执行查找方法 """
pipeline, data = await self.search(raw_query)
return pipeline, (data and data[0] or {})
async def search(self, raw_query: dict) -> Tuple[List[dict], List[dict]]:
""" 执行搜索方法 """
append_pipeline = raw_query.get('$pipeline') or []
pipeline = [
*self.add_filter(raw_query), # 过滤条件
*self.add_paging(raw_query), # 排序分页
*append_pipeline, # 其他操作
*self.add_output(raw_query), # 输出字段
]
data = await self.collection.aggregate(pipeline)
return pipeline, data
async def count(self, raw_query: dict) -> dict:
""" 执行统计方法 """
skip = raw_query.get('$skip') or self.default_skip
skip = int(skip) if skip > 0 else 0
limit = raw_query.get('$limit') or self.default_limit
limit = int(limit) if limit > 0 else 0
pipeline = [
*self.add_filter(raw_query),
{'$count': 'total'},
]
data = await self.collection.aggregate(pipeline)
total = data and data[0].get('total', 0) or 0
return {'skip': skip, 'limit': limit, 'total': total}
class MongoDBQuery(Query):
    """ MongoDB query class. """
    default_limit: int = 20
    def paging(self, pragma_dict: dict) -> Tuple[dict, int, int]:
        """ Extract sort/skip/limit paging options. """
sort = pragma_dict.get('$sort') or {}
if not isinstance(sort, dict):
sort = {}
skip = pragma_dict.get('$skip') or 0
if isinstance(skip, int):
skip = 0 if skip <= 0 else skip
else:
skip = 0
        # limit 0 disables the cap; a positive value caps rows; anything else falls back to the default 20
limit = pragma_dict.get('$limit') or self.default_limit
if isinstance(limit, int):
limit = 0 if limit <= 0 else limit
else:
limit = self.default_limit
return sort, skip, limit
def get_query(self) -> Tuple[str, list, list, int, int]:
""" 获取查询条件 """
new_filter = self.filter.copy()
pragma_dict = {}
count_pipeline = []
data_pipeline = []
for k, v in self.filter.items():
if not isinstance(k, str):
continue
if k.startswith('$') and k not in ['$and', '$or']:
pragma_dict[k] = new_filter.pop(k)
        # filter stages that must run before paging
filter = pragma_dict.get('$filter')
if not isinstance(filter, list):
filter = []
count_pipeline.extend([
{'$match': new_filter},
*filter,
{'$count': 'count'},
])
        # $match stage plus the pre-paging filter stages
        data_pipeline.extend([{'$match': new_filter}, *filter])
        # paging
sort, skip, limit = self.paging(pragma_dict)
if sort:
data_pipeline.append({'$sort': sort})
if skip:
data_pipeline.append({'$skip': skip})
if limit:
data_pipeline.append({'$limit': limit})
pragma_handler_list = (
('$pipeline', lambda x: x if isinstance(x, list) else []),
('$unset', lambda x: [{'$unset': x}] if isinstance(x, list) else []),
('$project', lambda x: [{'$project': x}] if isinstance(x, dict) else []),
)
for k, handler in pragma_handler_list:
value = pragma_dict.get(k)
if value is None:
continue
stage = handler(value)
data_pipeline.extend(stage)
return self.table_name, count_pipeline, data_pipeline, skip, limit
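
# Filter-dict sketch for MongoDBQuery (grounded in get_query above): plain keys
# become $match conditions, while $-prefixed pragmas control the pipeline, e.g.
#
#     {'status': 'S1',
#      '$sort': {'create_at': -1}, '$skip': 0, '$limit': 20,
#      '$filter': [...], '$pipeline': [...], '$unset': [...], '$project': {...}}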
class MongoDBOperation(Operation):
    """ MongoDB operation class. """
    def get_operation(self) -> Tuple[str, Any]:
        """ Build the concrete pymongo operation object. """
        # the model registered for this table (used for value conversion)
model = model_mapping.get(self.table_name)
if self.operation == 'insert':
update = self.update
if model:
update = model.to_db(update)
operation_object = InsertOne(document=update)
elif self.operation == 'update':
            # update operators (keys beginning with '$')
update = {k: v for k, v in self.update.items() if k.startswith('$')}
            # plain field values, applied via $set
set_update = {k: v for k, v in self.update.items() if not k.startswith('$')}
if model:
set_update = model.to_db(set_update)
if set_update:
update['$set'] = set_update
operation_object = UpdateMany(filter=self.filter, update=update)
elif self.operation == 'delete':
operation_object = DeleteMany(filter=self.filter)
elif self.operation == 'index':
keys = [(k, v) for k, v in self.filter.items()]
operation_object = IndexModel(keys=keys, **self.update)
else:
raise InvalidOperationException(f'{self.operation}是无效操作')
return self.table_name, operation_object
class MongoDBDecimalCodec(codec_options.TypeCodec):
python_type = Decimal
bson_type = decimal128.Decimal128
def transform_python(self, value):
return decimal128.Decimal128(value)
def transform_bson(self, value):
return value.to_decimal()
class IDCodec(DefaultCodec):
    """ Primary-key codec (str <-> ObjectId). """
type_name = 'id'
def on_load(self, data: str) -> ObjectId:
return ObjectId(data)
def on_to_json(self, data: ObjectId) -> str:
return str(data)
def on_to_db(self, data: ObjectId) -> ObjectId:
return data
class DecimalCodec(DefaultCodec):
    """ Decimal codec. """
type_name = 'decimal'
def on_load(self, data: Union[str, int, float, Decimal]) -> Decimal:
return rounding(Decimal(data), 4)
def on_to_json(self, data: Decimal) -> str:
return str(rounding(Decimal(data), 4))
def on_to_db(self, data: Decimal) -> Decimal:
return rounding(Decimal(data), 4)
class DatetimeCodec(DefaultCodec):
    """ Datetime codec. """
type_name = 'datetime'
def on_load(self, data: Union[str, int, float, datetime]) -> datetime:
if isinstance(data, str):
return datetime.strptime(data, datetime_format).astimezone()
elif isinstance(data, (int, float)):
return datetime.fromtimestamp(int(data / 1000)).astimezone()
elif isinstance(data, datetime):
return data.astimezone()
raise InvalidValueException(f'{str(data)}无法转换成{self.type_name}类型')
def on_to_json(self, data: Union[datetime]) -> str:
if isinstance(data, datetime):
return data.astimezone().strftime(datetime_format)
raise InvalidValueException(f'{str(data)}无法转换成{self.type_name}类型')
def on_to_db(self, data: Union[str, int, float, datetime]) -> datetime:
return self.on_load(data)
class MongoDB(Database):
    """ MongoDB implementation of the Database interface. """
database_type: str = 'mongodb'
def init(self, model_class, **kwargs):
for codec_class in (IDCodec, DecimalCodec, DatetimeCodec):
CodecFactory.set(codec_class, True)
model_class.id_field = '_id'
return self
    async def connect(self, uri: str) -> 'Database':
        logging.info('Connecting to MongoDB')
self_codec_options = codec_options.CodecOptions(
type_registry=codec_options.TypeRegistry([MongoDBDecimalCodec()]),
tz_aware=True,
)
self.client = AsyncIOMotorClient(uri, tz_aware=True)
self.database = self.client.get_default_database(codec_options=self_codec_options)
return self
async def close(self):
        self.client.close()
self.client = None
self.database = None
    def new_id(self, id_prefix='') -> ObjectId:
        """ Create a new ObjectId. """
return ObjectId()
    def new_query(self, table_name: str) -> Query:
        """ Create a new query. """
return MongoDBQuery(table_name)
    def new_operation(self, table_name: str) -> Operation:
        """ Create a new operation. """
operation = MongoDBOperation(table_name)
return operation
async def on_find_one(self, query: Query, **kwargs) -> dict:
query.filter.update({'$sort': {'_id': 1}, '$skip': 0, '$limit': 1})
table_name, _, data_pipeline, _, _ = query.get_query()
collection: AgnosticCollection = self.database[table_name]
data = await collection.aggregate(data_pipeline, **kwargs).to_list(1)
return data[0] if (data and isinstance(data, list)) else {}
async def on_search(self, query: Query, **kwargs) -> Tuple[dict, list]:
table_name, count_pipeline, data_pipeline, skip, limit = query.get_query()
collection: AgnosticCollection = self.database[table_name]
count = await collection.aggregate(count_pipeline, **kwargs).to_list(query.max_length)
total = count and count[0]['count'] or 0
data = await collection.aggregate(data_pipeline, **kwargs).to_list(query.max_length)
return {'skip': skip, 'limit': limit, 'total': total}, data
async def on_execute(self, operation_list: List[Operation], **kwargs) -> dict:
bulk_dict = {}
for op in operation_list:
if op.operation not in ['insert', 'update', 'delete']:
continue
if op.table_name not in bulk_dict:
bulk_dict[op.table_name] = []
_table_name, _operation_object = op.get_operation()
bulk_dict[_table_name].append(_operation_object)
assert_dict = kwargs.get('assert_dict') or {}
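        # 'assert_dict' maps table name -> expected affected-row counts, e.g.
        # {'user': {'update': 1}}; a mismatch aborts the whole transaction below.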
result_dict = {}
async with await self.client.start_session() as session:
session.start_transaction()
for collection_name, bulk_list in bulk_dict.items():
collection: AgnosticCollection = self.database[collection_name]
result = {
'insert': 0,
'update': 0,
'delete': 0,
}
for i in range(0, len(bulk_list), 1000):
_result = await collection.bulk_write(bulk_list[i:i + 1000], session=session)
result['insert'] += _result.bulk_api_result['nInserted']
result['update'] += _result.bulk_api_result['nModified']
result['delete'] += _result.bulk_api_result['nRemoved']
result_dict[collection_name] = result
# 检查断言
_assert_dict = assert_dict.get(collection_name)
if _assert_dict and isinstance(_assert_dict, dict):
for i in ['insert', 'update', 'delete']:
expected_value = _assert_dict.get(i)
if expected_value is None:
continue
if expected_value != result[i]:
await session.abort_transaction()
message = f'[{collection_name}]表的[{i}]操作预期行数[{expected_value}],实际行数[{result[i]}]'
raise Exception(message)
await session.commit_transaction()
return result_dict
    async def init_index(self, operation_list: List[Operation]) -> dict:
        """ Create indexes from 'index' operations. """
bulk_dict = {}
for op in operation_list:
if op.operation != 'index':
continue
if op.table_name not in bulk_dict:
bulk_dict[op.table_name] = []
_table_name, _operation_object = op.get_operation()
bulk_dict[_table_name].append(_operation_object)
result_dict = {}
for collection_name, bulk_list in bulk_dict.items():
collection: AgnosticCollection = self.database[collection_name]
result_dict[collection_name] = await collection.create_indexes(bulk_list)
return result_dict | zawn-orm | /zawn_orm-0.0.16.tar.gz/zawn_orm-0.0.16/zawn_orm/impl/mongodb.py | mongodb.py |
from zawn_state_helper import base
class GeneralContext(base.BaseContext):
""" 一般上下文类 """
def __init__(self, ):
state_class_list = list(GeneralState.__subclasses__())
super().__init__(state_class_list)
class GeneralState(base.BaseState):
    """ General state class. """
    def input(self, context: base.BaseContext) -> bool:
        """ Input event """
        return False
    def submit(self, context: base.BaseContext) -> bool:
        """ Submit event """
        return False
    def delete(self, context: base.BaseContext) -> bool:
        """ Delete event """
        return False
    def approve(self, context: base.BaseContext) -> bool:
        """ Approve event """
        return False
    def refuse(self, context: base.BaseContext) -> bool:
        """ Refuse event """
        return False
    def agree(self, context: base.BaseContext) -> bool:
        """ Agree event """
        return False
    def publish(self, context: base.BaseContext) -> bool:
        """ Publish event """
        return False
    def withdraw(self, context: base.BaseContext) -> bool:
        """ Withdraw event """
        return False
    def cancel(self, context: base.BaseContext) -> bool:
        """ Cancel event """
        return False
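
# Event methods return False by default; a concrete state overrides the events it
# supports, switches the context to the next state and returns True (see
# DraftState below, which handles the input/submit/approve events).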
class DraftState(GeneralState):
""" 草稿状态类 """
name = '草稿状态'
status = 'S1'
def input(self, context: base.BaseContext) -> bool:
context.set_state(DraftState())
return True
def submit(self, context: base.BaseContext) -> bool:
context.set_state(RecordedState())
return True
def approve(self, context: base.BaseContext) -> bool:
context.set_state(ReviewedState())
return True
class RecordedState(GeneralState):
    """ Recorded state class. """
    name = '已录入状态'
    status = 'S2'
class DeletedState(GeneralState):
    """ Deleted state class. """
    name = '已删除状态'
    status = 'S3'
class ReviewedState(GeneralState):
    """ In-review state class. """
    name = '审核中状态'
    status = 'S4'
class ApprovalState(GeneralState):
    """ Approved state class. """
    name = '已审批状态'
    status = 'S5'
class PublishedState(GeneralState):
    """ Published state class. """
    name = '已发布状态'
    status = 'S6'
class WithdrawnState(GeneralState):
    """ Withdrawn state class. """
    name = '已撤回状态'
    status = 'S7'
class RevokedState(GeneralState):
    """ Revoked state class. """
    name = '已作废状态'
    status = 'S8'
if __name__ == '__main__':
s1 = DraftState()
context = GeneralContext()
is_success = context.request('approve', s1.status)
if not is_success:
        print('no such event')
s4 = context.get_status_value()
print(s4)
is_success = context.request('submit')
if not is_success:
        print('no such event')
s2 = context.get_status_value()
print(s2) | zawn-utils | /zawn-utils-0.0.12.tar.gz/zawn-utils-0.0.12/zawn_state_helper/general_state.py | general_state.py |
import re
from datetime import timedelta, datetime
from typing import List, Union
class SearchHelper(object):
    """ Search helper: builds MongoDB aggregation pipeline fragments. """
key_regex = re.compile('^[0-9a-zA-Z_]+$')
default_skip: int = 0
default_limit: int = 10
@classmethod
def match(cls, conditions: List[dict]):
query = [{'$match': {'$and': conditions}}]
return query
@classmethod
def sort_desc_by_create_time(cls):
query = [{'$sort': {'create_at': -1, '_id': -1}}]
return query
@classmethod
def skip_limit(cls, skip: int = default_skip, limit: int = default_limit):
query = []
if skip > 0:
query.append({'$skip': skip})
if limit > 0:
query.append({'$limit': limit})
return query
@classmethod
def unwind(cls, path: str, preserve: bool = True):
query = [{'$unwind': {'path': path, 'preserveNullAndEmptyArrays': preserve}}]
return query
@classmethod
def join(
cls, from_collection: str, local_field: str, foreign_field: str,
as_name: str, unwind_path: str = None, set_fields: dict = None,
preserve: bool = True):
query = [
{'$lookup': {
'from': from_collection,
'localField': local_field,
'foreignField': foreign_field,
'as': as_name,
}},
]
        # only add the $unwind stage when unwind_path is not None
if unwind_path is not None:
query.extend(cls.unwind(unwind_path, preserve))
if set_fields:
query.extend([
{'$set': {
i: f'${as_name}.{set_fields[i]}' for i in set_fields
}},
{'$unset': [as_name]},
])
return query
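    # Example (illustrative): join user info by user_id and copy one field:
    #     *SearchHelper.join('user', 'user_id', 'user_id', 'user_doc',
    #                        unwind_path='$user_doc', set_fields={'user_name': 'name'}),
    # yields $lookup, $unwind, $set and $unset stages for an aggregation pipeline.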
    @classmethod
    def group_by(cls, fields: Union[dict, str], count: dict):
        ''' Group and aggregate statistics. '''
query = []
group_query = {'$group': {
'_id': fields,
'root': {'$mergeObjects': '$$ROOT'},
}}
group_query['$group'].update(count)
query.append(group_query)
if len(count) != 0:
query.append({'$set': {f'root.{i}': f'${i}' for i in count}})
query.append({'$replaceRoot': {'newRoot': '$root'}})
return query
@classmethod
def join2(cls, from_collection, join_fields, set_fields):
        ''' Multi-field join ($lookup) pipeline builder.
        set_fields = {source field name: joined-collection field name}
*QueryUtils.join2(
'aid_train_result',
{'train_code': 'train_code', 'aid_user_code': 'aid_user_code'},
{
'theory_result': 'theory_result',
'trial_result': 'trial_result',
'recovery_result': 'recovery_result',
'rescue_result': 'rescue_result',
'attendance': 'attendance',
},
),
'''
let_fields = {}
conditions = []
for k in join_fields:
let_fields[k] = f'${k}'
conditions.append({'$eq': [f'${join_fields[k]}', f'$${k}']})
conditions.append({'$ne': ['$status', '-1']})
for k in set_fields:
set_fields[k] = {'$first': f'${from_collection}.{set_fields[k]}'}
pipeline = [
{'$lookup': {
'from': from_collection,
'let': let_fields,
'pipeline': [
{'$match': {'$expr': {'$and': conditions}}},
],
'as': from_collection,
}},
{'$set': set_fields},
{'$unset': [from_collection]},
]
return pipeline
@classmethod
    def range_query(
            cls, key: str, value: Union[list, str],
            query_type: str, offset_time: timedelta = timedelta()):
        ''' Build range-search conditions.
        query_type: one of [date, datetime, number, code, status]
        '''
query = []
if query_type == 'date':
date_format = '%Y-%m-%d'
one_day = timedelta(days=1)
if isinstance(value, list) and len(value) == 2:
start = datetime.strptime(value[0], date_format) + offset_time
end = datetime.strptime(value[1], date_format) + offset_time + one_day
query.append({key: {'$gte': start, '$lt': end}})
elif isinstance(value, str):
dt = datetime.strptime(value, date_format) + offset_time
query.append({key: {'$gte': dt, '$lt': dt + one_day}})
else:
raise Exception(f'时间搜索条件 {key}:[start,end]')
elif query_type == 'datetime':
datetime_format = '%Y-%m-%dT%H:%M:%S.%fZ'
if isinstance(value, list) and len(value) == 2:
start = datetime.strptime(value[0], datetime_format)
end = datetime.strptime(value[1], datetime_format)
query.append({key: {'$gte': start, '$lt': end}})
elif isinstance(value, str):
query.append({key: datetime.strptime(value, datetime_format)})
else:
raise Exception(f'时间搜索条件 {key}:[start,end]')
elif query_type == 'number':
if isinstance(value, list) and len(value) == 2:
query.append({key: {'$gte': value[0], '$lte': value[1]}})
elif isinstance(value, (int, float)):
query.append({key: value})
elif isinstance(value, str):
query.append({key: float(value)})
else:
raise Exception(f'数字搜索条件 {key}:[start,end]')
elif query_type == 'code':
if isinstance(value, list):
query.append({key: {'$in': value}})
elif isinstance(value, str):
query.append({key: value})
else:
raise Exception(f'数字搜索条件 {key}:[code1,code2,...]')
elif query_type == 'status':
if isinstance(value, list):
query.append({key: {'$in': value}})
elif isinstance(value, str):
query.append({key: value})
else:
query.append({key: {'$ne': '-1'}})
else:
raise Exception('必传query_type:[date,datetime,number,code]')
return query | zawn-utils | /zawn-utils-0.0.12.tar.gz/zawn-utils-0.0.12/zawn_helper/search_helper.py | search_helper.py |
import base64
import hashlib
import json
import time
from typing import Tuple
class TokenHelper(object):
    """ Token helper. """
    _ENCODING = 'utf-8'
    _PRIVATE_KEY = '[email protected]'.encode(_ENCODING)
    _KEY = None
    _EXP_OFFSET = 86400  # expiry offset, in seconds
@classmethod
    def set_key(cls, key: str) -> None:
        """ Set the signing key. """
        cls._KEY = key.encode(cls._ENCODING)
@classmethod
    def get_sign_string(cls, data: dict) -> str:
        """ Hash the payload and return an upper-case hex signature. """
        # create the hash object
        hash = hashlib.sha3_256()
        hash.update(cls._KEY)
        # sort the dict keys (only str keys are signed)
        keys = sorted([i for i in data.keys() if isinstance(i, str)])
        # feed every key and value into the hash
        for index, key in enumerate(keys):
            hash.update(key.encode(cls._ENCODING))
            value = data[key]
            if not isinstance(value, str):
                value = str(value)
            hash.update(value.encode(cls._ENCODING))
            hash.update(cls._PRIVATE_KEY)
            hash.update(cls._KEY)
            hash.update(f'{index % 3}'.encode(cls._ENCODING))
        # return the upper-case hex digest
        return hash.hexdigest().upper()
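    # Signing scheme (summary): keys are sorted, then each key/value pair is fed
    # into SHA3-256 interleaved with the private salt, the configured key and an
    # index-derived byte, so reordered or renamed fields change the digest.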
@classmethod
    def sign(cls, data: dict) -> str:
        """ Sign the payload and return a base64 token. """
        data = data.copy()
        # token expiry: default to now + offset when not supplied
        exp = data.get('exp')
        data['exp'] = abs(exp) if isinstance(exp, int) and exp else int(time.time() + cls._EXP_OFFSET)
        # compute the signature and store it in the 'sign' field
        data['sign'] = cls.get_sign_string(data)
        # serialize to compact, key-sorted JSON, then base64-encode
        token = json.dumps(data, allow_nan=False, separators=(',', ':'), sort_keys=True)
        return base64.b64encode(token.encode(cls._ENCODING)).decode(cls._ENCODING)
@classmethod
    def decode(cls, token: str) -> dict:
        """ Decode a token without verifying it. """
token = base64.b64decode(token.encode(cls._ENCODING) + b'===')
return json.loads(token)
@classmethod
    def validate(cls, token: str) -> Tuple[bool, str, dict]:
        """ Validate a token; returns (ok, message, payload). """
        data = cls.decode(token)
        # read the expiry field
        if not isinstance(data.get('exp'), int):
            return False, 'failed to parse expiry [exp]', data
        # check the expiry
        if data['exp'] < int(time.time()):
            return False, 'token expired [exp]', data
        # read the signature field
        if not data.get('sign'):
            return False, 'missing signature field [sign]', data
        # verify the signature
        sign = data.pop('sign')
        _sign = cls.get_sign_string(data)
        if sign != _sign:
            return False, 'signature mismatch [sign]', data
        return True, 'signature verified', data
if __name__ == '__main__':
key = 'baserver'
TokenHelper.set_key(key)
data = {'user_id': 'my_user_id'}
print(data)
token = TokenHelper.sign(data)
print(token)
data = TokenHelper.validate(token)
print(data) | zawn-utils | /zawn-utils-0.0.12.tar.gz/zawn-utils-0.0.12/zawn_helper/token_helper.py | token_helper.py |
import datetime
from io import BytesIO
from typing import List
from zawn_orm.database import Operation
from zawn_workflow.context.base import NodeFactory
from zawn_workflow.context.context import BaseContext
class WorkFlowRepository(BaseContext):
    """ Workflow repository. """
    def import_form_data(self, form_data: list, **kwargs) -> BaseContext:
        """ Import form definitions. """
now = datetime.datetime.now().astimezone()
op = kwargs.get('op', 'system')
work_form = self.data_factory.work_form
form_dict = {}
form_label_list = []
for row in form_data:
form_id = row['表单编号']
form_label = row['表单标签']
form_dict[form_id] = {
'form_id': form_id,
'form_label': form_label,
'form_name': row['表单字段'],
'form_type': row['表单类型'],
'placeholder': row['占位符'],
'disabled': row['能否编辑'] in ['Y', '能', '是'],
'required': row['是否必填'] in ['Y', '能', '是'],
**kwargs,
}
form_label_list.append(form_label)
        # mark existing forms with the same labels as deleted
self.operation_list.append(work_form.new_operation(
'update',
filter={'form_label': {'$in': list(set(form_label_list))}, **kwargs},
update={'$set': work_form.to_db({'status': work_form.state.DELETE, 'update_at': now, 'update_by': op})},
))
        # insert the new form definitions
for _, form in form_dict.items():
self.operation_list.append(work_form.new_operation(
'insert',
filter={},
update=work_form.to_db(form)
))
return self
    def import_model_data(
            self, model_data: list, **kwargs) -> BaseContext:
        """ Import model definitions. """
work_model = self.data_factory.work_model
model_dict = {}
for row in model_data:
model_id = row['模型编号']
model_dict[model_id] = {
'model_id': model_id,
'model_key': row['模型KEY'],
'model_type': row['模型分类'],
'model_name': row['模型名称'],
**kwargs,
}
for _, v in model_dict.items():
self.operation_list.append(work_model.new_operation(
'insert',
filter={},
update=work_model.to_db(v)
))
return self
    def import_node_data(self, node_data: list, **kwargs) -> BaseContext:
        """ Import node definitions. """
work_node = self.data_factory.work_node
node_dict = {}
for i, row in enumerate(node_data):
node_id = row['节点编号']
model_id = row['模型编号']
if model_id not in node_dict:
node_dict[model_id] = []
node_dict[model_id].append({
'node_id': node_id,
'parent_id': row['父节点编号'],
'node_name': row['节点名称'],
'node_type': row['节点类型'],
'sort': i,
'form_label': row['表单标签'],
'model_id': model_id,
**kwargs,
})
for model_id, node_list in node_dict.items():
context = NodeFactory.make_node(node_list)
if context is None:
continue
for node in context.root_node.to_node_list():
self.operation_list.append(work_node.new_operation(
'insert',
filter={},
update=work_node.to_db(node)
))
return self
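    # Expected workbook layout (inferred from import_xlsx below): sheet 0 holds
    # the form fields, sheet 1 the models and sheet 2 the nodes, using the Chinese
    # column headers consumed by the three import_* methods above.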
    def import_xlsx(self, xlsx_data: BytesIO, **kwargs):
        """ Import a definition workbook (xlsx). """
form_data = self.read_xlsx(xlsx_data, 0)
model_data = self.read_xlsx(xlsx_data, 1)
node_data = self.read_xlsx(xlsx_data, 2)
if not (model_data and node_data and form_data):
return self
        # clear the existing model/node/form definitions, then re-import the latest data
self.operation_list: List[Operation] = [
self.data_factory.work_model.new_operation(
'delete',
filter=kwargs,
update={}
),
self.data_factory.work_node.new_operation(
'delete',
filter=kwargs,
update={}
),
self.data_factory.work_form.new_operation(
'delete',
filter=kwargs,
update={}
),
]
self.import_form_data(form_data, **kwargs)
self.import_model_data(model_data, **kwargs)
self.import_node_data(node_data, **kwargs)
return self | zawn-utils | /zawn-utils-0.0.12.tar.gz/zawn-utils-0.0.12/zawn_workflow/context/repository.py | repository.py |
import datetime
from zawn_workflow.context.base import NodeFactory
from zawn_workflow.context.context import BaseContext
from zawn_workflow.context.searcher import WorkFlowSearcher
from zawn_workflow.data.exception import ModelNotFoundError
class WorkFlowRuntime(BaseContext):
    """ Workflow runtime. """
    def task_tick(self, instance_id: str, task_list: list) -> BaseContext:
        """ Load a task list into the workflow engine and advance it one tick. """
work_task = self.data_factory.work_task
task_field_mapping = {
'node_id': 'task_id',
'node_name': 'task_name',
'node_type': 'task_type',
}
context = NodeFactory.make_node(task_list, mapping=task_field_mapping).tick()
new_task_list = context.to_node_list(mapping=task_field_mapping)
for task in new_task_list:
self.operation_list.append(work_task.new_operation(
'insert', filter={}, update=work_task.to_db(task),
))
return self
    def copy_node_to_task(self, node_list: list, instance_id: str, **kwargs) -> BaseContext:
        """ Copy node definitions into task rows for a new instance. """
now = datetime.datetime.now().astimezone()
op = kwargs.get('op', 'system')
work_instance = self.data_factory.work_instance
        # create a task id for every node, building a node_id -> task_id map
id_dict = {}
for node in node_list:
id_dict[node['node_id']] = work_instance.new_id()
        # assemble the task rows
task_list = []
for node in node_list:
node_id = node['node_id']
parent_id = node['parent_id']
task_id = id_dict[node_id]
task = {
'task_id': task_id,
                'parent_id': id_dict.get(parent_id, ''),  # the root node's parent is not in the map
'task_name': node['node_name'],
'task_type': node['node_type'],
'sort': node['sort'],
'handler_type': node['handler_type'],
'handler_id': node['handler_id'],
'instance_id': instance_id,
**kwargs,
'status': work_instance.state.SUBMIT,
'create_at': now,
'create_by': op,
}
task_list.append(task)
        # run the tasks through the workflow engine
self.task_tick(instance_id, task_list)
return self
    async def create_instance(self, model_id: str, **kwargs) -> BaseContext:
        """ Create work instances from a model definition. """
        # 1. look up the model definition together with its nodes and forms
        work_flow_searcher = WorkFlowSearcher()
        meta, detail_list = await (await work_flow_searcher.search_model_detail(model_id, **kwargs)).search()
        if meta['total'] == 0:
            raise ModelNotFoundError(f'work model not found: {model_id}')
work_instance = self.data_factory.work_instance
work_task = self.data_factory.work_task
work_record = self.data_factory.work_record
work_store = self.data_factory.work_store
for model in detail_list:
model_id = model['model_id']
instance_id = work_instance.new_id()
nodes = model.pop('nodes', None) or []
forms = model.pop('forms', None) or []
if not nodes:
                self.logger.info(f'model has no configured nodes: {model_id}')
continue
self.operation_list.append(work_instance.new_operation(
'insert', filter={}, update=work_instance.to_db({
'instance_id': instance_id,
'instance_name': model['model_name'],
'step': 0,
'completion_rate': 0,
'model_id': model_id,
})
))
self.copy_node_to_task(nodes, instance_id)
return self
    async def start(self, model_key: str, handler_id: str, **kwargs) -> BaseContext:
        """ Start event. """
        # Start-event flow:
        # 1. check that the model and the user exist; return early otherwise
        # 2. create the instance
        # 3. copy nodes into tasks
        # 4. run one tick
        # 5. fire one agree event
work_flow_searcher = WorkFlowSearcher()
meta, model_list = await (await work_flow_searcher.search_model(model_key=model_key, **kwargs)).search()
if meta['total'] != 1:
return self
meta, user_list = await (await work_flow_searcher.search_user([handler_id], **kwargs)).search()
if meta['total'] == 0:
return self
return self
async def agree(
self, instance_id: str, handler_id: str, form_data: dict, **kwargs):
""" 同意事件 """
pass
async def reject(
self, instance_id: str, handler_id: str, form_data: dict, **kwargs):
""" 拒绝事件 """
pass
async def assign(
self, instance_id: str, handler_id: str, form_data: dict, **kwargs):
""" 指派事件 """
pass
async def entrust(
self, instance_id: str, handler_id: str, form_data: dict, **kwargs):
""" 委托事件 """
pass | zawn-utils | /zawn-utils-0.0.12.tar.gz/zawn-utils-0.0.12/zawn_workflow/context/runtime.py | runtime.py |
import logging
from abc import ABC
from typing import Union, List, Optional, Type
from zawn_workflow.data.exception import NodeNotFoundError
class NodeType(object):
""" 节点类型 """
root = 'root' # 根
originate = 'originate' # 发起
approval = 'approval' # 审批
serial = 'serial' # 串行
join = 'join' # 会签
parallel = 'parallel' # 并行
condition = 'condition' # 条件
loop = 'loop' # 循环
class NodeStatus(object):
""" 节点状态 """
FUTURE: str = 'FUTURE' # 现在还没有走到的节点状态
WAITING: str = 'WAITING' # 只有复杂节点有该状态,表示在等待子节点审批
READY: str = 'READY' # 可以进行审批操作的简单节点是Ready状态
SKIP: str = 'SKIP' # 当一个并行节点的子节点状态为非(Ready, Waiting)时,其它兄弟节点及其子节点的状态被置为Skip
COMPLETE: str = 'COMPLETE' # 已经审批完成的节点状态
class HandlerType(object):
""" 处理人类型 """
STATIC: str = 'STATIC' # 设计阶段静态录入人员
FORM: str = 'FORM' # 运行阶段表单动态录入人员
RELATION: str = 'RELATION' # 通过映射函数获取人员
class Context(object):
""" 上下文类 """
def __init__(self):
self.root_node: Optional[Node] = None
self._data: List[dict] = []
self._node_dict: dict = {}
@property
def data(self) -> dict:
return_data: dict = self._data.pop(0) if len(self._data) > 0 else {}
return return_data
@data.setter
def data(self, value: dict):
self._data.append(value)
def register(self, node: 'Node'):
""" 注册节点信息 """
if self.root_node is None:
self.root_node = node
self._node_dict[node.node_id] = node
def get_node(self, node_id: str) -> 'Node':
""" 通过node_id获取节点 """
return self._node_dict.get(node_id)
def change_status(self, node_id: str, status: str):
""" 修改节点状态 """
try:
node = self._node_dict[node_id]
node.status = status
except Exception as e:
message = f'{e}'
logging.error(message, exc_info=True)
def tick(self, form_data: dict) -> 'Context':
""" 嘀嗒方法,每帧刷新新数据 """
self._data = form_data
self.root_node.tick()
return self
def to_node_list(self, node_list: List[dict] = None, parent_id: str = '', mapping: dict = None) -> List[dict]:
""" 转成节点列表 """
self.root_node.to_node_list(node_list=node_list, parent_id=parent_id, mapping=mapping)
class Node(ABC):
""" 节点类 """
node_type = 'base'
def __init__(self, node_id: str, node_name: str, raw_data: dict):
self.node_id: str = node_id
self.node_name: str = node_name
self.parent: Optional[Node] = None
self.children: List[Node] = []
self.max_children: int = -1
self._context: Optional[Context] = None
self._status: str = NodeStatus.FUTURE
# 处理类型、处理人id
self._handler_type: str = HandlerType.STATIC
self._handler_id: str = '' # 静态时为指定id,动态时为上下文的字段名,映射时为函数名称
# 分支条件,下标和子节点对应,不足时不执行
self._conditions: List[str] = []
# 存放原始数据
self.raw_data = raw_data
@property
def context(self):
return self._context
@context.setter
def context(self, value: Context):
self._context = value
@property
def status(self):
return self._status
@status.setter
def status(self, value: str):
self._status = value
@property
def handler_type(self):
return self._handler_type
@handler_type.setter
def handler_type(self, value: str):
self._handler_type = value
@property
def handler_id(self):
return self._handler_id
@handler_id.setter
def handler_id(self, value: str):
self._handler_id = value
@property
def conditions(self):
return self._conditions
@conditions.setter
def conditions(self, value: str):
self._conditions = value
def __repr__(self):
return f'<{self.node_type} {self.node_id}.{self.node_name}>'
def __str__(self):
return f'{self.node_id}.{self.node_name}'
def __eq__(self, other):
return isinstance(other, Node) and self.node_id == other.node_id
def set_parent(self, node: 'Node'):
""" 设置父节点 """
self.parent = node
def get_children_count(self) -> int:
""" 获取子节点数量 """
return len(self.children)
def is_index_valid(self, index: int) -> bool:
""" 校验下标合法 """
return 0 <= index < self.get_children_count()
def get_child(self, index: int) -> Union['Node', None]:
""" 获取子节点 """
if index >= self.get_children_count() or index < 0:
return None
return self.children[index]
def add_child(self, node: 'Node') -> 'Node':
""" 增加子节点 """
if 0 <= self.max_children <= self.get_children_count():
return self
self.children.append(node)
return self
def remove_child(self, node: 'Node') -> 'Node':
""" 删除子节点 """
for i in range(self.get_children_count()):
if node.node_id == self.get_child(i).node_id:
self.children.pop(i)
break
return self
def to_node_list(self, node_list: List[dict] = None, parent_id: str = '', mapping: dict = None) -> List[dict]:
""" 转成节点列表 """
node_list = node_list or []
mapping = mapping or {}
id_field_name = mapping.get('node_id') or 'node_id'
name_field_name = mapping.get('node_name') or 'node_name'
type_field_name = mapping.get('node_type') or 'node_type'
sort_field_name = mapping.get('sort') or 'sort'
parent_field_name = mapping.get('parent_id') or 'parent_id'
status_field_name = mapping.get('status') or 'status'
handler_type_field_name = mapping.get('handler_type') or 'handler_type'
handler_id_field_name = mapping.get('handler_id') or 'handler_id'
conditions_field_name = mapping.get('conditions') or 'conditions'
data = self.raw_data
data.update({
id_field_name: self.node_id,
name_field_name: self.node_name,
type_field_name: self.node_type,
sort_field_name: len(node_list) + 1,
parent_field_name: parent_id,
status_field_name: self.status,
handler_type_field_name: self.handler_type,
handler_id_field_name: self.handler_id,
conditions_field_name: self.conditions,
})
node_list.append(data)
for child in self.children:
child.to_node_list(node_list=node_list, parent_id=self.node_id, mapping=mapping)
return node_list
def tick(self) -> str:
""" 嘀嗒方法,每帧刷新新数据 """
running_status = self.on_tick(self.context)
return running_status
def on_tick(self, context: Context) -> str:
""" 嘀嗒执行方法 """
return self.status
def pre_condition(self):
""" 前置条件,满足前置条件才能进入该节点 """
pass
def post_condition(self):
""" 后置条件,满足后置条件该节点才能审批完成 """
pass
def pre_script(self):
""" 前置脚本,开始审批该节点时执行 """
pass
def post_script(self):
""" 后置脚本,审批完成该节点时执行 """
class RootNode(Node):
""" 根节点 """
node_type = NodeType.root
def on_tick(self, context: Context) -> str:
""" 嘀嗒执行方法 """
# 逐一遍历每个子节点,当子节点处于完成,才会执行下一个子节点
# 如果节点状态是已完成,则直接返回已完成
if self.status == NodeStatus.COMPLETE:
return self.status
# 等待状态
self.status = NodeStatus.WAITING
# 遍历每个子节点,执行嘀嗒方法
# 如果嘀嗒方法返回不是完成状态,则直接返回等待状态
for child in self.children:
child_status = child.tick()
if child_status != NodeStatus.COMPLETE:
return self.status
# 当所有子节点都是完成状态时,返回完成状态
self.status = NodeStatus.COMPLETE
return self.status
class OriginateNode(Node):
""" 发起节点 """
node_type = NodeType.originate
def __init__(self, node_id: str, node_name: str, raw_data: dict):
super().__init__(node_id, node_name, raw_data)
self.max_children = 0 # 不允许添加子节点
def on_tick(self, context: Context) -> str:
""" 嘀嗒执行方法 """
# 首先进入准备状态,等待审核事件后进入完成状态
# 如果节点状态是已完成,则直接返回已完成
if self.status == NodeStatus.COMPLETE:
return self.status
# 准备状态,没有发起人数据时将一直处于这个状态
self.status = NodeStatus.READY
handler_id = context.data.get('handler_id')
if not handler_id:
return self.status
# 设置发起人信息,并标记完成状态
self.handler_type = HandlerType.STATIC
self.handler_id = handler_id
self.status = NodeStatus.COMPLETE
return self.status
class ApprovalNode(Node):
""" 审批节点 """
node_type = NodeType.approval
def __init__(self, node_id: str, node_name: str, raw_data: dict):
super().__init__(node_id, node_name, raw_data)
self.max_children = 0 # 不允许添加子节点
def on_tick(self, context: Context) -> str:
""" 嘀嗒执行方法 """
# 首先进入准备状态,等待审核事件后进入完成状态
# 如果节点状态是已完成,则直接返回已完成
if self.status == NodeStatus.COMPLETE:
return self.status
# 转换处理人信息
if self.handler_type == HandlerType.FORM:
self.handler_id = context.data.get(self.handler_id) or self.handler_id
elif self.handler_type == HandlerType.RELATION:
originator_id = context.data.get('originator_id') or ''
command = f'{self.handler_id}("{originator_id}")'
try:
handler_id = eval(command, context.data)
self.handler_id = handler_id or self.handler_id
except Exception as e:
logging.info(f'审批节点获取映射处理人出错:{command}:{e}')
else:
pass
# 准备状态,没有发起人数据时将一直处于这个状态
self.status = NodeStatus.READY
return self.status
class SerialNode(RootNode):
""" 串行节点 """
node_type = NodeType.serial
class JoinNode(Node):
""" 会签节点 """
node_type = NodeType.join
def on_tick(self, context: Context) -> str:
""" 嘀嗒执行方法 """
# 首先进入等待状态,等待所有子节点都是完成状态时进入完成状态
# 进入等待状态
self.status = NodeStatus.WAITING
# 同时执行子节点,并检查完成状态
status_list = []
for child in self.children:
child_status = child.tick()
status_list.append(child_status == NodeStatus.COMPLETE)
# 所有子节点状态都是完成状态时,返回完成状态
if all(status_list):
self.status = NodeStatus.COMPLETE
return self.status
class ParallelNode(Node):
""" 并行节点 """
node_type = NodeType.parallel
def on_tick(self, context: Context) -> str:
""" 嘀嗒执行方法 """
# 首先进入等待状态,等待任一子节点是完成状态时进入完成状态
# 进入等待状态
self.status = NodeStatus.WAITING
# 同时执行子节点,并检查完成状态
status_list = []
for child in self.children:
child_status = child.tick()
status_list.append(child_status == NodeStatus.COMPLETE)
# 任一子节点状态是完成状态时,返回完成状态
if any(status_list):
self.status = NodeStatus.COMPLETE
# 如果是完成状态,需要把未完成的子节点设置为跳过状态
if self.status == NodeStatus.COMPLETE:
node_list = [self.children[i] for i in range(len(status_list)) if not status_list[i]]
for node in node_list:
node.status = NodeStatus.SKIP
return self.status
class ConditionNode(Node):
""" 条件节点 """
node_type = NodeType.condition
def on_tick(self, context: Context) -> str:
""" 嘀嗒执行方法 """
# 首先进入等待状态,等待任一子节点是完成状态时进入完成状态
# 进入等待状态
self.status = NodeStatus.WAITING
# 检查条件长度,如果为0直接完成状态
condition_length = len(self.conditions)
if condition_length == 0:
self.status = NodeStatus.COMPLETE
return self.status
# 逐一遍历每个子节点,判断条件指定情况和子节点执行情况
status_list = [False] * len(self.children)
for index, child in enumerate(self.children):
# 没有判断条件的跳过
if index >= condition_length:
continue
# 条件不是字符串,或空字符串的跳过
condition = self.conditions[index]
if not isinstance(condition, str) or condition == '':
continue
# 条件判断结果为false时跳过
try:
condition_status = bool(eval(condition, self.context.data))
if not condition_status:
continue
except Exception as e:
logging.info(f'条件节点判断条件时出错:{index}:{condition}:{e}')
# 执行子节点,检查执行结果是否为完成
child_status = child.tick()
status_list[index] = (child_status == NodeStatus.COMPLETE)
# 任一子节点状态是完成状态时,返回完成状态
if any(status_list):
self.status = NodeStatus.COMPLETE
# 如果是完成状态,需要把未完成的子节点设置为跳过状态
if self.status == NodeStatus.COMPLETE:
node_list = [self.children[i] for i in range(len(status_list)) if not status_list[i]]
for node in node_list:
node.status = NodeStatus.SKIP
return self.status
class LoopNode(Node):
""" 循环节点 """
node_type = NodeType.loop
def __init__(self, node_id: str, node_name: str, raw_data: dict):
super().__init__(node_id, node_name, raw_data)
self.max_children = 1 # 只允许添加1个子节点
self.max_loop_count = 1
self.current_loop_count = 0
class NodeFactory(object):
""" 节点工厂 """
# 节点类字典
node_class_dict = {
i.node_type: i for i in
(RootNode, OriginateNode, ApprovalNode, SerialNode, JoinNode, ParallelNode, ConditionNode)
}
@classmethod
def make_node(cls, node_list: List[dict], mapping: dict = None) -> Context:
""" 制造节点 """
# 实例化上下文对象
context = Context()
if not node_list:
return context
mapping = mapping or {}
id_field_name = mapping.get('node_id') or 'node_id'
name_field_name = mapping.get('node_name') or 'node_name'
type_field_name = mapping.get('node_type') or 'node_type'
sort_field_name = mapping.get('sort') or 'sort'
parent_field_name = mapping.get('parent_id') or 'parent_id'
status_field_name = mapping.get('status') or 'status'
for i in sorted(node_list, key=lambda x: x.get(sort_field_name)):
node_id = i[id_field_name]
node_type = i[type_field_name]
node_class: Type[Node] = cls.node_class_dict.get(node_type)
if node_class is None:
raise NodeNotFoundError(f'节点数据无法获取节点类:{id}:{type}')
# 实例化节点对象
node = node_class(
node_id=node_id,
node_name=i[name_field_name],
raw_data=i,
)
node.status = i.get(status_field_name, NodeStatus.FUTURE)
node.context = context
# 加入节点字典中,用于增加子节点
context.register(node)
# 如果有父节点id时,把当前节点加入到父节点中
parent_id = i[parent_field_name]
if parent_id:
context.get_node(parent_id).add_child(node)
return context | zawn-utils | /zawn-utils-0.0.12.tar.gz/zawn-utils-0.0.12/zawn_workflow/context/base.py | base.py |
from io import BytesIO
from typing import List
from zawn_orm.database import Operation
from zawn_workflow.context.context import BaseContext
class WorkFlowIdentity(BaseContext):
""" 工作流程身份 """
def import_data(self, data: list, **kwargs) -> BaseContext:
""" 导入身份信息列表 """
# 整理用户和用户组的数据,写入数据库
user_dict = {}
new_data = []
for row in data:
user_id = row.pop('用户编号', None)
user_name = row.get('用户姓名', None)
# 无效的数据,或者已经存在的数据,则跳过
if (not (user_id and user_name)) or (user_name in user_dict):
continue
# 整理映射数据和新的列表
user_dict[user_name] = user_id
new_data.append(row)
for user_name, user_id in user_dict.items():
# 用户数据
self.operation_list.append(self.data_factory.work_user.new_operation(
'insert',
filter={},
update=self.data_factory.work_user.to_db({'user_id': user_id, 'user_name': user_name, **kwargs}),
))
for row in new_data:
user_id = user_dict[row.pop('用户姓名')]
# 用户组数据
for group_name, value in row.items():
if value in ['', '-']:
continue
# 如果关联结果存在于用户字典,则以关联类型新增记录
if value in user_dict:
self.operation_list.append(self.data_factory.work_group.new_operation(
'insert',
filter={},
update=self.data_factory.work_group.to_db({
'group_type': 'relation',
'group_name': group_name,
'user_id': user_id,
'target_id': user_dict[value],
**kwargs,
}),
))
# 如果关联结果不存在于用户字典,则以用户组类型新增记录
else:
self.operation_list.append(self.data_factory.work_group.new_operation(
'insert',
filter={},
update=self.data_factory.work_group.to_db({
'group_type': 'group',
'group_name': group_name,
'user_id': user_id,
'target_id': '_',
**kwargs,
}),
))
return self
def import_xlsx(self, xlsx_data: BytesIO, **kwargs) -> BaseContext:
""" 导入身份信息表格 """
# 读取xlsx文件,读取失败将返回0
# 清空所有用户和用户组数据,重新写入最新数据
# 读取xlsx文件,读取失败将返回0
data = self.read_xlsx(xlsx_data, 0)
if not data:
return self
# 清空所有用户和用户组数据,重新写入最新数据
self.operation_list: List[Operation] = [
self.data_factory.work_user.new_operation(
'delete',
filter=kwargs,
update={}
),
self.data_factory.work_group.new_operation(
'delete',
filter=kwargs,
update={}
),
]
self.import_data(data, **kwargs)
return self | zawn-utils | /zawn-utils-0.0.12.tar.gz/zawn-utils-0.0.12/zawn_workflow/context/identity.py | identity.py |
from zawn_orm import field, model
from zawn_orm.state import BaseState
class State(BaseState):
SUBMIT: str = 'S1'
DELETE: str = 'S2'
class WorkUser(model.Model):
""" 工作用户 """
user_id = field.StringField()
user_name = field.StringField()
org_id = field.StringField() # 所属机构id
status = field.StringField() # 状态
create_at = field.DatetimeField() # 创建时间
create_by = field.StringField() # 创建人
update_at = field.DatetimeField() # 更新时间
update_by = field.StringField() # 更新人
state = State() # 有限状态机
class WorkGroup(model.Model):
""" 工作用户组 """
group_type = field.StringField()
group_name = field.StringField()
user_id = field.StringField()
target_id = field.StringField()
org_id = field.StringField() # 所属机构id
status = field.StringField() # 状态
create_at = field.DatetimeField() # 创建时间
create_by = field.StringField() # 创建人
update_at = field.DatetimeField() # 更新时间
update_by = field.StringField() # 更新人
state = State() # 有限状态机
class WorkForm(model.Model):
""" 工作表单 """
form_id = field.StringField() # 表单id
form_label = field.StringField() # 表单标签,每个独立表单都有一样的标签
form_name = field.StringField() # 表单字段名
form_type = field.StringField() # 表单类型
placeholder = field.StringField() # 占位符
disabled = field.BooleanField() # 是否禁用
required = field.BooleanField() # 是否必填
org_id = field.StringField() # 所属机构id
status = field.StringField() # 状态
create_at = field.DatetimeField() # 创建时间
create_by = field.StringField() # 创建人
update_at = field.DatetimeField() # 更新时间
update_by = field.StringField() # 更新人
state = State() # 有限状态机
class WorkModel(model.Model):
""" 工作模型 """
model_id = field.StringField() # 模型id
model_key = field.StringField() # 模型key
model_name = field.StringField() # 模型名称
model_type = field.StringField() # 模型分类类型
version = field.IntegerField() # 模型版本号,模型key重复时+1
org_id = field.StringField() # 所属机构id
status = field.StringField() # 状态
create_at = field.DatetimeField() # 创建时间
create_by = field.StringField() # 创建人
update_at = field.DatetimeField() # 更新时间
update_by = field.StringField() # 更新人
class WorkNode(model.Model):
""" 工作节点 """
node_id = field.StringField() # 节点id
parent_id = field.StringField() # 父节点id
node_name = field.StringField() # 节点名称
node_type = field.StringField() # 节点类型
sort = field.StringField() # 排序序号
handler_type = field.StringField() # 处理人类型,不指定、指定、表单、关联、用户组
handler_id = field.StringField() # 处理人id
form_label = field.StringField() # 表单标签
model_id = field.StringField() # 模型id
org_id = field.StringField() # 所属机构id
status = field.StringField() # 状态
create_at = field.DatetimeField() # 创建时间
create_by = field.StringField() # 创建人
update_at = field.DatetimeField() # 更新时间
update_by = field.StringField() # 更新人
state = State() # 有限状态机
class WorkInstance(model.Model):
""" 工作实例 """
instance_id = field.StringField() # 实例id
instance_name = field.StringField() # 实例名称,以发起人姓名+模型名称为名
step = field.IntegerField() # 当前步进数
completion_rate = field.IntegerField() # 完成率 0-100区间
model_id = field.StringField() # 所属模型id
org_id = field.StringField() # 所属机构id
status = field.StringField() # 状态
create_at = field.DatetimeField() # 创建时间
create_by = field.StringField() # 创建人
update_at = field.DatetimeField() # 更新时间
update_by = field.StringField() # 更新人
state = State() # 有限状态机
class WorkTask(model.Model):
""" 工作任务 """
task_id = field.StringField() # 任务id
parent_id = field.StringField() # 父任务id
task_name = field.StringField() # 任务名称
task_type = field.StringField() # 任务类型
sort = field.StringField() # 排序序号
handler_type = field.StringField() # 处理人类型,不指定、指定、表单、关联、用户组
handler_id = field.StringField() # 处理人id
instance_id = field.StringField() # 所属实例id
org_id = field.StringField() # 所属机构id
status = field.StringField() # 状态
create_at = field.DatetimeField() # 创建时间
create_by = field.StringField() # 创建人
update_at = field.DatetimeField() # 更新时间
update_by = field.StringField() # 更新人
state = State() # 有限状态机
class WorkRecord(model.Model):
""" 工作记录 """
record_id = field.StringField() # 记录id
handler_id = field.StringField() # 用户id
event = field.StringField() # 事件,同意、驳回
comment = field.IntegerField() # 评论
step = field.IntegerField() # 步进数
task_id = field.StringField() # 所属任务id
instance_id = field.StringField() # 所属实例id
org_id = field.StringField() # 所属机构id
status = field.StringField() # 状态
create_at = field.DatetimeField() # 创建时间
create_by = field.StringField() # 创建人
update_at = field.DatetimeField() # 更新时间
update_by = field.StringField() # 更新人
state = State() # 有限状态机
class WorkStore(model.Model):
""" 工作贮存 """
store_id = field.StringField() # 贮存id
label = field.StringField() # 表单展示标题
name = field.StringField() # 表单字段名称
type = field.StringField() # 表单类型 包含基本类型和自定义类型
value = field.StringField() # 表单值 统一序列化为字符串
record_id = field.StringField() # 所属记录id
task_id = field.StringField() # 所属任务id
instance_id = field.StringField() # 所属实例id
org_id = field.StringField() # 所属机构id
status = field.StringField() # 状态
create_at = field.DatetimeField() # 创建时间
create_by = field.StringField() # 创建人
update_at = field.DatetimeField() # 更新时间
update_by = field.StringField() # 更新人
state = State() # 有限状态机
class DataFactory(object):
""" 数据工厂 """
work_user: model.Model = None
work_group: model.Model = None
work_model: model.Model = None
work_node: model.Model = None
work_form: model.Model = None
work_instance: model.Model = None
work_task: model.Model = None
work_value: model.Model = None
work_record: model.Model = None
_this = None
_instance: 'DataFactory' = None
def __new__(cls, *args, **kwargs):
if cls._instance is None:
instance = super().__new__(cls, *args, **kwargs)
instance.work_user = WorkUser()
instance.work_group = WorkGroup()
instance.work_model = WorkModel()
instance.work_node = WorkNode()
instance.work_form = WorkForm()
instance.work_instance = WorkInstance()
instance.work_task = WorkTask()
instance.work_record = WorkRecord()
instance.work_store = WorkStore()
cls._instance = instance
return cls._instance
if __name__ == '__main__':
data_factory = DataFactory()
print(id(data_factory))
data_factory = DataFactory()
print(id(data_factory)) | zawn-utils | /zawn-utils-0.0.12.tar.gz/zawn-utils-0.0.12/zawn_workflow/data/factory.py | factory.py |
import datetime
from io import BytesIO
from typing import List
from zawn_orm.database import Operation
from zawn_workflow.context.base import NodeFactory
from zawn_workflow.context.context import BaseContext
class WorkFlowRepository(BaseContext):
""" 工作流程仓库 """
def import_form_data(self, form_data: list, **kwargs) -> BaseContext:
""" 导入表单数据 """
now = datetime.datetime.now().astimezone()
op = kwargs.get('op', 'system')
work_form = self.data_factory.work_form
form_dict = {}
form_label_list = []
for row in form_data:
form_id = row['表单编号']
form_label = row['表单标签']
form_dict[form_id] = {
'form_id': form_id,
'form_label': form_label,
'form_name': row['表单字段'],
'form_type': row['表单类型'],
'placeholder': row['占位符'],
'disabled': row['能否编辑'] in ['Y', '能', '是'],
'required': row['是否必填'] in ['Y', '能', '是'],
**kwargs,
}
form_label_list.append(form_label)
# 更新旧表单的状态为已删除
self.operation_list.append(work_form.new_operation(
'update',
filter={'form_label': {'$in': list(set(form_label_list))}, **kwargs},
update={'$set': work_form.to_db({'status': work_form.state.DELETE, 'update_at': now, 'update_by': op})},
))
# 新增表单
for _, form in form_dict.items():
self.operation_list.append(work_form.new_operation(
'insert',
filter={},
update=work_form.to_db(form)
))
return self
def import_model_data(
self, model_data: list, **kwargs) -> BaseContext:
""" 导入模型列表 """
work_model = self.data_factory.work_model
model_dict = {}
for row in model_data:
model_id = row['模型编号']
model_dict[model_id] = {
'model_id': model_id,
'model_key': row['模型KEY'],
'model_type': row['模型分类'],
'model_name': row['模型名称'],
**kwargs,
}
for _, v in model_dict.items():
self.operation_list.append(work_model.new_operation(
'insert',
filter={},
update=work_model.to_db(v)
))
return self
def import_node_data(self, node_data: list, **kwargs) -> BaseContext:
""" 导入节点列表 """
work_node = self.data_factory.work_node
node_dict = {}
for i, row in enumerate(node_data):
node_id = row['节点编号']
model_id = row['模型编号']
if model_id not in node_dict:
node_dict[model_id] = []
node_dict[model_id].append({
'node_id': node_id,
'parent_id': row['父节点编号'],
'node_name': row['节点名称'],
'node_type': row['节点类型'],
'sort': i,
'form_label': row['表单标签'],
'model_id': model_id,
**kwargs,
})
for model_id, node_list in node_dict.items():
context = NodeFactory.make_node(node_list)
if context is None:
continue
for node in context.root_node.to_node_list():
self.operation_list.append(work_node.new_operation(
'insert',
filter={},
update=work_node.to_db(node)
))
return self
def import_xlsx(self, xlsx_data: BytesIO, **kwargs):
""" 导入定义表格 """
form_data = self.read_xlsx(xlsx_data, 0)
model_data = self.read_xlsx(xlsx_data, 1)
node_data = self.read_xlsx(xlsx_data, 2)
if not (model_data and node_data and form_data):
return self
# 清空所有用户和用户组数据,重新写入最新数据
self.operation_list: List[Operation] = [
self.data_factory.work_model.new_operation(
'delete',
filter=kwargs,
update={}
),
self.data_factory.work_node.new_operation(
'delete',
filter=kwargs,
update={}
),
self.data_factory.work_form.new_operation(
'delete',
filter=kwargs,
update={}
),
]
self.import_form_data(form_data, **kwargs)
self.import_model_data(model_data, **kwargs)
self.import_node_data(node_data, **kwargs)
return self | zawn-workflow | /zawn_workflow-0.0.1.tar.gz/zawn_workflow-0.0.1/zawn_workflow/context/repository.py | repository.py |
import datetime
from zawn_workflow.context.base import NodeFactory
from zawn_workflow.context.context import BaseContext
from zawn_workflow.context.searcher import WorkFlowSearcher
from zawn_workflow.data.exception import ModelNotFoundError
class WorkFlowRuntime(BaseContext):
""" 工作流程运行时 """
def task_tick(self, instance_id: str, task_list: list) -> BaseContext:
""" 任务列表载入工作流程引擎执行 """
work_task = self.data_factory.work_task
task_field_mapping = {
'node_id': 'task_id',
'node_name': 'task_name',
'node_type': 'task_type',
}
context = NodeFactory.make_node(task_list, mapping=task_field_mapping).tick()
new_task_list = context.to_node_list(mapping=task_field_mapping)
for task in new_task_list:
self.operation_list.append(work_task.new_operation(
'insert', filter={}, update=work_task.to_db(task),
))
return self
def copy_node_to_task(self, node_list: list, instance_id: str, **kwargs) -> BaseContext:
""" 复制节点数据到任务数据 """
now = datetime.datetime.now().astimezone()
op = kwargs.get('op', 'system')
work_instance = self.data_factory.work_instance
# 创建任务id,组合成映射表
id_dict = {}
for node in node_list:
id_dict[node['node_id']] = work_instance.new_id()
# 组装任务操作列表
task_list = []
for node in node_list:
node_id = node['node_id']
parent_id = node['parent_id']
task_id = id_dict[node_id]
task = {
'task_id': task_id,
'parent_id': id_dict[parent_id],
'task_name': node['node_name'],
'task_type': node['node_type'],
'sort': node['sort'],
'handler_type': node['handler_type'],
'handler_id': node['handler_id'],
'instance_id': instance_id,
**kwargs,
'status': work_instance.state.SUBMIT,
'create_at': now,
'create_by': op,
}
task_list.append(task)
# 执行工作流程
self.task_tick(instance_id, task_list)
return self
def create_instance(self, model_id: str, **kwargs) -> BaseContext:
""" 创建工作实例 """
# 1. 复制
work_flow_searcher = WorkFlowSearcher()
meta, detail_list = (await work_flow_searcher.search_model_detail(model_id, **kwargs)).search()
if meta['total'] == 0:
raise ModelNotFoundError(f'工作模型未找到:{model_id}')
work_instance = self.data_factory.work_instance
work_task = self.data_factory.work_task
work_record = self.data_factory.work_record
work_store = self.data_factory.work_store
for model in detail_list:
model_id = model['model_id']
instance_id = work_instance.new_id()
nodes = model.pop('nodes', None) or []
forms = model.pop('forms', None) or []
if not nodes:
self.logger.info(f'模型没有配置节点信息:{model_id}')
continue
self.operation_list.append(work_instance.new_operation(
'insert', filter={}, update=work_instance.to_db({
'instance_id': instance_id,
'instance_name': model['model_name'],
'step': 0,
'completion_rate': 0,
'model_id': model_id,
})
))
self.copy_node_to_task(nodes, instance_id)
return self
def start(self, model_key: str, handler_id: str, **kwargs) -> BaseContext:
""" 启动 """
# 启动事件过程
# 1、检查模型和用户是否存在,如果不存在则返回提示
# 2、创建实例
# 3、复制节点到任务
# 4、执行一次tick
# 5、执行一次同意时间
work_flow_searcher = WorkFlowSearcher()
meta, model_list = await (await work_flow_searcher.search_model(model_key=model_key, **kwargs)).search()
if meta['total'] != 1:
return self
meta, user_list = await (await work_flow_searcher.search_user([handler_id], **kwargs)).search()
if meta['total'] == 0:
return self
return self
async def agree(
self, instance_id: str, handler_id: str, form_data: dict, **kwargs):
""" 同意事件 """
pass
async def reject(
self, instance_id: str, handler_id: str, form_data: dict, **kwargs):
""" 拒绝事件 """
pass
async def assign(
self, instance_id: str, handler_id: str, form_data: dict, **kwargs):
""" 指派事件 """
pass
async def entrust(
self, instance_id: str, handler_id: str, form_data: dict, **kwargs):
""" 委托事件 """
pass | zawn-workflow | /zawn_workflow-0.0.1.tar.gz/zawn_workflow-0.0.1/zawn_workflow/context/runtime.py | runtime.py |
import logging
from abc import ABC
from typing import Union, List, Optional, Type
from zawn_workflow.data.exception import NodeNotFoundError
class NodeType(object):
""" 节点类型 """
root = 'root' # 根
originate = 'originate' # 发起
approval = 'approval' # 审批
serial = 'serial' # 串行
join = 'join' # 会签
parallel = 'parallel' # 并行
condition = 'condition' # 条件
loop = 'loop' # 循环
class NodeStatus(object):
""" 节点状态 """
FUTURE: str = 'FUTURE' # 现在还没有走到的节点状态
WAITING: str = 'WAITING' # 只有复杂节点有该状态,表示在等待子节点审批
READY: str = 'READY' # 可以进行审批操作的简单节点是Ready状态
SKIP: str = 'SKIP' # 当一个并行节点的子节点状态为非(Ready, Waiting)时,其它兄弟节点及其子节点的状态被置为Skip
COMPLETE: str = 'COMPLETE' # 已经审批完成的节点状态
class HandlerType(object):
""" 处理人类型 """
STATIC: str = 'STATIC' # 设计阶段静态录入人员
FORM: str = 'FORM' # 运行阶段表单动态录入人员
RELATION: str = 'RELATION' # 通过映射函数获取人员
class Context(object):
""" 上下文类 """
def __init__(self):
self.root_node: Optional[Node] = None
self._data: List[dict] = []
self._node_dict: dict = {}
@property
def data(self) -> dict:
return_data: dict = self._data.pop(0) if len(self._data) > 0 else {}
return return_data
@data.setter
def data(self, value: dict):
self._data.append(value)
def register(self, node: 'Node'):
""" 注册节点信息 """
if self.root_node is None:
self.root_node = node
self._node_dict[node.node_id] = node
def get_node(self, node_id: str) -> 'Node':
""" 通过node_id获取节点 """
return self._node_dict.get(node_id)
def change_status(self, node_id: str, status: str):
""" 修改节点状态 """
try:
node = self._node_dict[node_id]
node.status = status
except Exception as e:
message = f'{e}'
logging.error(message, exc_info=True)
def tick(self, form_data: dict) -> 'Context':
""" 嘀嗒方法,每帧刷新新数据 """
self._data = form_data
self.root_node.tick()
return self
def to_node_list(self, node_list: List[dict] = None, parent_id: str = '', mapping: dict = None) -> List[dict]:
""" 转成节点列表 """
self.root_node.to_node_list(node_list=node_list, parent_id=parent_id, mapping=mapping)
class Node(ABC):
""" 节点类 """
node_type = 'base'
def __init__(self, node_id: str, node_name: str, raw_data: dict):
self.node_id: str = node_id
self.node_name: str = node_name
self.parent: Optional[Node] = None
self.children: List[Node] = []
self.max_children: int = -1
self._context: Optional[Context] = None
self._status: str = NodeStatus.FUTURE
# 处理类型、处理人id
self._handler_type: str = HandlerType.STATIC
self._handler_id: str = '' # 静态时为指定id,动态时为上下文的字段名,映射时为函数名称
# 分支条件,下标和子节点对应,不足时不执行
self._conditions: List[str] = []
# 存放原始数据
self.raw_data = raw_data
@property
def context(self):
return self._context
@context.setter
def context(self, value: Context):
self._context = value
@property
def status(self):
return self._status
@status.setter
def status(self, value: str):
self._status = value
@property
def handler_type(self):
return self._handler_type
@handler_type.setter
def handler_type(self, value: str):
self._handler_type = value
@property
def handler_id(self):
return self._handler_id
@handler_id.setter
def handler_id(self, value: str):
self._handler_id = value
@property
def conditions(self):
return self._conditions
@conditions.setter
def conditions(self, value: str):
self._conditions = value
def __repr__(self):
return f'<{self.node_type} {self.node_id}.{self.node_name}>'
def __str__(self):
return f'{self.node_id}.{self.node_name}'
def __eq__(self, other):
return isinstance(other, Node) and self.node_id == other.node_id
def set_parent(self, node: 'Node'):
""" 设置父节点 """
self.parent = node
def get_children_count(self) -> int:
""" 获取子节点数量 """
return len(self.children)
def is_index_valid(self, index: int) -> bool:
""" 校验下标合法 """
return 0 <= index < self.get_children_count()
def get_child(self, index: int) -> Union['Node', None]:
""" 获取子节点 """
if index >= self.get_children_count() or index < 0:
return None
return self.children[index]
def add_child(self, node: 'Node') -> 'Node':
""" 增加子节点 """
if 0 <= self.max_children <= self.get_children_count():
return self
self.children.append(node)
return self
def remove_child(self, node: 'Node') -> 'Node':
""" 删除子节点 """
for i in range(self.get_children_count()):
if node.node_id == self.get_child(i).node_id:
self.children.pop(i)
break
return self
def to_node_list(self, node_list: List[dict] = None, parent_id: str = '', mapping: dict = None) -> List[dict]:
""" 转成节点列表 """
node_list = node_list or []
mapping = mapping or {}
id_field_name = mapping.get('node_id') or 'node_id'
name_field_name = mapping.get('node_name') or 'node_name'
type_field_name = mapping.get('node_type') or 'node_type'
sort_field_name = mapping.get('sort') or 'sort'
parent_field_name = mapping.get('parent_id') or 'parent_id'
status_field_name = mapping.get('status') or 'status'
handler_type_field_name = mapping.get('handler_type') or 'handler_type'
handler_id_field_name = mapping.get('handler_id') or 'handler_id'
conditions_field_name = mapping.get('conditions') or 'conditions'
data = self.raw_data
data.update({
id_field_name: self.node_id,
name_field_name: self.node_name,
type_field_name: self.node_type,
sort_field_name: len(node_list) + 1,
parent_field_name: parent_id,
status_field_name: self.status,
handler_type_field_name: self.handler_type,
handler_id_field_name: self.handler_id,
conditions_field_name: self.conditions,
})
node_list.append(data)
for child in self.children:
child.to_node_list(node_list=node_list, parent_id=self.node_id, mapping=mapping)
return node_list
def tick(self) -> str:
""" 嘀嗒方法,每帧刷新新数据 """
running_status = self.on_tick(self.context)
return running_status
def on_tick(self, context: Context) -> str:
""" 嘀嗒执行方法 """
return self.status
def pre_condition(self):
""" 前置条件,满足前置条件才能进入该节点 """
pass
def post_condition(self):
""" 后置条件,满足后置条件该节点才能审批完成 """
pass
def pre_script(self):
""" 前置脚本,开始审批该节点时执行 """
pass
def post_script(self):
""" 后置脚本,审批完成该节点时执行 """
class RootNode(Node):
""" 根节点 """
node_type = NodeType.root
def on_tick(self, context: Context) -> str:
""" 嘀嗒执行方法 """
# 逐一遍历每个子节点,当子节点处于完成,才会执行下一个子节点
# 如果节点状态是已完成,则直接返回已完成
if self.status == NodeStatus.COMPLETE:
return self.status
# 等待状态
self.status = NodeStatus.WAITING
# 遍历每个子节点,执行嘀嗒方法
# 如果嘀嗒方法返回不是完成状态,则直接返回等待状态
for child in self.children:
child_status = child.tick()
if child_status != NodeStatus.COMPLETE:
return self.status
# 当所有子节点都是完成状态时,返回完成状态
self.status = NodeStatus.COMPLETE
return self.status
class OriginateNode(Node):
""" 发起节点 """
node_type = NodeType.originate
def __init__(self, node_id: str, node_name: str, raw_data: dict):
super().__init__(node_id, node_name, raw_data)
self.max_children = 0 # 不允许添加子节点
def on_tick(self, context: Context) -> str:
""" 嘀嗒执行方法 """
# 首先进入准备状态,等待审核事件后进入完成状态
# 如果节点状态是已完成,则直接返回已完成
if self.status == NodeStatus.COMPLETE:
return self.status
# 准备状态,没有发起人数据时将一直处于这个状态
self.status = NodeStatus.READY
handler_id = context.data.get('handler_id')
if not handler_id:
return self.status
# 设置发起人信息,并标记完成状态
self.handler_type = HandlerType.STATIC
self.handler_id = handler_id
self.status = NodeStatus.COMPLETE
return self.status
class ApprovalNode(Node):
""" 审批节点 """
node_type = NodeType.approval
def __init__(self, node_id: str, node_name: str, raw_data: dict):
super().__init__(node_id, node_name, raw_data)
self.max_children = 0 # 不允许添加子节点
def on_tick(self, context: Context) -> str:
""" 嘀嗒执行方法 """
# 首先进入准备状态,等待审核事件后进入完成状态
# 如果节点状态是已完成,则直接返回已完成
if self.status == NodeStatus.COMPLETE:
return self.status
# 转换处理人信息
if self.handler_type == HandlerType.FORM:
self.handler_id = context.data.get(self.handler_id) or self.handler_id
elif self.handler_type == HandlerType.RELATION:
originator_id = context.data.get('originator_id') or ''
command = f'{self.handler_id}("{originator_id}")'
try:
handler_id = eval(command, context.data)
self.handler_id = handler_id or self.handler_id
except Exception as e:
logging.info(f'审批节点获取映射处理人出错:{command}:{e}')
else:
pass
# 准备状态,没有发起人数据时将一直处于这个状态
self.status = NodeStatus.READY
return self.status
class SerialNode(RootNode):
""" 串行节点 """
node_type = NodeType.serial
class JoinNode(Node):
""" 会签节点 """
node_type = NodeType.join
def on_tick(self, context: Context) -> str:
""" 嘀嗒执行方法 """
# 首先进入等待状态,等待所有子节点都是完成状态时进入完成状态
# 进入等待状态
self.status = NodeStatus.WAITING
# 同时执行子节点,并检查完成状态
status_list = []
for child in self.children:
child_status = child.tick()
status_list.append(child_status == NodeStatus.COMPLETE)
# 所有子节点状态都是完成状态时,返回完成状态
if all(status_list):
self.status = NodeStatus.COMPLETE
return self.status
class ParallelNode(Node):
""" 并行节点 """
node_type = NodeType.parallel
def on_tick(self, context: Context) -> str:
""" 嘀嗒执行方法 """
# 首先进入等待状态,等待任一子节点是完成状态时进入完成状态
# 进入等待状态
self.status = NodeStatus.WAITING
# 同时执行子节点,并检查完成状态
status_list = []
for child in self.children:
child_status = child.tick()
status_list.append(child_status == NodeStatus.COMPLETE)
# 任一子节点状态是完成状态时,返回完成状态
if any(status_list):
self.status = NodeStatus.COMPLETE
# 如果是完成状态,需要把未完成的子节点设置为跳过状态
if self.status == NodeStatus.COMPLETE:
node_list = [self.children[i] for i in range(len(status_list)) if not status_list[i]]
for node in node_list:
node.status = NodeStatus.SKIP
return self.status
class ConditionNode(Node):
""" 条件节点 """
node_type = NodeType.condition
def on_tick(self, context: Context) -> str:
""" 嘀嗒执行方法 """
# 首先进入等待状态,等待任一子节点是完成状态时进入完成状态
# 进入等待状态
self.status = NodeStatus.WAITING
# 检查条件长度,如果为0直接完成状态
condition_length = len(self.conditions)
if condition_length == 0:
self.status = NodeStatus.COMPLETE
return self.status
# 逐一遍历每个子节点,判断条件指定情况和子节点执行情况
status_list = [False] * len(self.children)
for index, child in enumerate(self.children):
# 没有判断条件的跳过
if index >= condition_length:
continue
# 条件不是字符串,或空字符串的跳过
condition = self.conditions[index]
if not isinstance(condition, str) or condition == '':
continue
# 条件判断结果为false时跳过
try:
condition_status = bool(eval(condition, self.context.data))
if not condition_status:
continue
except Exception as e:
logging.info(f'条件节点判断条件时出错:{index}:{condition}:{e}')
# 执行子节点,检查执行结果是否为完成
child_status = child.tick()
status_list[index] = (child_status == NodeStatus.COMPLETE)
# 任一子节点状态是完成状态时,返回完成状态
if any(status_list):
self.status = NodeStatus.COMPLETE
# 如果是完成状态,需要把未完成的子节点设置为跳过状态
if self.status == NodeStatus.COMPLETE:
node_list = [self.children[i] for i in range(len(status_list)) if not status_list[i]]
for node in node_list:
node.status = NodeStatus.SKIP
return self.status
class LoopNode(Node):
""" 循环节点 """
node_type = NodeType.loop
def __init__(self, node_id: str, node_name: str, raw_data: dict):
super().__init__(node_id, node_name, raw_data)
self.max_children = 1 # 只允许添加1个子节点
self.max_loop_count = 1
self.current_loop_count = 0
class NodeFactory(object):
""" 节点工厂 """
# 节点类字典
node_class_dict = {
i.node_type: i for i in
(RootNode, OriginateNode, ApprovalNode, SerialNode, JoinNode, ParallelNode, ConditionNode)
}
@classmethod
def make_node(cls, node_list: List[dict], mapping: dict = None) -> Context:
""" 制造节点 """
# 实例化上下文对象
context = Context()
if not node_list:
return context
mapping = mapping or {}
id_field_name = mapping.get('node_id') or 'node_id'
name_field_name = mapping.get('node_name') or 'node_name'
type_field_name = mapping.get('node_type') or 'node_type'
sort_field_name = mapping.get('sort') or 'sort'
parent_field_name = mapping.get('parent_id') or 'parent_id'
status_field_name = mapping.get('status') or 'status'
for i in sorted(node_list, key=lambda x: x.get(sort_field_name)):
node_id = i[id_field_name]
node_type = i[type_field_name]
node_class: Type[Node] = cls.node_class_dict.get(node_type)
if node_class is None:
raise NodeNotFoundError(f'节点数据无法获取节点类:{id}:{type}')
# 实例化节点对象
node = node_class(
node_id=node_id,
node_name=i[name_field_name],
raw_data=i,
)
node.status = i.get(status_field_name, NodeStatus.FUTURE)
node.context = context
# 加入节点字典中,用于增加子节点
context.register(node)
# 如果有父节点id时,把当前节点加入到父节点中
parent_id = i[parent_field_name]
if parent_id:
context.get_node(parent_id).add_child(node)
return context | zawn-workflow | /zawn_workflow-0.0.1.tar.gz/zawn_workflow-0.0.1/zawn_workflow/context/base.py | base.py |
from io import BytesIO
from typing import List
from zawn_orm.database import Operation
from zawn_workflow.context.context import BaseContext
class WorkFlowIdentity(BaseContext):
""" 工作流程身份 """
def import_data(self, data: list, **kwargs) -> BaseContext:
""" 导入身份信息列表 """
# 整理用户和用户组的数据,写入数据库
user_dict = {}
new_data = []
for row in data:
user_id = row.pop('用户编号', None)
user_name = row.get('用户姓名', None)
# 无效的数据,或者已经存在的数据,则跳过
if (not (user_id and user_name)) or (user_name in user_dict):
continue
# 整理映射数据和新的列表
user_dict[user_name] = user_id
new_data.append(row)
for user_name, user_id in user_dict.items():
# 用户数据
self.operation_list.append(self.data_factory.work_user.new_operation(
'insert',
filter={},
update=self.data_factory.work_user.to_db({'user_id': user_id, 'user_name': user_name, **kwargs}),
))
for row in new_data:
user_id = user_dict[row.pop('用户姓名')]
# 用户组数据
for group_name, value in row.items():
if value in ['', '-']:
continue
# 如果关联结果存在于用户字典,则以关联类型新增记录
if value in user_dict:
self.operation_list.append(self.data_factory.work_group.new_operation(
'insert',
filter={},
update=self.data_factory.work_group.to_db({
'group_type': 'relation',
'group_name': group_name,
'user_id': user_id,
'target_id': user_dict[value],
**kwargs,
}),
))
# 如果关联结果不存在于用户字典,则以用户组类型新增记录
else:
self.operation_list.append(self.data_factory.work_group.new_operation(
'insert',
filter={},
update=self.data_factory.work_group.to_db({
'group_type': 'group',
'group_name': group_name,
'user_id': user_id,
'target_id': '_',
**kwargs,
}),
))
return self
def import_xlsx(self, xlsx_data: BytesIO, **kwargs) -> BaseContext:
""" 导入身份信息表格 """
# 读取xlsx文件,读取失败将返回0
# 清空所有用户和用户组数据,重新写入最新数据
# 读取xlsx文件,读取失败将返回0
data = self.read_xlsx(xlsx_data, 0)
if not data:
return self
# 清空所有用户和用户组数据,重新写入最新数据
self.operation_list: List[Operation] = [
self.data_factory.work_user.new_operation(
'delete',
filter=kwargs,
update={}
),
self.data_factory.work_group.new_operation(
'delete',
filter=kwargs,
update={}
),
]
self.import_data(data, **kwargs)
return self | zawn-workflow | /zawn_workflow-0.0.1.tar.gz/zawn_workflow-0.0.1/zawn_workflow/context/identity.py | identity.py |
from zawn_orm import field, model
from zawn_orm.state import BaseState
class State(BaseState):
SUBMIT: str = 'S1'
DELETE: str = 'S2'
class WorkUser(model.Model):
""" 工作用户 """
user_id = field.StringField()
user_name = field.StringField()
org_id = field.StringField() # 所属机构id
status = field.StringField() # 状态
create_at = field.DatetimeField() # 创建时间
create_by = field.StringField() # 创建人
update_at = field.DatetimeField() # 更新时间
update_by = field.StringField() # 更新人
state = State() # 有限状态机
class WorkGroup(model.Model):
""" 工作用户组 """
group_type = field.StringField()
group_name = field.StringField()
user_id = field.StringField()
target_id = field.StringField()
org_id = field.StringField() # 所属机构id
status = field.StringField() # 状态
create_at = field.DatetimeField() # 创建时间
create_by = field.StringField() # 创建人
update_at = field.DatetimeField() # 更新时间
update_by = field.StringField() # 更新人
state = State() # 有限状态机
class WorkForm(model.Model):
""" 工作表单 """
form_id = field.StringField() # 表单id
form_label = field.StringField() # 表单标签,每个独立表单都有一样的标签
form_name = field.StringField() # 表单字段名
form_type = field.StringField() # 表单类型
placeholder = field.StringField() # 占位符
disabled = field.BooleanField() # 是否禁用
required = field.BooleanField() # 是否必填
org_id = field.StringField() # 所属机构id
status = field.StringField() # 状态
create_at = field.DatetimeField() # 创建时间
create_by = field.StringField() # 创建人
update_at = field.DatetimeField() # 更新时间
update_by = field.StringField() # 更新人
state = State() # 有限状态机
class WorkModel(model.Model):
""" 工作模型 """
model_id = field.StringField() # 模型id
model_key = field.StringField() # 模型key
model_name = field.StringField() # 模型名称
model_type = field.StringField() # 模型分类类型
version = field.IntegerField() # 模型版本号,模型key重复时+1
org_id = field.StringField() # 所属机构id
status = field.StringField() # 状态
create_at = field.DatetimeField() # 创建时间
create_by = field.StringField() # 创建人
update_at = field.DatetimeField() # 更新时间
update_by = field.StringField() # 更新人
class WorkNode(model.Model):
""" 工作节点 """
node_id = field.StringField() # 节点id
parent_id = field.StringField() # 父节点id
node_name = field.StringField() # 节点名称
node_type = field.StringField() # 节点类型
sort = field.StringField() # 排序序号
handler_type = field.StringField() # 处理人类型,不指定、指定、表单、关联、用户组
handler_id = field.StringField() # 处理人id
form_label = field.StringField() # 表单标签
model_id = field.StringField() # 模型id
org_id = field.StringField() # 所属机构id
status = field.StringField() # 状态
create_at = field.DatetimeField() # 创建时间
create_by = field.StringField() # 创建人
update_at = field.DatetimeField() # 更新时间
update_by = field.StringField() # 更新人
state = State() # 有限状态机
class WorkInstance(model.Model):
""" 工作实例 """
instance_id = field.StringField() # 实例id
instance_name = field.StringField() # 实例名称,以发起人姓名+模型名称为名
step = field.IntegerField() # 当前步进数
completion_rate = field.IntegerField() # 完成率 0-100区间
model_id = field.StringField() # 所属模型id
org_id = field.StringField() # 所属机构id
status = field.StringField() # 状态
create_at = field.DatetimeField() # 创建时间
create_by = field.StringField() # 创建人
update_at = field.DatetimeField() # 更新时间
update_by = field.StringField() # 更新人
state = State() # 有限状态机
class WorkTask(model.Model):
""" 工作任务 """
task_id = field.StringField() # 任务id
parent_id = field.StringField() # 父任务id
task_name = field.StringField() # 任务名称
task_type = field.StringField() # 任务类型
sort = field.StringField() # 排序序号
handler_type = field.StringField() # 处理人类型,不指定、指定、表单、关联、用户组
handler_id = field.StringField() # 处理人id
instance_id = field.StringField() # 所属实例id
org_id = field.StringField() # 所属机构id
status = field.StringField() # 状态
create_at = field.DatetimeField() # 创建时间
create_by = field.StringField() # 创建人
update_at = field.DatetimeField() # 更新时间
update_by = field.StringField() # 更新人
state = State() # 有限状态机
class WorkRecord(model.Model):
""" 工作记录 """
record_id = field.StringField() # 记录id
handler_id = field.StringField() # 用户id
event = field.StringField() # 事件,同意、驳回
comment = field.IntegerField() # 评论
step = field.IntegerField() # 步进数
task_id = field.StringField() # 所属任务id
instance_id = field.StringField() # 所属实例id
org_id = field.StringField() # 所属机构id
status = field.StringField() # 状态
create_at = field.DatetimeField() # 创建时间
create_by = field.StringField() # 创建人
update_at = field.DatetimeField() # 更新时间
update_by = field.StringField() # 更新人
state = State() # 有限状态机
class WorkStore(model.Model):
""" 工作贮存 """
store_id = field.StringField() # 贮存id
label = field.StringField() # 表单展示标题
name = field.StringField() # 表单字段名称
type = field.StringField() # 表单类型 包含基本类型和自定义类型
value = field.StringField() # 表单值 统一序列化为字符串
record_id = field.StringField() # 所属记录id
task_id = field.StringField() # 所属任务id
instance_id = field.StringField() # 所属实例id
org_id = field.StringField() # 所属机构id
status = field.StringField() # 状态
create_at = field.DatetimeField() # 创建时间
create_by = field.StringField() # 创建人
update_at = field.DatetimeField() # 更新时间
update_by = field.StringField() # 更新人
state = State() # 有限状态机
class DataFactory(object):
""" 数据工厂 """
work_user: model.Model = None
work_group: model.Model = None
work_model: model.Model = None
work_node: model.Model = None
work_form: model.Model = None
work_instance: model.Model = None
work_task: model.Model = None
work_value: model.Model = None
work_record: model.Model = None
_this = None
_instance: 'DataFactory' = None
def __new__(cls, *args, **kwargs):
if cls._instance is None:
instance = super().__new__(cls, *args, **kwargs)
instance.work_user = WorkUser()
instance.work_group = WorkGroup()
instance.work_model = WorkModel()
instance.work_node = WorkNode()
instance.work_form = WorkForm()
instance.work_instance = WorkInstance()
instance.work_task = WorkTask()
instance.work_record = WorkRecord()
instance.work_store = WorkStore()
cls._instance = instance
return cls._instance
if __name__ == '__main__':
data_factory = DataFactory()
print(id(data_factory))
data_factory = DataFactory()
print(id(data_factory)) | zawn-workflow | /zawn_workflow-0.0.1.tar.gz/zawn_workflow-0.0.1/zawn_workflow/data/factory.py | factory.py |
import functools
import glob
import subprocess
import nox
LINT_ITEMS = "nox.py", "docs/source/conf.py", "zazo", "tests"
@nox.session
def docs(session):
session.install("-r", "tools/reqs/docs.txt")
session.run("sphinx-build", "-n", "-b", "html", "docs/source", "docs/build")
@nox.session
def packaging(session):
session.install("-r", "tools/reqs/packaging.txt")
session.run("flit", "build")
def lint_session(func):
@functools.wraps(func)
def wrapped(session):
if session.posargs:
files = session.posargs
else:
files = LINT_ITEMS
session.install("--pre", "-r", "tools/reqs/lint.txt")
session.run("black", "--version")
session.run("isort", "--version")
session.run("mypy", "--version")
return func(session, files)
return wrapped
@nox.session
@lint_session
def lint(session, files):
session.run("black", "--check", "--diff", *files)
session.run("isort", "--check-only", "--diff", "--recursive", *files)
session.run("mypy", "--ignore-missing-imports", "--check-untyped-defs", "zazo")
session.run(
"mypy", "-2", "--ignore-missing-imports", "--check-untyped-defs", "zazo"
)
@nox.session
@lint_session
def format(session, files):
session.run("black", *files)
session.run("isort", "--recursive", *files)
@nox.session
@nox.parametrize("python_version", ["2.7", "3.4", "3.5", "3.6", "3.7", "pypy", "pypy3"])
def test(session, python_version):
# Set the interpreter
if python_version.startswith("pypy"):
session.interpreter = python_version
else:
session.interpreter = "python" + python_version
# Build the package.
# THIS IS A HACK
# Working around all kinds of weird nox + flit + Travis CI behavior.
# We're building a wheel here and installing it with session.install since
# nox is declarative but we need to run the build command before executing
# code.
def my_run(*args):
print("run > " + " ".join(args))
try:
subprocess.check_call(args)
except subprocess.CalledProcessError:
session.error("Command failed.")
my_run("python3", "-m", "flit", "build")
files = glob.glob("./dist/*.whl")
if not files:
session.error("Could not find any built wheels.")
# Install the package and test dependencies.
session.install(*files)
session.install("-r", "tools/reqs/test.txt")
# Run the tests
session.cd("tests") # we change directory to avoid the cwd ambiguity
session.run("pytest", *session.posargs) | zazo | /zazo-0.0.0a3.tar.gz/zazo-0.0.0a3/nox.py | nox.py |
# zazo
[](https://travis-ci.org/pradyunsg/zazo)
A Pluggable Dependency Resolver written in Python. Intended for bringing dependency resolution to pip.
## Motivation
The motivation for this project is to make it feasible and easy for user-facing package managers written in Python to do proper dependency resolution.
This project has grown out of a [GSoC Project], which aimed to bring proper dependency resolution to pip. Once this package is ready, work will be done to make pip use this instead of its home-grown solution.
## Development
This project uses nox extensively.
- Documentation is built with `nox -s docs`.
- Linting and MyPy checking can be done using `nox -s lint`
- Tests are run with `nox -s test`.
Currently, the documentation of this project is non-existent but this shall be rectified once the actual internal details of the package stabilize.
[GSoC Project]: https://summerofcode.withgoogle.com/archive/2017/projects/5797394100781056/
| zazo | /zazo-0.0.0a3.tar.gz/zazo-0.0.0a3/README.md | README.md |
Zazu (at your service)
======================
.. image:: http://vignette1.wikia.nocookie.net/disney/images/c/ca/Zazu01cf.png
:height: 150 px
:width: 150 px
:align: center
Zazu is a CLI development workflow management tool that combines
elements of git flow with CI and issue tracking.
..
digraph G {
"Zazu" -> "TeamCity"
"Zazu" -> "GitHub"
"Zazu" -> "Jira"
}
.. image:: https://github.com/stopthatcow/zazu/raw/9357ae070b6277ad59579e95e036c264ba63086f/doc/services.png
:align: center
Zazu is implemented in Python and is a
`Click <http://click.pocoo.org/5/>`__ based CLI. If you're wondering why
Click, this is a well `answered <http://click.pocoo.org/5/why/>`__
question.
Install
-------
Pre-requsites (linux)
~~~~~~~~~~~~~~~~~~~~~
::
sudo apt-get install libncurses-dev python-dev libssl-dev libffi-dev
sudo pip install keyrings.alt
All platforms
~~~~~~~~~~~~~
::
git clone [email protected]:stopthatcow/zazu.git
cd zazu
sudo pip install --upgrade pip
sudo pip install --upgrade .
If you get an error about a package called "six" use the following
command instead: ``sudo pip install --upgrade --ignore-installed six .``
Command overview
----------------
The following diagram shows the available subcommands of zazu.
..
digraph G {
"zazu" -> "build"
"zazu" -> "tool"
"tool" -> "install"
"tool" -> "uninstall"
"zazu" -> "style"
"zazu" -> "repo"
"repo" -> "setup"
"setup" -> "hooks"
"setup" -> "ci"
"repo" -> "cleanup"
"repo" -> "repo_init"
repo_init [label=init, style=dashed]
"repo" -> "repo_clone"
repo_clone [label=clone, style=dashed]
"zazu" -> "dev"
"dev" -> "start"
"dev" -> "status"
dev_builds [label=builds, style=dashed]
"dev" -> "dev_builds"
"dev" -> "review"
"dev" -> "ticket"
}
.. image:: https://github.com/stopthatcow/zazu/raw/9357ae070b6277ad59579e95e036c264ba63086f/doc/cmds.png
:align: center
Note: dashed lines are not yet implemented
Repo management
---------------
- ``zazu repo clone <name>`` clones repo from github and installs GIT
hooks (Unimplemented)
- ``zazu repo init <name>`` initializes repo to default project
structure (Unimplemented)
- ``zazu repo setup hooks`` installs default GIT hooks to the repo
- ``zazu repo setup ci`` sets up CI builds based on the zazu.yaml file
in the repo
CI build configuration management
---------------------------------
Zazu can setup CI server builds (currently only TeamCity is supported)
to build targets specified by a recipe file (the zazu.yaml file in the
root of a repo).
- ``zazu repo setup ci``
Development workflow management
-------------------------------
- ``zazu dev start`` interactivly creates new JIRA ticket
- ``zazu dev start <name>`` e.g.
``zazu dev start LC-440_a_cool_feature``
- ``zazu dev status`` displays ticket and pull request status
- ``zazu dev ticket`` launches web browser to the ticket page
- ``zazu dev builds`` launches web browser to the CI project page
- ``zazu dev review`` launches web browser to create/view a pull
request
Code Style Enforcement
----------------------
- ``zazu style`` fixes code style using astyle and autopep8
Building
--------
Zazu uses the zazu.yaml file to build goals defined there
- ``zazu build <goal>``
- The target architecture is assumed to be 'local' but may be
overridden using the --arch flag. e.g
``zazu build --arch=arm32-linux-gnueabihf package`` would build
targeting 32 bit arm linux.
Passing variables to the build
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You may pass extra variables to the build using key=value pairs.
``zazu build --arch=arm32-linux-gnueabihf package FOO=bar`` This sets
the environement variable *FOO* to the value *bar* during the build.
Build tool installation
-----------------------
Zazu will automatically try to obtain required build tools needed for
each target as specified in the zazu.yaml file. These may be
installed/uninstalled manually as well:
- ``zazu tool install <tool==version>``
- ``zazu tool uninstall <tool==version>``
These tools will be installed to the ``~/.zazu/tools/`` folder.
zazu.yaml file
--------------
The zazu.yaml file lives at the base of the repo and describes the CI
goals and architectures to be run. In addition it describes the
requirements for each goal.
::
components:
- name: networkInterface
goals:
- name: coverage
description: "Runs the \"check\" target and reports coverage via gcovr"
buildType: coverage
buildVars:
LOCAL_SERVER: ON
builds:
- arch: x86_64-linux-gcc
- name: package
buildType: minSizeRel
builds:
- arch: arm32-linux-gnueabihf
requires:
zazu:
- gcc-linaro-arm-linux-gnueabihf==4.9
- arch: x86_64-linux-gcc
style:
exclude:
- dependencies/ #list path prefixes here to exclude from style
- build/
astyle:
options:
- "--options=astyle.conf" # options passed to astyle
include:
- src/*.cpp # list of globs of files to style
- include/*.h
- test/*.cpp
autopep8:
options:
- "--max-line-length=150" # options passed to autopep8
zazu: 0.2.0 # optional required zazu version
Compiler tuples
~~~~~~~~~~~~~~~
Architectures are defined as tuple in the folowing form:
``<ISA>-<OS>-<ABI>``
============
Examples
============
- x86\_64-linux-gcc
- x86\_32-linux-gcc
- x86\_64-win-msvc\_2013
- x86\_64-win-msvc\_2015
- x86\_32-win-msvc\_2013
- x86\_32-win-msvc\_2015
- arm32-linux-gnueabihf
- arm32-none-eabi
Command autocompletion
----------------------
Note that autocompletion currently only works for commands and
subcommands (not arguments) ###BASH users Add the following to your
``~/.bashrc`` file:
::
_zazu_completion() {
COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \
COMP_CWORD=$COMP_CWORD \
_ZAZU_COMPLETE=complete $1 ) )
return 0
}
complete -F _zazu_completion -o default zazu;
ZSH users
~~~~~~~~~
Add the following to your ``~/.zshrc`` file
::
autoload bashcompinit
bashcompinit
_zazu_completion() {
COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \
COMP_CWORD=$COMP_CWORD \
_ZAZU_COMPLETE=complete $1 ) )
return 0
}
complete -F _zazu_completion -o default zazu;
Handy aliases
-------------
::
alias zz="zazu"
alias zd="zazu dev"
alias zds="zazu dev start"
alias zdr="zazu dev review"
alias zdt="zazu dev ticket"
alias zdb="zazu dev builds"
alias zs="zazu style"
alias zb="zazu build"
| zazu | /zazu-0.8.1.tar.gz/zazu-0.8.1/README.rst | README.rst |
# Zigbee CLI wrapper
Zigbee CLI wrapper (zb_cli_wrapper) is a Python package for the nRF5 SDK for Zigbee that simplifies communication with the Zigbee CLI and improves control of the Zigbee network.
## Prerequisites
Make sure you have the following hardware and software before using the wrapper.
### Hardware
The wrapper is to be used with the [Zigbee CLI Agent example],
and it requires the same hardware as this example. Make sure the hardware supports Zigbee.
### Software
You need the following software:
- [Python 3.7][Python 3 download] or newer
- [pip][pip install]
- setuptools (latest version)
- Install or update setuptools with the following command:
```
pip install -U setuptools
```
## Installing the wrapper
To install the package, use pip:
```
pip install zb-cli-wrapper
```
You can also [download the package from PyPI][pypi zb-cli-wrapper] and run the following command in the package source folder to install it:
```
python setup.py install
```
This will also install all required packages.
## Configuring the wrapper
To use the wrapper, you must configure both hardware and software by completing the following steps:
- flashing the board
- creating a connection with the CLI
### Flashing the board
Flash the [Zigbee CLI Agent example] on the board you are using, as described on the example page.
For flashing any nRF5 board, you can use the [nRF Connect for Desktop] application, available for Windows, Linux, and macOS. The hex file you need can be found in the `/hex` directory.
You can also flash your board using [nrfutil] or the [pynrfjprog] Python module.
### Creating connection with CLI
For every CLI device that you want to communicate with by using zb_cli_wrapper, use an instance of the `ZbCliDevice` class, located in the `zb_cli_dev.py` file.
An object of this class is used to write to and read from the board that runs the Zigbee CLI Agent example.
The `ZbCliDevice` object offers methods imported from files located at `zb_cli_wrapper/src/utils/cmd_wrappers/zigbee/`: `bdb.py`, `zcl.py`, `zdo.py`, `log.py`.
These methods can be accessed as `<object>.bdb.method`, `<object>.zcl.method`, `<object>.zdo.method`, and `<object>.log.method`, respectively.
See the example in the following sections.
To create a connection with the CLI:
1. Import the `zb_cli_wrapper` package once it is installed:
```
from zb_cli_wrapper.zb_cli_dev import ZbCliDevice
```
2. Create a `ZbCliDevice` object:
1. Specify how the object will find the com port to which your nRF board is connected by passing one of the following pieces of information as an argument to the constructor:
   - If your onboard SEGGER J-Link is connected to your PC, use the SEGGER number: `segger='680000000'`.
   - If your onboard nRF is connected directly to the PC, use the serial number: `cdc_serial='EF0000000000'`.
   - If you want to specify the com port manually, use the com port number: `com_port='COM20'`.
2. Run the following command (replace the argument according to your choice in step 1):
```
cli_instance = ZbCliDevice(segger='680000000')
```
**Note:** Make sure you use the correct numbers.
If you use a development kit with an onboard J-Link, such as the [nRF52840 DK] or [nRF52833 DK], you can read the SEGGER number from the sticker on the board.
In the case of an nRF USB Dongle, such as the [nRF52840 Dongle], you can read the serial number from the sticker on the board.
When the object is created, it automatically tries to connect to the com port to which the CLI is connected.
## Using the wrapper
To start using the wrapper, complete the following steps:
- connecting to a network (or creating one)
- discovering a device present in the network and getting its addresses and endpoints
- reading an attribute value of the device
- controlling the device using the CLI
At the end, you can close the CLI and the connection with the board.
**Note:** A light bulb is used as the example device in this section.
### Connecting to network
To connect to a network (or create one):
1. Connect to an existing Zigbee network or create a new one by setting one of the following CLI roles:
- To set role as router (join network): `cli_instance.bdb.role = 'zr'`
- To set role as coordinator (create network): `cli_instance.bdb.role = 'zc'`
2. Set the channel or channels that the CLI will use. For example, to set channels 20 and 24:
```python
cli_instance.bdb.channel = [20, 24]
```
**Note:** If you don't know the channel number, set all Zigbee channels (range 11-26).
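For example, to set all Zigbee channels (a sketch; it assumes the channel setter accepts any list of channel numbers):
```python
cli_instance.bdb.channel = list(range(11, 27))
```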
3. To join or create the network, call:
```
cli_instance.bdb.start()
```
As soon as the board's `NETWORK_LED` turns on, the CLI is commissioned and ready to control Zigbee devices. To wait for the CLI to join the network, call:
```python
cli_instance.wait_until_connected(timeout=10)
```
This call blocks until the CLI connects or the timeout elapses. It returns `None` if the CLI did not connect, and the object itself if it did.
When connected, you can send commands and read responses.
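For example, a minimal guard might look like this (a sketch):
```python
if cli_instance.wait_until_connected(timeout=10) is None:
    raise RuntimeError("CLI did not join the network")
```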
### Discovering the device addresses
To control a Zigbee device, you must know its long address (the short address can change).
To learn the short address, use the discovery process. Then, resolve the short address into a long one.
A device can be identified by its input and output clusters. For example, for the dimmable light bulb:
- input clusters:
- On/Off cluster (Cluster ID: 0x0006)
- Level cluster (Cluster ID: 0x0008)
- output clusters:
- no output clusters
**Note:** For all Zigbee-related information, including cluster IDs and attributes, see the [ZCL Specification].
#### Getting the device short address
Before discovering a device, import the basic ZCL-related constants to make the discovery process easier.
The constants are defined in the `constants.py` file located at `/zb_cli_wrapper/src/utils/cmd_wrappers/zigbee/constants.py`.
To import constants to the script, run the following command:
```python
from zb_cli_wrapper.src.utils.cmd_wrappers.zigbee import constants
```
To discover a Zigbee device like a light bulb, use the match descriptor request.
For example, use the match_desc function from zdo:
```python
response = cli_instance.zdo.match_desc([constants.ON_OFF_CLUSTER, constants.LVL_CTRL_CLUSTER], [])
>>> response
[MatchedEndpoint(src_add='FD57', id='10')]
```
This function returns a list of `MatchedEndpoint` namedtuples as the response. A single tuple stores information about
a single device as `(src_add, id)`.
It may take up to about a minute to find all matching devices, and the list can be long.
#### Resolving a short address into a long address
To resolve a short address (stored in `response[0][0]` or `response[0].src_add`) into a long address, run the following command:
```python
light_bulb_eui64 = cli_instance.zdo.ieee_addr(response[0].src_add)
>>> hex(light_bulb_eui64)
'0xb010ead45b1b20c'
>>> light_bulb_eui64.as_hex
'F4CE360FAD6A60F0'
```
### Reading from the device
To read the device cluster attributes, use `zcl.readattr`:
```
response_attr = cli_instance.zcl.readattr(eui64, attr, ep)
```
The function returns the read data as an instance of the `Attribute` class. It also takes an instance of the same class as an argument. For every cluster attribute to be read, create one instance of the `Attribute` class.
To import a class, add at the beginning of your script:
```
from zb_cli_wrapper.src.utils.zigbee_classes.clusters.attribute import Attribute
```
To read the brightness level of a light bulb or another of its attributes, create an `Attribute` object:
```
bulb_lvl_attr = Attribute(cluster=constants.LVL_CTRL_CLUSTER, id=constants.LVL_CTRL_CURR_LVL_ATTR, type=constants.TYPES.UINT8, name="Bulb level")
```
Here, `id=constants.LVL_CTRL_CURR_LVL_ATTR` corresponds to the `current level` attribute (which in a light bulb usually corresponds to the brightness of the bulb) and `type` determines the type of the attribute (defined in [ZCL Specification]).
To read the `current level` attribute defined in `bulb_lvl_attr`, run the following command:
```python
response_attr = cli_instance.zcl.readattr(light_bulb_eui64, bulb_lvl_attr, ep=response[0][1])
>>> response_attr
Attribute Bulb level: 255
```
### Controlling the device
To change the brightness of the bulb, you must send the `Move to level` command defined in the [ZCL Specification].
To send ZCL commands, use `zcl.generic`:
```python
cli_instance.zcl.generic(eui64, ep, cluster, profile, cmd_id, payload=None)
```
In this command:
- `profile` determines the Zigbee application profile. By default, the standard Home Automation profile (ID: 0x0104) is used.
- `cmd_id` is the ID of the command to be sent, as defined in the [ZCL Specification].
- `payload` is an additional argument, used if the specified command requires additional values.
**Note**: `payload` is given as a list of tuples, where each tuple is `(value, type_of_value)`.
To set the light bulb brightness to 50%, run the following command:
```python
cli_instance.zcl.generic(eui64=light_bulb_eui64, ep=response[0][1], cluster=constants.LVL_CTRL_CLUSTER, profile=constants.DEFAULT_ZIGBEE_PROFILE_ID, cmd_id=constants.LVL_CTRL_MV_TO_LVL_CMD, payload=[(0x7F, constants.TYPES.UINT8), (1, constants.TYPES.UINT16)])
```
Here, `payload` carries the new value of the `current level` attribute, `0x7F` (127 in decimal, about 50% of 255, which is the maximum value of the `current level` attribute), and a transition time of `1` (equal to 0.1 seconds).
By reading the cluster attribute again, you can confirm the change of the attribute value.
### Closing CLI and the connection with the board
Use the `close_cli()` method to gracefully close the CLI and the connection with the board:
```python
cli_instance.close_cli()
```
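Putting the previous steps together, a minimal end-to-end sketch might look as follows. The SEGGER number is a placeholder, and the discovered addresses depend on your network:
```python
from zb_cli_wrapper.zb_cli_dev import ZbCliDevice
from zb_cli_wrapper.src.utils.cmd_wrappers.zigbee import constants

cli_instance = ZbCliDevice(segger='680000000')  # placeholder SEGGER number
cli_instance.bdb.role = 'zr'
cli_instance.bdb.channel = [20, 24]
cli_instance.bdb.start()

if cli_instance.wait_until_connected(timeout=10):
    # Find a dimmable light bulb and switch it on.
    response = cli_instance.zdo.match_desc(
        [constants.ON_OFF_CLUSTER, constants.LVL_CTRL_CLUSTER], [])
    bulb_eui64 = cli_instance.zdo.ieee_addr(response[0].src_add)
    cli_instance.zcl.generic(eui64=bulb_eui64, ep=response[0].id,
                             cluster=constants.ON_OFF_CLUSTER,
                             profile=constants.DEFAULT_ZIGBEE_PROFILE_ID,
                             cmd_id=constants.ON_OFF_ON_CMD)

cli_instance.close_cli()
```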
[Zigbee CLI Agent example]: https://infocenter.nordicsemi.com/topic/sdk_tz_v4.0.0/zigbee_example_cli_agent.html
[Python 3 download]: https://www.python.org/downloads/
[pip install]: https://pip.pypa.io/en/stable/installing.html
[pypi zb-cli-wrapper]: https://pypi.org/project/zb-cli-wrapper
[nRF Connect for Desktop]: https://www.nordicsemi.com/Software-and-Tools/Development-Tools/nRF-Connect-for-desktop
[nrfutil]: https://github.com/NordicSemiconductor/pc-nrfutil
[pynrfjprog]: https://github.com/NordicSemiconductor/pynrfjprog
[nRF52840 DK]: https://www.nordicsemi.com/Software-and-Tools/Development-Kits/nRF52840-DK
[nRF52833 DK]: https://www.nordicsemi.com/Software-and-Tools/Development-Kits/nRF52833-DK
[nRF52840 Dongle]: https://www.nordicsemi.com/Software-and-Tools/Development-Kits/nRF52840-Dongle
[ZCL Specification]: https://zigbeealliance.org/wp-content/uploads/2019/12/07-5123-06-zigbee-cluster-library-specification.pdf | zb-cli-wrapper | /zb-cli-wrapper-0.3.zip/zb-cli-wrapper-0.3/readme.md | readme.md |
from collections import Counter
import serial
import logging
from time import sleep
from zb_cli_wrapper.nrf_dev_map import nrfmap
from zb_cli_wrapper.src.utils.cmd_wrappers.zigbee import bdb, log, radio, zcl, zdo
from zb_cli_wrapper.src.utils.connection import UartConnection, AsciiConnection
from zb_cli_wrapper.src.utils.communicator import AdvancedLineCommunicator, CommandError
class ZigbeeAdvancedLineCommunicator(AdvancedLineCommunicator):
"""
This is a class to set the custom prompt in AdvancedLineCommunicator.
"""
def __init__(self, conn, prompt=">", success_prefix="done", error_prefix="error:", **kwargs):
super(ZigbeeAdvancedLineCommunicator, self).__init__(conn=conn, prompt=prompt, success_prefix=success_prefix,
error_prefix=error_prefix, **kwargs)
class ZbCliDevice(object):
def __init__(self, segger="", cdc_serial="", com_port=""):
""" Cli device is connected by com_port but instead of com_port name, cdc_serial or segger number can be used
You have to provide one (and only one) of the following:
* segger='YOUR_SEGGER_NBR' - to connect to device by given segger number
* cdc_serial='YOUR_CDC_SERIAL' - to connect to device by given cdc_serial
* com_port='YOUR_COM_PORT_NAME' - to connect to device connected to specific com_port
"""
if Counter([segger, cdc_serial, com_port]).get("") != 2:
raise ValueError("You have to provide one and only one of the following: segger, cdc_serial, com_port")
self._cli = None
self.segger = segger
self.cdc_serial = cdc_serial
self.com_port = com_port
self.connection_handler = UartConnection
self.connection_wrapper = AsciiConnection
self.communicator_handler = ZigbeeAdvancedLineCommunicator
self.cli_default_baud_rate = 115200
self.baud_rate = self.cli_default_baud_rate
if segger:
try:
self.com_port = nrfmap.ComPortMap.get_com_ports_by_id(self.segger, [nrfmap.Vendor.Segger])[0]
except KeyError:
raise ValueError("There is no board with seggger: {}".format(self.segger))
if cdc_serial:
try:
self.com_port = nrfmap.ComPortMap.get_com_ports_by_id(self.cdc_serial, [nrfmap.Vendor.CDC])[0]
except KeyError:
raise ValueError("There is no board with cdc_serial: {}".format(self.cdc_serial))
        if not self.com_port:
            raise ValueError("Com port is not set")
self.bdb = bdb.CommandWrapper(cli_getter=self.get_cli, dev=self)
self.log = log.CommandWrapper(cli_getter=self.get_cli, dev=self)
self.radio = radio.CommandWrapper(cli_getter=self.get_cli, dev=self)
self.zcl = zcl.CommandWrapper(cli_getter=self.get_cli, dev=self)
self.zdo = zdo.CommandWrapper(cli_getter=self.get_cli, dev=self)
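
    # Example usage (a sketch; the SEGGER number below is a placeholder):
    #
    #   dev = ZbCliDevice(segger='680000000')
    #   if dev.wait_until_connected(timeout=10):
    #       print("Short address: 0x{:04X}".format(dev.zdo.short_addr))
    #   dev.close_cli()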
def get_cli(self):
"""
Returns the current CLI object instance. Pass this to command wrappers, so that they can update their CLI handles
when needed.
"""
return self.cli
@property
def cli(self):
"""
CLI communicator. Initializes on the first usage. Make sure that this property is not called in the constructor.
"""
if not self._cli:
self._cli = self.create_cli_communicator()
self._configure_cli()
return self._cli
@cli.setter
def cli(self, value):
self._cli = value
def _configure_cli(self):
"""
Abstract method to configure the CLI communicator. Called after create_cli_communicator().
To be overloaded in child classes.
NOTE: Do not access the communicator with self.cli - use self._cli instead!
"""
pass
def close_cli(self):
"""
Closes the CLI connection and stops the communicator thread.
"""
if self._cli:
logging.info("Closing CLI...")
self._cli.stop()
self._cli._conn.close()
self._cli = None
logging.info("... CLI closed.")
def create_cli_communicator(self):
"""
Create the communicator that allows to read and write data to and from CLI.
Return:
Communicator (ZigbeeAdvancedLineCommunicator) for the interaction with CLI.
"""
self.connection_params = {"port": self.com_port, "baudrate": self.baud_rate or self.cli_default_baud_rate}
# Starting up the connection handler.
conn = self.connection_handler(**self.connection_params)
# Open communicator.
# Perform 10 attempts to avoid PermissionError(13, 'Access is denied.', None, 5) on Windows.
attempts = 10
while attempts:
try:
logging.info("Trying to open com port {}. Attempts left: {}.".format(self.com_port, attempts))
attempts -= 1
conn.open()
conn._serial.dsrdtr = False
except serial.serialutil.SerialException as e:
sleep(1)
if not attempts:
raise e
continue
break
logging.info("Com port {} opened successfully.".format(self.com_port))
# Some connections may need wrappers to parse the output properly.
if self.connection_wrapper:
comm = self.communicator_handler(self.connection_wrapper(conn))
else:
comm = self.communicator_handler(conn)
comm.start()
return comm
def wait_until_connected(self, timeout=10):
"""
Waits until CLI is connected to the network or the function times out.
Args:
timeout (int): Timeout after which the function will stop attempts to connect and will return None.
Return:
Self (ZbCliDevice) specified in the parameter or None if CLI is unable to connect to the network.
"""
for x in range(timeout):
try:
short_addr_resp = self.zdo.short_addr
except CommandError:
logging.info("Trying to connect cli to network. Attempts left: {:d}".format(timeout - x - 1))
sleep(1)
else:
if isinstance(short_addr_resp, int):
logging.info("CLI short address: 0x{:04X}".format(short_addr_resp))
return self
logging.info("Can not connect CLI to network.")
self.close_cli()
return None | zb-cli-wrapper | /zb-cli-wrapper-0.3.zip/zb-cli-wrapper-0.3/zb_cli_wrapper/zb_cli_dev.py | zb_cli_dev.py |
import ipaddress
import threading
import logging
import time
import re
import string
import datetime
class TimeoutError(Exception):
pass
class CommandError(Exception):
pass
class CommunicatorError(Exception):
pass
class BaseCommunicator(threading.Thread):
def __newline_handler(self, line):
print(line)
def __init__(self, conn, newline_handler=__newline_handler, read_size=128, wait_time=0.001):
"""
Create base communicator. Prints to STDOUT every line that shows up on conn.read
NOTE: if the existing object has debug_logfile field (IOStream), the received and written data
will be written to it.
Args:
conn (connection object): connection object that will be used to send and receive data
newline_handler (callable): callback to handle every line that appears on conn
read_size (int): the size of a buffer for a single read attempt
wait_time (float): delay, in seconds, between input stream availability checks
"""
super(BaseCommunicator, self).__init__()
self.daemon = True
self.logger = logging.getLogger(__name__)
self._conn = conn
self._newline_handler = newline_handler
self._read_size = read_size
self._wait_time = wait_time
self._line = ""
self._reading_active = threading.Event()
self.debug_logfile = None
def run(self):
self._reading_active.set()
while self._reading_active.is_set():
for c in self._conn.read(self._read_size):
self._line += c
if c == "\n":
if not getattr(self.debug_logfile, 'closed', True):
ts = datetime.datetime.now().strftime("%H:%M:%S.%f")[:-3] # strip to milliseconds
self.debug_logfile.write(f"{ts} rcvd: {self._line.encode('ascii', 'ignore')}\n")
self._newline_handler(self._line)
self._line = ""
time.sleep(self._wait_time)
def write(self, b):
try:
log_data = b.encode('utf-8')
except AttributeError:
log_data = b
if not getattr(self.debug_logfile, 'closed', True):
ts = datetime.datetime.now().strftime("%H:%M:%S.%f")[:-3] # strip to milliseconds
self.debug_logfile.write(f"{ts} send: {log_data}\n")
return self._conn.write(b)
def stop(self):
self._reading_active.clear()
self.join()
self.logger.debug("Data left in the buffer: '{}'".format(self._line))
def __str__(self):
return self.__repr__()
def __repr__(self):
return repr(self._conn)
class LineCommunicator(BaseCommunicator):
ASYNC_LOGS = []
def __newline_handler(self, line):
if line == "\r\n":
return
self._lines_lock.acquire()
if self.is_async(line):
self._async_lines.append(line)
else:
self._lines.append(line)
self._lines_lock.release()
def __init__(self, conn, newline_handler=None, **kwargs):
""" Create CLI line communicator.
Args:
conn (connection object): connection object that will be used to send and receive data
newline_handler (callable): callback to handle every line that appears on conn.
Defaults to __newline_handler
kwargs: as in parent class
"""
if newline_handler is None:
newline_handler = self.__newline_handler
super(LineCommunicator, self).__init__(conn, newline_handler, **kwargs)
self._lines = []
self._async_lines = []
self._lines_lock = threading.Lock()
def is_async(self, line):
"""
Checks whether or not given line is an asynchronous log. Uses ASYNC_LOGS variable
Args:
line (str): line to be checked
Returns:
            True if line is an asynchronous log, False otherwise.
"""
for r in self.ASYNC_LOGS:
if r.search(line):
return True
return False
def readlines(self, async_log=False):
"""
Returns all entries from line buffer or from asynchronous line buffer
Args:
async_log (bool): if True, uses asynchronous buffer
Returns:
List with lines or empty list if buffer is empty
"""
if async_log:
buffer = self._async_lines
else:
buffer = self._lines
self._lines_lock.acquire()
lines = [line for line in buffer]
buffer.clear()
self._lines_lock.release()
return lines
def readline(self, async_log=False):
"""
Returns first entry from line buffer or from asynchronous buffer
Args:
async_log (bool): if True, uses asynchronous buffer
Returns:
Str with line or None if buffer is empty
"""
if async_log:
buffer = self._async_lines
else:
buffer = self._lines
line = None
self._lines_lock.acquire()
if len(buffer):
line = buffer.pop(0)
self._lines_lock.release()
return line
def __read(self, size):
result = ""
self._lines_lock.acquire()
while self._lines and len(result) < size:
line = self._lines.pop(0)
left = (size - len(result))
if len(line) > left:
self._lines.insert(0, line[left:])
result += line[:left]
self._lines_lock.release()
return result
def read(self, size=-1):
if size == -1:
return "".join(self.readlines())
return self.__read(size)
def write(self, data):
return super(LineCommunicator, self).write("{}\r\n".format(data))
def stop(self):
super().stop()
while True:
lines = self.readlines()
async_lines = self.readlines(async_log=True)
if not lines and not async_lines:
break
if lines:
self.logger.debug(f"Data left in the lines buffer: '{lines}'")
if async_lines:
self.logger.debug(f"Data left in the asynchronous lines buffer: '{async_lines}'")
class PingError(Exception):
pass
class BorderRouterCommunicator(LineCommunicator):
PING_CMDS = {
4: "ping",
6: "ping6"
}
PING_RE = re.compile(r"(\d+)\sbytes\sfrom\s(.*):\sicmp_[r|s]eq=(\d+)\sttl=(\d+)\stime=(\d+\.?\d*)\sms")
def __init__(self, conn, read_size=1280, wait_time=0.001):
super(BorderRouterCommunicator, self).__init__(conn, None, read_size, wait_time)
self._lines = []
self._lines_lock = threading.Lock()
def _wait_while_check_function_returns_false(self, check_function, timeout=1.0):
end_time = time.time() + timeout
checked_lines = []
while time.time() < end_time:
lines = self.readlines()
checked_lines.extend(lines)
matches = [check_function(line) is not None for line in lines]
if any(matches):
return sum(matches)
time.sleep(0.1)
else:
for line in checked_lines:
print(line)
raise TimeoutError("Check function has not returned True in the expected period of time.")
def ping(self, address, count=1, timeout=1.0):
ip = ipaddress.ip_address(address)
end_time = time.time() + (count * timeout)
self.write("{} -c {} {}".format(self.PING_CMDS[ip.version], count, address))
time.sleep(end_time - time.time())
try:
return self._wait_while_check_function_returns_false(self.PING_RE.match, timeout=0.1)
except TimeoutError:
raise PingError("Could not ping: {}".format(address))
class RfSwitchCommunicator(LineCommunicator):
BOOL_MAP = {"1\n": True, "0\n": False}
def __init__(self, conn, read_size=2):
super(RfSwitchCommunicator, self).__init__(conn, None, read_size)
self.lock_panel()
self.display_string("RF SWITCH")
def _wait_for_response(self, timeout=1.0):
end_time = time.time() + timeout
while time.time() < end_time:
line = self.readline()
            if line is not None:
return line
time.sleep(0.1)
else:
raise TimeoutError("Response has not been received in the expected period of time.")
def lock_panel(self):
self.write("SYSTEM:RWLOCK")
def unlock_panel(self):
self.write("SYSTEM:LOCAL")
def display_off(self):
self.write("DIAGNOSTIC:DISPLAY:STATE OFF")
def display_on(self):
self.write("DIAGNOSTIC:DISPLAY:STATE ON")
def display_string(self, string):
self.write("DIAGNOSTIC:DISPLAY \"{}\"".format(string))
def close_channel(self, channel):
self.write("CLOSE (@{})".format(channel))
def open_channel(self, channel):
self.write("OPEN (@{})".format(channel))
def is_channel_open(self, channel):
self.readlines()
self.write("OPEN? (@{})".format(channel))
response = self._wait_for_response()
return self.BOOL_MAP[response]
def is_channel_closed(self, channel):
self.readlines()
self.write("CLOSE? (@{})".format(channel))
response = self._wait_for_response()
return self.BOOL_MAP[response]
class OpenThreadBorderRouterCommunicator(BorderRouterCommunicator):
LOGIN_RE = re.compile(r"\w+\slogin:")
PASSWD_RE = re.compile(r"Password:")
PROMPT_RE = re.compile(r"\w+@\w+:~\$\s?\w*")
WELCOME_BANNER_RE = re.compile(r"^Raspbian\s+GNU/Linux\s+\d+\s+[\w\d]*\s+ttyS\d+")
def __init__(self, conn, read_size=1280, wait_time=0.001):
super(OpenThreadBorderRouterCommunicator, self).__init__(conn, read_size, wait_time)
def wait_for_boot(self, timeout=120):
timeout_time = time.time() + timeout
while time.time() < timeout_time:
matches = [self.WELCOME_BANNER_RE.match(line) is not None for line in self.readlines()]
if any(matches):
break
time.sleep(0.1)
else:
raise TimeoutError("Could not find welcome banner.")
def _wait_for_string_in_line_buffer(self, match_func, timeout=30):
timeout_time = time.time() + timeout
while time.time() < timeout_time:
if match_func(self._line):
break
time.sleep(0.1)
else:
raise TimeoutError("Could not find matching string in the line buffer.")
def _login(self, username, password, timeout=30):
self._wait_for_string_in_line_buffer(self.LOGIN_RE.match, timeout)
self._conn.write("{}\n".format(username))
self._wait_for_string_in_line_buffer(self.PASSWD_RE.match, timeout)
self._conn.write("{}\n".format(password))
def login(self, username, password, timeout=30):
timeout_time = time.time() + timeout
while time.time() < timeout_time:
try:
self._login(username, password, timeout=0.5)
except TimeoutError:
if any([self.PROMPT_RE.match(line) is not None for line in self.readlines()]):
break
self._conn.write("\n")
else:
raise TimeoutError("Could not to login to the Border Router.")
def run_command(self, command, wait_for_exit_code=True, stdout=None, timeout=30):
stdout = [] if stdout is None else stdout
stdout.extend([line.strip("\r\n") for line in self.readlines()])
if not wait_for_exit_code:
self._conn.write("{}\n".format(command))
return 0
self._conn.write("{}; echo \"Exit code: $?\"\n".format(command))
exit_code_re = re.compile(r"^Exit\scode:\s+(\d+)\s+")
timeout_time = time.time() + timeout
while time.time() < timeout_time:
matches = []
for line in self.readlines():
stdout.append(line.strip("\r\n"))
matches.append(exit_code_re.match(line))
matches = [match for match in matches if match is not None]
if any(matches):
                return int(matches[0].group(1))  # Cast to int so the return type matches the early 'return 0' above.
time.sleep(0.5)
else:
raise TimeoutError("Command has not been finished in the expected time period.")
class AdvancedLineCommunicator(LineCommunicator):
""" Advanced CLI line communicator.
    This class provides an interface for sending commands and receiving
    responses from a colorful CLI with error and success messages.
"""
CLI_NEWLINE_ESC_CHAR = "\x1bE"
CLI_COLOR_RE = r"(.*)\x1b\[[^m]{1,7}m(.*)"
RETRY_ATTEMPTS = 1
VT100_CURSOR_RE = r"(.*)\x1b\[\d+D\x1b\[J(.*)"
LOG_RE = r"^.*<(info|debug|warning|error|none)> (.+?): (.+)"
def __init__(self, conn, prompt=None, success_prefix="done", error_prefix="error:", retry_on=None, **kwargs):
""" Create Advanced CLI line communicator.
Args:
conn (connection object): connection object that will be used to send and receive data
kwargs: as in parent class
prompt (str): CLI prompt string
success_prefix (str): case insensitive prefix, that is returned on successful command execution
error_prefix (str): case insensitive prefix, that is returned on command error, followed by error description
retry_on (str, list(str)): if a command prints a line matching any string in retry_on, it will be repeated
"""
super(AdvancedLineCommunicator, self).__init__(conn, self.__newline_handler, **kwargs)
self._prompt_re = re.compile(r"^{}\s+(.*)".format(prompt if prompt is not None else ".*~\$"))
self._color_re = re.compile(self.CLI_COLOR_RE)
self._vt100_cursor_re = re.compile(self.VT100_CURSOR_RE)
self._log_re = re.compile(self.LOG_RE)
self._success_prefix = success_prefix.lower()
self._error_prefix = error_prefix.lower()
self._last_command = ""
self.default_write_command_timeout = 1.0
self.clear_cmd = ""
self._retry_on = []
if retry_on:
if not isinstance(retry_on, list):
retry_on = [retry_on]
self._retry_on = retry_on
self._received_logs = []
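
    # Example usage (a sketch; 'conn' is any connection object, e.g.
    # AsciiConnection(UartConnection(...)) from connection.py):
    #
    #   comm = AdvancedLineCommunicator(conn, prompt=">")
    #   comm.start()
    #   lines = comm.write_command("version")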
def __newline_handler(self, line):
""" Looks for empty lines or VT100 newline escape sequence in a single line,
breaks it into list of strings representing received lines and puts into
internal line buffer or asynchronous line buffer (depending on is_async check)
Args:
line (str): line received over connection
"""
if line == "\r\n":
return
new_lines = line.split(self.CLI_NEWLINE_ESC_CHAR)
self._lines_lock.acquire()
for new_line in new_lines:
if self.is_async(new_line):
self._async_lines.append(new_line)
else:
self._lines.append(new_line)
self._lines_lock.release()
def _remove_eol_characters(self, line):
""" Removes every "\r" and "\n" character from input string.
Args:
line (str): input line
Returns:
str: line without "\r" and "\n" characters
"""
return "".join([c if c != "\r" and c != "\n" else "" for c in line])
def _remove_colors(self, line):
""" Removes every VT100 color escape sequence from input string.
Args:
line (str): input line
Returns:
str: line without VT100 color escape sequences
"""
colors_found = self._color_re.match(line)
while colors_found:
line = "".join(colors_found.groups())
colors_found = self._color_re.match(line)
return line
def _remove_prompt(self, line):
""" Removes every string prefix, equal to the prompt CLI prompt.
Args:
line (str): input line
Returns:
str: line without CLI prompt
"""
found_prompt = self._prompt_re.match(line)
if found_prompt:
return found_prompt.group(1)
return line
def _gather_logs(self, line):
""" Checks if the line is a log and retrieves it.
Args:
line (str): input line
Returns:
str: input line if not a log, empty string otherwise
"""
# Remove the screen clearing
found = self._vt100_cursor_re.match(line)
if found:
line = found.group(2)
        # Find the logs themselves
found = self._log_re.match(line)
if found:
self._received_logs.append({"level" : found.group(1),
"module" : found.group(2),
"string" : found.group(3)})
return ""
return line
def _remove_non_printable_characters(self, line):
""" Removes all ASCII non-printable characters in a line.
Args:
line (str): input line
Returns:
str: input line without all non-printable characters
"""
return ''.join([x if x in string.printable else '' for x in line])
def _wait_until_true(self, check_function, timeout):
""" Wait until check_function returns True or timeout occurs.
Args:
check_function (function): function to be called
timeout (float): maximum time, in seconds, within which check_function will be checked against returned value
        Returns:
            bool: True if check_function returned True for any received line, False if a timeout occurred.
"""
timeout_time = time.time() + timeout
while time.time() < timeout_time:
lines = self.readlines()
if any([check_function(line) for line in lines]):
return True
time.sleep(0.01)
return False
def _check_if_command_finished(self, line):
""" Receives a single line, filters it and checks if success message or failure
prefix was received.
Returns:
bool: True if success message was received.
Raises:
CommandError: if parsed line contains error message
"""
line = self._remove_colors(line) # Remove color escape characters
line = self._remove_eol_characters(line) # Remove Additional \r and \n characters
line = self._remove_prompt(line) # Prompt prefix
line = self._gather_logs(line) # Retrieve the logs
line = self._remove_non_printable_characters(line) # Remove the non-printable characters
self.logger.debug("CLI::{} RX: {}".format(self._conn, line))
if line.lower().startswith(self._success_prefix):
return True
elif line == self._last_command:
return False
elif line == "":
return False
elif any(r in line for r in self._retry_on):
self.logger.error("Error data detected: {}".format(line))
raise CommunicatorError()
elif line.lower().startswith(self._error_prefix):
raise CommandError(line[len(self._error_prefix):].strip())
else:
self.return_value.append(line)
return False
def write_command(self, command, wait_for_success=True, timeout=None):
""" Writes a command through CLI connection and wait either for success
message, error prefix or timeout event.
Args:
command (str): command to be called on CLI
wait_for_success (bool): if False, suppresses timeout event
            timeout (float): maximum time, in seconds, within which CLI should return command status
Returns:
list: containing received lines or None if no command output received.
Raises:
CommandError: if CLI returns error
TimeoutError: if timeout occurred
"""
timeout = timeout if timeout is not None else self.default_write_command_timeout
self._last_command = command
attempts = self.RETRY_ATTEMPTS + 1
while attempts:
try:
self.write(command)
self.logger.debug("CLI::{} TX: {}".format(self._conn, command))
self.return_value = []
# TODO: KRKNWK-3707 add support for asynchronous logs in write_command
self.received_done = self._wait_until_true(self._check_if_command_finished, timeout)
if wait_for_success and not self.received_done:
raise TimeoutError("{}: '{}' did not receive '{}'. So far got: {}".format(self, command, self._success_prefix, self.return_value))
return self.return_value if self.return_value else None
except CommunicatorError:
attempts -= 1
self.logger.error("{}: retrying to run cmd: '{}'".format(self, command))
else:
msg = f"Cannot execute '{command}' properly. No more attempts left"
self.logger.error(msg)
raise CommandError(msg)
def retrieved_logs(self, aquisition_time=0):
""" Returns the collected logs.
Args:
aquisition_time: time to collect logs
"""
self.clear()
self.empty_logs()
time.sleep(aquisition_time)
self._wait_until_true(self._check_if_command_finished, timeout=1.0)
self.return_value = []
return self._received_logs
def received_logs(self):
"""
Parses lines for 1 second and returns received logs
NOTE: **all** received logs that are present in buffer are returned, not only
the ones that showed up in 1 second
Returns:
List with received logs
"""
self._wait_until_true(self._check_if_command_finished, timeout=1.0)
self.return_value = []
return self._received_logs
def empty_logs(self):
self._received_logs = []
def clear(self):
""" Writes a new line character and reads all input lines in order to get
clean input for further CLI commands.
"""
logging.info(f"{self}: flushing IO")
self.write(self.clear_cmd)
self.readlines()
self.return_value = [] | zb-cli-wrapper | /zb-cli-wrapper-0.3.zip/zb-cli-wrapper-0.3/zb_cli_wrapper/src/utils/communicator.py | communicator.py |
import os
import sys
import serial
import subprocess
import time
import signal
import logging
import textwrap
from zb_cli_wrapper.src.utils.utils import pretty_exc
from pynrfjprog.MultiAPI import MultiAPI
from pynrfjprog.API import APIError, DeviceFamily
class TimeoutError(Exception):
pass
class Connection(object):
def open(self):
raise NotImplementedError
def close(self):
raise NotImplementedError
def read(self, size):
raise NotImplementedError
def write(self, data, timeout):
raise NotImplementedError
class AsciiConnection(Connection):
def __init__(self, conn):
self._conn = conn
def __del__(self):
del self._conn
self._conn = None
def open(self):
self._conn.open()
def close(self):
self._conn.close()
def read(self, size):
data = self._conn.read(size)
return data.decode("utf-8", "ignore")
def write(self, data, timeout=5.0):
return self._conn.write(data.encode("utf-8", "ignore"), timeout)
def __repr__(self):
return self._conn.__repr__()
def __str__(self):
return self.__repr__()
def __getattr__(self, item):
return getattr(self._conn, item)
class RttConnection(Connection):
def __init__(self, jlink_snr, dev_family=DeviceFamily.NRF52,
rtt_channel=0, rtt_timeout=3.0, jlink_speed_khz=2000,
name=None):
super(RttConnection, self).__init__()
self._jlink_snr = int(jlink_snr)
self._dev_family = dev_family
self._rtt_channel = rtt_channel
self._rtt_timeout = rtt_timeout
self._jlink_speed_khz = jlink_speed_khz
self._api = None
self.name = name
def __del__(self):
if getattr(self, "_api", None) is None:
return
self.close()
def set_rtt_channel(self, rtt_channel):
self._rtt_channel = rtt_channel
def open(self):
if self._api is not None:
return
self._api = MultiAPI(self._dev_family)
self._api.open()
if self.name:
self._api._runner_process.name += "_{}".format(self.name)
self._api.connect_to_emu_with_snr(self._jlink_snr, self._jlink_speed_khz)
self._api.connect_to_device()
self._api.rtt_start()
timeout_time = time.time() + self._rtt_timeout
try:
while True:
if self._api.rtt_is_control_block_found():
break
if time.time() > timeout_time:
self.close()
raise TimeoutError(f"{self}: could not find RTT control block.")
time.sleep(0.01)
except APIError as e:
raise e
def close(self):
if self._api is None:
return
try:
if self._api.is_rtt_started():
self._api.rtt_stop()
if self._api.is_connected_to_device():
self._api.disconnect_from_device()
if self._api.is_connected_to_emu():
self._api.disconnect_from_emu()
except APIError as e:
logging.error(f"Error while closing {self}: {pretty_exc(e)}")
finally:
self._api.close()
self._api.terminate()
del self._api
self._api = None
def read(self, size=64):
if self._api is None:
return bytes([])
return bytes(self._api.rtt_read(self._rtt_channel, size, encoding=None))
def write(self, data, timeout=5.0):
if self._api is None:
return -1
return self._api.rtt_write(self._rtt_channel, data, encoding=None)
def __repr__(self):
s = f"{self.name}:" if self.name else ""
return s + "RTT@{}".format(self._jlink_snr)
class UartConnection(Connection):
def __init__(self, port, baudrate, name=None, rtscts=True, dsrdtr=True):
super(UartConnection, self).__init__()
self._port = port
self._baudrate = baudrate
self._rtscts = rtscts
self._dsrdtr = dsrdtr
self._serial = None
self.name = name
def __del__(self):
if self._serial is None:
return
self.close()
def open(self):
self._serial = serial.Serial(timeout=0, write_timeout=1.0)
# Workaround to avoid serial communication failures
# DTR - Data Terminal Ready
# DSR - Data Set Ready
# RTS - Request To Send
# CTS - Clear To Send
self._serial.dtr = True
self._serial.rtscts = self._rtscts
self._serial.dsrdtr = self._dsrdtr
self._serial.port = self._port
self._serial.baudrate = self._baudrate
self._serial.open()
def close(self):
if self._serial is None:
return
self._serial.close()
self._serial = None
def read(self, size):
if self._serial is None:
raise RuntimeError("Trying to read data from closed {}".format(self))
return self._serial.read(size)
def write(self, data, timeout=5.0):
if self._serial is None:
raise RuntimeError("Trying to write data to closed {}".format(self))
sent_count = 0
timeout_time = time.time() + timeout
while sent_count < len(data) and time.time() < timeout_time:
sent_count += self._serial.write(data[sent_count:])
self._serial.flush()
return sent_count
def __repr__(self):
if self.name is not None:
return f"{self.name}@{self._port}"
else:
return f"UartConn: {self._port}@{self._baudrate}"
class ProcessConnection(Connection):
def __init__(self, args, cwd, name=None):
self._args = args
self._cwd = cwd
self._proc = None
self.name = name
def __del__(self):
if self._proc is None:
return
self.close()
def open(self):
# Popen is unhappy if creationflags argument is provided on Linux.
# To solve this, pass args by unpacking a dictionary and include
# creationflags only on Windows.
kargs = {
'args': self._args,
'stdin': subprocess.PIPE,
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
'cwd': self._cwd,
'bufsize': 0,
'shell': False
}
if not sys.platform.startswith('linux'):
kargs['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
else:
kargs['preexec_fn'] = os.setpgrp
self._proc = subprocess.Popen(**kargs)
def close(self):
if not sys.platform.startswith('linux'):
# Send CTRL+BREAK signal to a process group. We need to do that
# to make sure that subprocesses spawned by _proc are killed.
# This can be a case when, for example, _proc is a batch file which
# executes external programs.
#
# We can't use CTRL+C as Windows doesn't allow this for process
# groups (see http://msdn.microsoft.com/en-us/library/ms683155%28v=vs.85%29.aspx)
self._proc.send_signal(signal.CTRL_BREAK_EVENT)
else:
            # Based on discussion in https://stackoverflow.com/a/4791612
os.killpg(os.getpgid(self._proc.pid), signal.SIGKILL)
# Kill _proc. One might think that sending CTRL+BREAK to a process group
# kills everyone including parent. Nope. If parent is a batch file then,
# after sending CTRL+BREAK, a "Terminate batch job (Y/N)?" dialog is displayed,
# hence _proc.kill() is still needed.
        if self._proc.poll() is None:
self._proc.kill()
self._proc.wait()
del self._proc
self._proc = None
def read(self, size):
if self._proc is None:
return bytes([])
return self._proc.stdout.read(size)
def write(self, data, timeout=5.0):
if self._proc is None:
return -1
sent_count = 0
timeout_time = time.time() + timeout
while sent_count < len(data) and time.time() < timeout_time:
sent_count += self._proc.stdin.write(data[sent_count:])
self._proc.stdin.flush()
return sent_count
def __repr__(self):
s = ""
if self.name:
s += f"ProcConn({self.name})"
else:
s += f'ProcConn({textwrap.shorten(" ".join(self._args), width=16, placeholder="...")})'
return s | zb-cli-wrapper | /zb-cli-wrapper-0.3.zip/zb-cli-wrapper-0.3/zb_cli_wrapper/src/utils/connection.py | connection.py |
import re
import yaml
class CommandParserError(Exception):
pass
class CommandWrapper(object):
""" This class implements a common parent for CLI command wrappers.
Args:
cli_getter: a reference to the function that returns CLI object instance.
dev: a reference to a device that uses this command wrapper. Useful to access device logger
Note:
The CLI object changes between test cases, but the board object does not,
thus the getter function has to be used to reference the current CLI
instance while sending commands/receiving responses.
The CLI:
CLI is any object, that implements the following method:
def write_command(self, command, wait_for_success=True, timeout=1.0)
Which returns a list of strings, representing command responses.
Usage:
In order to extend a board object, that implements CLI interface,
a simple cli property getter has to be implemented in every CLI device class:
def get_cli(self):
return self.cli
Afterwards, a command wrapper should be added to the board object
as its new property e.g.:
self.test = NewTestCommandsWrapper(cli_getter)
That way it is possible to access command groups with more intuitive way,
avoiding multiple class inheritance e.g.:
board.test.start()
board.test.configure.length(100)
Reasoning:
The class, that implements a command parsing logic, is not
an instance of a board and does not need access to all of its internal
properties to work correctly. Because of that, it is better to attach
command parser as a property with methods, than using multi level
inheritance.
Complex methods, that require sending and parsing multiple commands
should be implemented differently, using command wrappers as primitives.
"""
def __init__(self, cli_getter, dev=None):
self.cli_getter = cli_getter
if dev is not None:
self.dev = dev
@property
def cli(self):
""" CLI communicator. Initializes on first usage. Make sure that this property is not called in constructor.
"""
return self.cli_getter()
def get_cli(self):
""" Returns current CLI object instance.
"""
return self.cli
def clear_buffer(self):
""" Writes a new line character and reads all input lines in order to get
clean input for further CLI commands.
"""
self.cli.clear()
@staticmethod
def _parse_values(results_dict):
""" Iterates through all keys and converts:
- values with units into (value, 'unit') tuples
- true/false into actual bool variables
For example:
"123.456ms" -> (123.456, 'ms')
"12,34 %" -> (12.34, '%')
"true" -> True
"""
        # Note: '\s*' (not '\s+') so that bare 'true'/'false' values also match.
        value_unit_re = re.compile(r'^\s*([0-9.,]+)\s*([^0-9]+)\s*$')
        true_re = re.compile(r'^\s*true\s*$', flags=re.IGNORECASE)
        false_re = re.compile(r'^\s*false\s*$', flags=re.IGNORECASE)
results = {}
for key, value in results_dict.items():
if isinstance(value, dict):
results[key] = CommandWrapper._parse_values(value)
elif re.match(value_unit_re, str(value)):
value_unit = re.match(value_unit_re, value).groups()
                results[key] = (float(value_unit[0].replace(',', '.')), value_unit[1])  # Accept ',' as decimal separator, as in "12,34 %".
elif re.match(true_re, str(value)):
results[key] = True
elif re.match(false_re, str(value)):
results[key] = False
else:
                results[key] = value
return results
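
    # Example (a sketch) of the conversion performed by _parse_values:
    #
    #   >>> CommandWrapper._parse_values({'time': '123.456ms', 'enabled': 'true'})
    #   {'time': (123.456, 'ms'), 'enabled': True}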
@staticmethod
def _parse_yaml_results(responses, parse_values=True):
""" Parses command results presented in YAML format.
Params:
responses: list of lines with CLI response
            parse_values: if True, the 'results' values are parsed where
                applicable. See _parse_values to see how the parsing is done.
Returns:
results (dict): keys are the same as inside received YAML
"""
results_yaml = ''
# Check if there are any responses to parse.
if responses is None:
return None
# Treat every line with ':' character as a valid YAML input line.
for response in responses:
if response.find(':') == -1:
continue
# TODO: remove the .replace(...) once CLI returns whitespaces instead of tabs
results_yaml += '\n' + response.replace('\t', ' ')
# Parse YAML string into dictionary.
try:
results_dict = yaml.full_load(results_yaml)
except yaml.scanner.ScannerError as e:
raise CommandParserError("Cannot results as YAML:\n{}".format(results_yaml)) from e
if parse_values:
# Convert values with unit into tuples, return.
return CommandWrapper._parse_values(results_dict)
else:
return results_dict
@staticmethod
def _parse_ascii_table(data, header_offset=0, data_offset=2, col_char="|"):
"""
Parser for the ascii table:
Input format:
| ID | RLOC16 | Timeout | Age | LQ In | C_VN |R|S|D|N| Extended MAC |
+-----+--------+------------+------------+-------+------+-+-+-+-+------------------+
| 1 | 0x4401 | 10 | 2 | 3 | 116 |0|1|0|0| 6a2a8242654da4c9 |
Args:
data (list, str): string or list of lines with child table
header_offset (int): location of header line (indexing start with 0)
data_offset (int): location of first data line (indexing start with 0)
col_char (str): character used as column separator
Note: method auto detects the left-most and right-most col_char
Returns:
List with dicts of child table data filled with strings:
[ {
'ID': '1'
'RLOC16': '0x4401',
'Timeout': '10',
'Age': '2',
'LQ In': '3',
'C_VN': '116',
'R': '0',
'S': '1',
'D': '0',
'N': '0',
'Extended MAC': '6a2a8242654da4c9'},
...
]
Raises:
CommandParserError if there are exceptions while parsing
"""
try:
if isinstance(data, str):
data = data.split("\n")
result = []
# Skip first and last, because those will be empty
headers = [s.strip() for s in data[header_offset].split(col_char)]
# Auto detecting right-most and left-most col_char
slice_with_actual_data = slice(1 if data[header_offset].startswith(col_char) else 0,
-1 if data[header_offset].endswith(col_char) else None)
headers = headers[slice_with_actual_data]
for line in data[data_offset:]:
if not line:
continue
splitted_line = [s.strip() for s in line.split(col_char)]
splitted_line = splitted_line[slice_with_actual_data]
if splitted_line:
result.append(dict(zip(headers, splitted_line)))
return result
except Exception as e:
raise CommandParserError(f"Cannot parse ascii table data") from e | zb-cli-wrapper | /zb-cli-wrapper-0.3.zip/zb-cli-wrapper-0.3/zb_cli_wrapper/src/utils/cmd_wrappers/base.py | base.py |
# Common constants, defined by Zigbee specification.
from enum import Enum
DEFAULT_ZIGBEE_PROFILE_ID = 0x0104 # HA profile ID
BROADCAST_ADDRESS_ALL_DEVICES = 0xffff
UNKNOWN_IEEE_ADDRESS = 0xFFFFFFFFFFFFFFFF
BASIC_CLUSTER = 0x0000
IDENTIFY_CLUSTER = 0x0003
ON_OFF_CLUSTER = 0x0006
LVL_CTRL_CLUSTER = 0x0008
OTA_CLUSTER = 0x0019
DOOR_LOCK_CLUSTER = 0x0101
COLOR_CTRL_CLUSTER = 0x0300
TEMPERATURE_CLUSTER = 0x0402
PRESSURE_CLUSTER = 0x0403
ZCL_VERSION_ATTR = 0x0000
IDENTIFY_IDENTIFY_TIME_ATTR = 0x0000
ON_OFF_ONOFF_ATTR = 0x0000
LVL_CTRL_CURR_LVL_ATTR = 0x0000
DOOR_LOCK_LOCK_STATE = 0x0000
COLOR_CTRL_CURR_HUE_ATTR = 0x0000
COLOR_CTRL_CURR_SAT_ATTR = 0x0001
OTA_CURRENT_FILE_VERSION_ATTR = 0x0002
OTA_UPGRADE_SERVER_ID_ATTR = 0x0000
IDENTIFY_IDENTIFY_CMD = 0x00
IDENTIFY_IDENTIFY_QUERY_CMD = 0x01
IDENTIFY_EZ_MODE_INVOKE_CMD = 0x02
IDENTIFY_UPDATE_COMMISSION_STATE_CMD = 0x03
ON_OFF_OFF_CMD = 0x00
ON_OFF_ON_CMD = 0x01
LVL_CTRL_MV_TO_LVL_CMD = 0x00
DOOR_LOCK_LOCK_DOOR_CMD = 0x00
DOOR_LOCK_UNLOCK_DOOR_CMD = 0x01
COLOR_CTRL_MV_TO_HUE_CMD = 0x00
COLOR_CTRL_MV_TO_SAT_CMD = 0x03
COLOR_CTRL_MV_TO_HUE_SAT_CMD = 0x06
OTA_QUERY_NEXT_IMAGE_RESPONSE_CMD = 0x02
CONFIGURE_REPORTING_CMD = 0x06
READ_ATTRIBUTES_CMD = 0x00
FRAME_CTRL_TYPE_PROFILE_WIDE = 0b00
FRAME_CTRL_TYPE_CLUSTER_SPECIFIC = 0b01
FRAME_CTRL_DIRECTION_TO_CLIENT = 0b1
FRAME_CTRL_DIRECTION_TO_SERVER = 0b0
FRAME_CTRL_STR = "0b000{disable_def_response:01b}{direction:01b}{manuf_specific:01b}{type:02b}"
OTA_QUERY_NEXT_IMAGE_RESPONSE_CTRL = int(FRAME_CTRL_STR.format(type=FRAME_CTRL_TYPE_CLUSTER_SPECIFIC, manuf_specific=False,
direction=FRAME_CTRL_DIRECTION_TO_CLIENT, disable_def_response=True), 2)
CONFIGURE_REPORTING_CTRL = int(FRAME_CTRL_STR.format(type=FRAME_CTRL_TYPE_PROFILE_WIDE, manuf_specific=False,
direction=FRAME_CTRL_DIRECTION_TO_SERVER, disable_def_response=True), 2)
REP_CONFIG_SEND_REPORTS = 0x00
REP_CONFIG_RECV_REPORTS = 0x01
REP_CONFIG_FORMAT_STR = "{direction:02X}{attribute.id:04X}{attribute.type:02X}{min_interval:02X}{max_interval:02X}{rep_change}"
ZCL_RAW_FORMAT_STR = "{frame_ctrl:02X}{seq_num:02X}{cmd_id:02X}{payload}"
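
# Example (a sketch): building the hex payload of a ZCL Configure Reporting
# command from the format strings above; 'attr' stands for an Attribute object
# and the intervals are illustrative placeholders.
#
#   rep_config = REP_CONFIG_FORMAT_STR.format(direction=REP_CONFIG_SEND_REPORTS,
#                                             attribute=attr, min_interval=1,
#                                             max_interval=60, rep_change='01')
#   raw = ZCL_RAW_FORMAT_STR.format(frame_ctrl=CONFIGURE_REPORTING_CTRL, seq_num=0,
#                                   cmd_id=CONFIGURE_REPORTING_CMD, payload=rep_config)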
class TYPES:
BOOL = 0x10
UINT8 = 0x20
UINT16 = 0x21
UINT32 = 0x23
UINT64 = 0x27
SINT8 = 0x28
SINT16 = 0x29
SINT64 = 0x2f
ENUM8 = 0x30
MAP8 = 0x18
EUI64 = 0xF0
STRING = 0x42
class ZCLDirection(Enum):
DIRECTION_CLI_TO_SRV = 0x00
DIRECTION_SRV_TO_CLI = 0x01
CLI_ENDPOINT = 64 # Default Zigbee CLI endpoint
DOOR_LOCK_ENDPOINT = 8 # Default Door Lock endpoint
LIGHT_BULB_ENDPOINT = 10 # Default Light Bulb endpoint
LIGHT_SWITCH_ENDPOINT = 1 # Default Light Switch endpoint
THINGY_PROXY_THINGY_ENDPOINT = 10 # One of the endpoints of Thingy on the Thingy Proxy
OTA_CLIENT_ENDPOINT = 10 # Default OTA Client endpoint
DOOR_LOCK_OPEN = 1
DOOR_LOCK_CLOSE = 0 | zb-cli-wrapper | /zb-cli-wrapper-0.3.zip/zb-cli-wrapper-0.3/zb_cli_wrapper/src/utils/cmd_wrappers/zigbee/constants.py | constants.py |
import re
from ..base import CommandWrapper as BaseCmdWrapper
from . import constants
from zb_cli_wrapper.src.utils.zigbee_classes.clusters.attribute import Attribute
class Commands:
""" CLI Commands to be used with firmware which includes Zigbee CLI component with ZCL commands.
"""
# Main command used in order to access ZCL subcommands
MAIN = 'zcl'
# Available ZCL commands
PING = ' '.join([MAIN, 'ping {eui64:016X} {length}'])
READATTR = ' '.join([MAIN, 'attr read {eui64:016X} {ep} {cluster:01X} {direction} {profile:04X} {attr:02X}'])
WRITEATTR = ' '.join([MAIN, 'attr write {eui64:016X} {ep} {cluster:01X} {direction} {profile:04X} {attr_id:02X} {attr_type:02X} {attr_value:X}'])
GENERIC_NO_PAYLOAD = ' '.join([MAIN, 'cmd {eui64:016X} {ep} {cluster:01X} -p {profile:04X} {cmd_id:01X}'])
GENERIC_WITH_PAYLOAD = ' '.join([MAIN, 'cmd {eui64:016X} {ep} {cluster:01X} -p {profile:04X} {cmd_id:01X} -l {payload}'])
SUBSCRIBE = ' '.join([MAIN, 'subscribe on {eui64:016X} {ep} {cluster:01X} {profile:04X} {attr_id:02X} {attr_type} {min_interval} {max_interval}'])
RAW = ' '.join([MAIN, 'raw {eui64:016X} {ep} {cluster:01X} {profile:04X} {payload_hex}'])
class CommandWrapper(BaseCmdWrapper):
""" This class adds an interface for sending ZCL commands and receiving parsed
responses through Zigbee CLI by calling methods on a device instance.
"""
def readattr(self, eui64, attr, timeout=20.0, direction=constants.ZCLDirection.DIRECTION_CLI_TO_SRV, ep=constants.CLI_ENDPOINT, profile=constants.DEFAULT_ZIGBEE_PROFILE_ID):
""" Sends "readattr" command and parses received response.
Args:
eui64 (int): destination node long address
direction (ZCLDirection): direction of the ZCL frame (Client -> Server or Server -> Client)
ep (int): destination node endpoint number
profile (int): id of the profile, containing the cluster
attr (object): Attribute object instance, representing attribute to be read
timeout (float): maximum time, in seconds, within which CLI should return command response
Returns:
object: new Attribute object with values set according to the received response
Raises:
ValueError: if received result with an unknown formatting
CommandError: if CLI returns error
TimeoutError: if timeout occurred
"""
response_re = re.compile('^(ID\:\ *)(.*)(Type\:\ *)(.*)(Value\:\ *)(.*)')
cmd = Commands.READATTR.format(eui64=eui64, ep=ep, cluster=attr.cluster, direction='-c' if direction.value == constants.ZCLDirection.DIRECTION_SRV_TO_CLI.value else '', profile=profile, attr=attr.id)
response = self.cli.write_command(cmd, timeout=timeout)
if response is None:
raise ValueError("Expected read attribute response, but no response received")
resp_found = response_re.match(response[-1])
if resp_found:
resp = resp_found.groups()
return Attribute(cluster=attr.cluster, id=int(resp[1]), type=int(resp[3], 16), value=resp[5], name=attr.name)
else:
raise ValueError("Received result in unexpected format: {}".format(response))
def writeattr(self, eui64, attr, timeout=20.0, direction=constants.ZCLDirection.DIRECTION_CLI_TO_SRV, ep=constants.CLI_ENDPOINT, profile=constants.DEFAULT_ZIGBEE_PROFILE_ID):
""" Sends "writeattr" command and parses received response.
Args:
eui64 (int): destination node long address
direction (ZCLDirection): direction of the ZCL frame (Client -> Server or Server -> Client)
ep (int): destination node endpoint number
profile (int): id of the profile, containing the cluster
attr (object): Attribute object instance, representing attribute to be written
timeout (float): maximum time, in seconds, within which CLI should return command response
Raises:
ValueError: if attempts to write unsupported value type
CommandError: if CLI returns error
TimeoutError: if timeout occurred
"""
cmd = Commands.WRITEATTR.format(eui64=eui64, ep=ep, cluster=attr.cluster, direction='-c' if direction.value == constants.ZCLDirection.DIRECTION_SRV_TO_CLI.value else '', profile=profile, attr_id=attr.id, attr_type=attr.type, attr_value=attr.value)
return self.cli.write_command(cmd, timeout=timeout)
@staticmethod
def _parse_ping_response(response):
""" Parse a single ping response and return a round trip time as integer (in milliseconds)
Args:
response (array of string): ping command response(s)
Raises:
ValueError: if unable to find ping response in responses
Return:
ping_time (int) in ms
"""
ping_re = re.compile(r'Ping time\:\ \b(\d+)\b ms')
ping_response_match = ping_re.match(response)
if ping_response_match is not None:
return int(ping_response_match.groups()[0])
raise ValueError("Unable to find a ping time in responses: {}".format(response))
def ping(self, eui64, timeout=20.0, length=30, wait_for_response=True):
""" Issue a ping-style command to another CLI device of address `eui64` by using `length` bytes of payload.
Args:
eui64 (int): destination node long address
timeout (float): maximum time, in seconds, within which CLI should return command response
length (int): ping command payload length
wait_for_response (bool): if False, suppresses timeout exception
Raises:
CommandError: if CLI returns error
TimeoutError: if timeout occurred
ValueError: if unable to parse ping response
Return:
ping_time (int) in ms
"""
cmd = Commands.PING.format(eui64=eui64, length=length)
response = self.cli.write_command(cmd, wait_for_success=wait_for_response, timeout=timeout)
if not wait_for_response:
return None
return CommandWrapper._parse_ping_response(''.join(response))
def generic(self, eui64, ep, cluster, profile, cmd_id, payload=None, timeout=20.0):
""" Issue a generic command with no payload.
Args:
eui64 (int): destination node long address
ep (int): destination endpoint
cluster (int): destination ZCL cluster
profile (int): profile to which the destination ZCL cluster belongs
cmd_id (int): ID of the ZCL command to issue
payload (list): payload of the command - list of tuples to build payload from
tuple shall contain value to send and type of value
e.g. [(0x24, constants.TYPES.UINT8), (1, constants.TYPES.UINT16)]
timeout (float): maximum time, in seconds, within which CLI should return command response
Raises:
CommandError: if CLI returns error
TimeoutError: if timeout occurred
TypeError: if type of value given as payload can not be handled
"""
cmd = Commands.GENERIC_NO_PAYLOAD.format(eui64=eui64, ep=ep, cluster=cluster, profile=profile, cmd_id=cmd_id)
if payload:
octet_list = []
for data in payload:
value_hex_string = ''
byte_len = 0
if data[1] is constants.TYPES.BOOL:
byte_len = 1
value_formatter = "".join(["{value:0", str(byte_len * 2), "X}"])
value_hex_string = value_formatter.format(value=data[0])
elif data[1] in range(constants.TYPES.UINT8, constants.TYPES.UINT64 + 1):
byte_len = data[1] - constants.TYPES.UINT8 + 1
value_formatter = "".join(["{value:0", str(byte_len * 2), "X}"])
value_hex_string = value_formatter.format(value=data[0])
elif data[1] in range(constants.TYPES.SINT8, constants.TYPES.SINT64 + 1):
byte_len = data[1] - constants.TYPES.SINT8 + 1
value_formatter = "".join(["{value:0", str(byte_len * 2), "X}"])
# Handle signed int to string of hex conversion
mask = (2 ** (8 * byte_len) - 1)
value_hex_string = value_formatter.format(value=(data[0] & mask))
elif data[1] in [constants.TYPES.ENUM8, constants.TYPES.MAP8]:
byte_len = 1
value_formatter = "".join(["{value:0", str(byte_len * 2), "X}"])
value_hex_string = value_formatter.format(value=data[0])
            else:
                # Types other than bool, enums/bitmaps and signed/unsigned ints are
                # accepted only as pre-formatted hex strings and appended as-is.
                if type(data[0]) is str:
                    octet_list.append(data[0])
                else:
                    raise TypeError("Cannot handle payload value of this type - only pre-formatted hex strings are accepted")
            # Append the hex string two characters at a time, from end to start (little-endian byte order)
for x in range(byte_len * 2, 0, -2):
octet_list.append(value_hex_string[x - 2: x])
cmd = Commands.GENERIC_WITH_PAYLOAD.format(eui64=eui64, ep=ep, cluster=cluster, profile=profile,
cmd_id=cmd_id, payload="".join(octet_list))
self.cli.write_command(cmd, timeout=timeout)
def subscribe(self, eui64, ep, cluster, attr, profile=constants.DEFAULT_ZIGBEE_PROFILE_ID, min_interval=None, max_interval=None):
"""
        Sends a ZCL Configure Reporting request.
Args:
eui64 (int): destination node long address
ep (int): destination endpoint
cluster(int): destination ZCL cluster
attr(Attribute): attribute to report
profile(int): profile to which the destination ZCL cluster belongs
            min_interval(int): minimum interval between attribute reports
            max_interval(int): maximum interval between attribute reports
Raises:
CommandError: if CLI returns error
TimeoutError: if timeout occurred
"""
cmd = Commands.SUBSCRIBE.format(eui64=eui64, ep=ep, cluster=cluster, profile=profile, attr_id=attr.id,
attr_type=attr.type, min_interval=('' if min_interval is None else min_interval),
max_interval=('' if max_interval is None else max_interval))
self.cli.write_command(cmd, timeout=10.0)
def raw(self, eui64, ep, cluster, payload_hex, profile=constants.DEFAULT_ZIGBEE_PROFILE_ID, timeout=20):
""" Sends RAW ZCL command and parses received response.
Args:
eui64 (int): destination node long address
ep (int): destination node endpoint number
cluster (int): destination ZCL cluster
payload_hex (str): hex string representing ZCL payload, starting from Frame Control Field.
profile (int): id of the profile, containing the cluster
timeout (float): maximum time, in seconds, within which CLI should return command response
Raises:
ValueError: if attempts to write unsupported value type
CommandError: if CLI returns error
TimeoutError: if timeout occurred
"""
cmd = Commands.RAW.format(eui64=eui64, ep=ep, cluster=cluster, profile=profile, payload_hex=payload_hex)
return self.cli.write_command(cmd, timeout=timeout)
def send_color_control_frame(self, target_ficr_eui64: int):
""" Sends color control frame to device with given id
Args:
target_ficr_eui64 (int): EUI64 address of target's device
"""
# Generate payload with some arbitrary values of hue and saturation.
move_to_hue_and_saturation_payload = [(0x24, constants.TYPES.UINT8),
(0x24, constants.TYPES.UINT8),
(1, constants.TYPES.UINT16)]
self.generic(target_ficr_eui64,
constants.THINGY_PROXY_THINGY_ENDPOINT,
constants.COLOR_CTRL_CLUSTER,
constants.DEFAULT_ZIGBEE_PROFILE_ID,
constants.COLOR_CTRL_MV_TO_HUE_SAT_CMD,
                     payload=move_to_hue_and_saturation_payload)
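
# Example usage of the ZCL wrapper above (a sketch; assumes `cli` is a connected
# ZbCliDevice exposing this wrapper as `cli.zcl`, as in the bundled MQTT gateway
# example; the address and endpoint are placeholders):
#
#   rtt_ms = cli.zcl.ping(eui64=0x0B010E2F79E69DBF, length=30)
#   cli.zcl.generic(eui64=0x0B010E2F79E69DBF, ep=10,
#                   cluster=constants.ON_OFF_CLUSTER,
#                   profile=constants.DEFAULT_ZIGBEE_PROFILE_ID,
#                   cmd_id=0x01)  # 0x01 = On command, no payload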
import re
from collections import namedtuple
from typing import List
from ...zigbee_classes.long_address import LongAddress
from ..base import CommandWrapper as BaseCmdWrapper
from . import constants
from enum import Enum
MatchedEndpoint = namedtuple('MatchedEndpoint', ['src_add', 'id'])
class Commands:
""" CLI Commands to be used with firmware which includes Zigbee CLI component with ZDO commands.
"""
# Main command used in order to access ZDO subcommands
MAIN = 'zdo'
# Available ZDO commands
EUI64 = ' '.join([MAIN, 'eui64'])
EUI64_SET = ' '.join([MAIN, 'eui64 {dev_eui64:016X}'])
SHORT = ' '.join([MAIN, 'short'])
MATCH_DESC = ' '.join([MAIN, 'match_desc {dst_addr:04X} {req_addr:04X} {prof_id:04X} {input_cluster_ids} {output_cluster_ids}'])
IEEE_ADDR = ' '.join([MAIN, 'ieee_addr {short_addr}'])
NWK_ADDR = ' '.join([MAIN, 'nwk_addr {dst_eui64:016X}'])
BIND = ' '.join([MAIN, 'bind on {src_eui64:016X} {src_ep} {dst_eui64:016X} {dst_ep} {cluster:01X} {dst_short:04X}'])
MGMT_BIND = ' '.join([MAIN, 'mgmt_bind {short_addr:04X}'])
MGMT_LQI = ' '.join([MAIN, 'mgmt_lqi {short_addr:04X}'])
MGMT_LEAVE = ' '.join([MAIN, 'mgmt_leave {short_addr:04X}'])
# Regex to use when parsing responses
# Matches header of response to 'zdo mgmt_bind' CLI command
BINDING_TABLE_HEADER_RE = re.compile(
r"^\[idx]\s+src_address\s+src_endp\s+cluster_id\s+dst_addr_mode\s+dst_addr\s+dst_endp")
# Matches binding table entry of response to 'zdo mgmt_bind' CLI command
BINDING_TABLE_ENTRY_RE = re.compile(
r"^"
r"\[\s*(?P<idx>\d+)]" # idx field, like: '[ 0]' , '[ 1]', '[120]'
r"\s+"
r"(?P<src_addr>[0-9a-fA-F]{16})" # src_addr field, 16 hex digits
r"\s+"
r"(?P<src_endp>\d+)" # src_endp field, decimal like '0', '1', '153'
r"\s+"
r"0x(?P<cluster_id>[0-9a-fA-F]+)" # cluster_id field, hex number C fmt with 0x prefix, like: '0x104', '0x0104'
r"\s+"
r"(?P<dst_addr_mode>\d+)" # dst_addr_mode field, decimal
r"\s+"
r"(?P<dst_addr>N/A|[0-9a-fA-F]+)" # dst_addr field, 16 hex digits
r"\s+"
r"(?P<dst_endp>N/A|\d+)") # dst_endp field, decimal like '0', '1', '153'
# Matches binding table recap of response to zdo mgmt_bind CLI command
BINDING_TABLE_TOTAL_RE = re.compile(
r"Total entries for the binding table: (\d+)")
class UnexpectedResponseError(Exception):
"""
Exception raised when cli device responds in unexpected way.
This may happen when cli command has changed and response can't be parsed or response is malformed.
"""
pass
class ZigbeeBindingTableEntry:
def __init__(self, src_addr, src_ep, cluster_id, dst_addr_mode, dst_addr, dst_ep):
"""
Args:
src_addr (int): EUI64 source long address
src_ep (int): Source endpoint
cluster_id (int): Cluster id
dst_addr_mode (int): Destination address mode
dst_addr (int): EUI64 destination long address
dst_ep (int): Destination endpoint
"""
self.src_addr = src_addr
self.src_ep = src_ep
self.cluster_id = cluster_id
self.dst_addr_mode = dst_addr_mode
self.dst_addr = dst_addr
self.dst_ep = dst_ep
def __eq__(self, other):
return (self.src_addr == other.src_addr) and (self.src_ep == other.src_ep) and \
(self.cluster_id == other.cluster_id) and (self.dst_addr_mode == other.dst_addr_mode) and \
(self.dst_addr == other.dst_addr) and (self.dst_ep == other.dst_ep)
def __str__(self):
return self.to_row_str()
def to_row_str(self):
return '{:016x} {:3d} 0x{:04x} {:3d} {:016x} {:3d}'.format(self.src_addr, self.src_ep, self.cluster_id, self.dst_addr_mode, self.dst_addr, self.dst_ep)
class ZigbeeZdoMgmtLqiEntry:
def __init__(self, ext_pan_id, ext_addr, short_addr, flags, permit_join, depth, lqi):
"""
Args:
ext_pan_id (int): Extended PAN ID
ext_addr (int): EUI64 source long address
short_addr (int): Short address
flags (int): Flags
permit_join (int): Permit Join flag
depth (int): Tree depth
lqi (int): LQI
"""
self.ext_pan_id = ext_pan_id
self.ext_addr = ext_addr
self.short_addr = short_addr
self.flags = flags
self.permit_join = permit_join
self.depth = depth
self.lqi = lqi
def __eq__(self, other):
return (self.ext_pan_id == other.ext_pan_id) and (self.ext_addr == other.ext_addr) and \
(self.short_addr == other.short_addr) and (self.flags == other.flags) and \
(self.permit_join == other.permit_join) and (self.depth == other.depth) and \
(self.lqi == other.lqi)
def __str__(self):
return '{:016x} {:16x} 0x{:04x} 0x{:02x} {:d} {:d} {:d}'.format(self.ext_pan_id,
self.ext_addr, self.short_addr, self.flags, self.permit_join, self.depth, self.lqi)
class ZigbeeBindingTable:
"""
Class representing Zigbee Binding Table. It is just a list of ZigbeeBindingTableEntry objects with helper methods
"""
def __init__(self):
self.entries = []
def add(self, entry):
self.entries.append(entry)
def contains(self, entry):
"""
Checks if Binding Table contains given entry
Args:
entry (ZigbeeBindingTableEntry): Entry to find
        Returns:
            bool: True if the table contains the given entry, False otherwise
        """
for ent in self.entries:
if ent == entry:
return True
return False
def __eq__(self, other):
"""
Equality operator. Checks if two binding tables are equal. Note that binding table order is not important
Args:
other (ZigbeeBindingTable):
Returns:
bool: True if the two binding tables have the same entries (regardless of their order), False otherwise
"""
if self.length != other.length:
return False
for ent in self.entries:
if not other.contains(ent):
return False
for ent in other.entries:
if not self.contains(ent):
return False
return True
def __str__(self):
retstr = ""
for i, ent in enumerate(self.entries):
retstr += f"[{i:3d}] {ent}\n"
return retstr
@property
def length(self):
return len(self.entries)
class CommandWrapper(BaseCmdWrapper):
""" This class adds an interface for sending ZDO commands and receiving parsed
responses through Zigbee CLI by calling methods on a device instance.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def eui64(self):
""" Reads device's long address (EUI64) through CLI interface.
Returns:
LongAddress: representing device long address.
Raises:
ValueError: if received result with an unknown formatting
CommandError: if CLI returns error
TimeoutError: if timeout occurred
"""
responses = self.cli.write_command(Commands.EUI64)
return LongAddress(responses[-1])
@eui64.setter
def eui64(self, new_address):
""" Set device's EUI64 address.
Args:
new_address (int): new device long address
Raises:
CommandError: if CLI returns error
TimeoutError: if timeout occurred
"""
self.cli.write_command(Commands.EUI64_SET.format(dev_eui64=new_address))
@property
def short_addr(self):
""" Reads device's short address through CLI interface.
Returns:
int: representing device short address.
Raises:
ValueError: if received result with an unknown formatting
CommandError: if device is not commissioned
TimeoutError: if timeout occurred
"""
responses = self.cli.write_command(Commands.SHORT)
return int(responses[-1], 16)
@short_addr.setter
def short_addr(self, new_address):
""" Currently setting device short address is not supported.
Args:
new_address (int): new device short (network) address
        Raises:
            NotImplementedError: always - the current CLI implementation does not support setting the short address
"""
raise NotImplementedError("Setting device short address is not supported")
def match_desc(self, input_clusters, output_clusters, timeout=80.0,
dst_addr=constants.BROADCAST_ADDRESS_ALL_DEVICES, req_addr=constants.BROADCAST_ADDRESS_ALL_DEVICES,
prof_id=constants.DEFAULT_ZIGBEE_PROFILE_ID) -> List[MatchedEndpoint]:
""" Send the Match Descriptor Request command.
Args:
dst_addr: 16-bit destination address
req_addr: requested address/type
prof_id: profile ID
            input_clusters (list[int]): input cluster IDs
            output_clusters (list[int]): output cluster IDs
timeout (float): maximum time, in seconds, within which CLI should return command response
Raises:
ValueError: if attempts to write unsupported value type
CommandError: if CLI returns error
TimeoutError: if timeout occurred
Returns:
List of MatchedEndpoint
"""
input_cluster = [str(len(input_clusters))] + [hex(e) for e in input_clusters]
output_cluster = [str(len(output_clusters))] + [hex(e) for e in output_clusters]
input_clusters_str = ' '.join(input_cluster)
output_clusters_str = ' '.join(output_cluster)
cmd = Commands.MATCH_DESC.format(dst_addr=dst_addr, req_addr=req_addr, prof_id=prof_id,
input_cluster_ids=input_clusters_str, output_cluster_ids=output_clusters_str)
response = self.cli.write_command(cmd, timeout=timeout)
responses = []
for r in response:
match_response = re.search(r'src_addr=([0-9a-fA-F]+) ep=(\d+)', r)
if match_response:
responses.append(MatchedEndpoint(*match_response.groups()))
return responses
def ieee_addr(self, short_addr, timeout=20.0):
""" Resolve the short network address `short_addr` to an EUI64 address.
Args:
            short_addr (str): Short address of the device
timeout (float): maximum time, in seconds, within which CLI should return command response
Raises:
CommandError: if CLI returns error
TimeoutError: if timeout occurred
Return:
LongAddress: representing device long (EUI64) address.
"""
cmd = Commands.IEEE_ADDR.format(short_addr=short_addr)
response = self.cli.write_command(cmd, timeout=timeout)
return LongAddress(response[-1])
def nwk_addr(self, dst_eui64, timeout=20.0):
"""
Resolve the EUI64 address to a short network address `short_addr`.
Args:
dst_eui64 (int): EUI64 address of the device for which short network address is requested
timeout (float): maximum time, in seconds, within which CLI should return command response
Raises:
CommandError: if CLI returns error
TimeoutError: if timeout occurred
Returns:
            int: device's short address
"""
cmd = Commands.NWK_ADDR.format(dst_eui64=dst_eui64)
response = self.cli.write_command(cmd, timeout=timeout)
short_addr = int(response[-1], 16)
return short_addr
def bind(self, src_eui64, src_ep, dst_eui64, dst_ep, cluster, dst_short):
"""
Send the Bind Request command.
Args:
src_eui64(int): EUI64 address of the source of the binding
src_ep(int): endpoint of the source of the binding
dst_eui64(int): EUI64 address of the destination of the binding
dst_ep(int): endpoint of the destination of the binding
cluster(int): ZCL cluster which is being bound
dst_short(int): short address of where the command should be sent
Raises:
CommandError: if CLI returns error
TimeoutError: if timeout occurred
"""
cmd = Commands.BIND.format(src_eui64=src_eui64, src_ep=src_ep, dst_eui64=dst_eui64, dst_ep=dst_ep,
cluster=cluster, dst_short=dst_short)
self.cli.write_command(cmd, timeout=10.0)
def get_binding_table(self, short_addr, timeout=20.0):
"""
Send the zdo mgmt_bind request command and get binding table
Args:
            short_addr (int): Short address of the target device from which the binding table should be retrieved
Raises:
UnexpectedResponseError: if cli response to mgmt_bind command is malformed and cannot be parsed
CommandError: if CLI returns error
TimeoutError: if timeout occurred
Returns:
ZigbeeBindingTable: Binding table filled up with entries received from given device
"""
cmd = Commands.MGMT_BIND.format(short_addr=short_addr)
response = self.cli.write_command(cmd, timeout=timeout)
result = ZigbeeBindingTable()
expected_idx = 0
class ParsingFsmState(Enum):
reset = 1
header = 2
entries = 3
total = 4
parsing_fsm_state = ParsingFsmState.reset
for r in response:
x = Commands.BINDING_TABLE_HEADER_RE.search(r)
if x is not None:
if parsing_fsm_state != ParsingFsmState.reset:
raise UnexpectedResponseError("Unexpected additional binding table header received")
parsing_fsm_state = ParsingFsmState.header
continue
x = Commands.BINDING_TABLE_ENTRY_RE.search(r)
if x is not None:
if (parsing_fsm_state == ParsingFsmState.header) or (parsing_fsm_state == ParsingFsmState.entries):
received_idx = int(x.group('idx'))
if expected_idx != received_idx:
raise UnexpectedResponseError(f"Unexpected binding table idx field received ({expected_idx} != {received_idx})")
# Handle special cases for destination address/destination endpoint.
if x.group('dst_addr') == "N/A":
dst_addr = -1
else:
dst_addr = int(x.group('dst_addr'), 16)
if x.group('dst_endp') == "N/A":
dst_endp = -1
else:
dst_endp = int(x.group('dst_endp'))
# Construct object from match using groups. This shall not fail as the regex matches
# valid characters only
entry = ZigbeeBindingTableEntry(
src_addr=int(x.group('src_addr'), 16),
src_ep=int(x.group('src_endp')),
cluster_id=int(x.group('cluster_id'), 16),
dst_addr_mode=int(x.group('dst_addr_mode')),
dst_addr=dst_addr,
dst_ep=dst_endp)
result.add(entry)
expected_idx += 1
parsing_fsm_state = ParsingFsmState.entries
elif parsing_fsm_state == ParsingFsmState.reset:
raise UnexpectedResponseError("Binding table entry received, but no header "
"had been received before")
else:
raise UnexpectedResponseError(
"Binding table entry received after binding table recap had been received")
continue
x = Commands.BINDING_TABLE_TOTAL_RE.search(r)
if x is not None:
if parsing_fsm_state == ParsingFsmState.total:
raise UnexpectedResponseError(
"Unexpected extra binding table total entries count received")
else:
total_entries_reported = int(x.group(1))
if total_entries_reported != expected_idx:
raise UnexpectedResponseError("Mismatch of real binding table entries received "
"with reported total entries count")
parsing_fsm_state = ParsingFsmState.total
continue
# Note: We allow other lines with content not matching any of used regex, but we simply ignore them
if parsing_fsm_state != ParsingFsmState.total:
raise UnexpectedResponseError("Missing total binding table entries row")
return result
def get_lqi_table(self, short_addr, timeout=20.0):
"""
Send the zdo mgmt_lqi request command and get lqi table
Args:
            short_addr (int): Short address of the target device from which the LQI table should be retrieved
Raises:
UnexpectedResponseError: if cli response to mgmt_lqi command is malformed and cannot be parsed
CommandError: if CLI returns error
TimeoutError: if timeout occurred
Returns:
            list: list of ZigbeeZdoMgmtLqiEntry objects received from the given device
"""
        # Matches an LQI table entry in the response to the 'zdo mgmt_lqi' CLI command
lqi_table_entry_re = re.compile(
r"\[\s*(?P<idx>\d+)]" # idx field, like: '[ 0]' , '[ 1]', '[120]'
r"\s+"
r"(?P<ext_pan_id>[0-9a-fA-F]{16})" # ext_pan_id field, 16 hex digits
r"\s+"
r"(?P<ext_addr>[0-9a-fA-F]{16})" # ext_addr field, 16 hex digits
r"\s+"
r"0x(?P<short_addr>[0-9a-fA-F]+)" # short_addr, hex number C fmt with 0x prefix, like: '0x104', '0x0104'
r"\s+"
r"0x(?P<flags>[0-9a-fA-F]+)" # flags field, hex number C fmt with 0x prefix, like: '0x104', '0x0104'
r"\s+"
r"(?P<permit_join>\d{1})" # permit_join field, decimal
r"\s+"
r"(?P<depth>\d+)" # tree depth, decimal
r"\s+"
r"(?P<lqi>\d+)") # lqi field, decimal like '0', '1', '153'
lqi_table_entry_header_re = re.compile(r"^\[idx]\s+ext_pan_id\s+ext_addr\s+short_addr\s+flags\s+permit_join\s+depth\s+lqi")
cmd = Commands.MGMT_LQI.format(short_addr=short_addr)
response = self.cli.write_command(cmd, timeout=timeout)
result = []
if not lqi_table_entry_header_re.match(response[0]):
raise UnexpectedResponseError("Header not found")
# Cut off table header
for r in response[1:]:
m = lqi_table_entry_re.match(r)
if m:
entry = ZigbeeZdoMgmtLqiEntry(ext_pan_id=int(m.group('ext_pan_id'), 16),
ext_addr=int(m.group('ext_addr'), 16),
short_addr=int(m.group('short_addr'), 16),
flags=int(m.group('flags'), 16),
permit_join=int(m.group('permit_join')),
depth=int(m.group('depth')),
lqi=int(m.group('lqi')))
result.append(entry)
return result
def mgmt_leave(self, short_addr, device_address=None, rejoin=False, children=False):
"""
Send the zdo mgmt_leave request command
Args:
short_addr (int): Short address of target device
device_address (int): Long address of device to remove from the network. If None, target device will remove itself
rejoin (bool): value of Rejoin flag
children (bool): value of Remove Children flag
Raises:
CommandError: if CLI returns error
TimeoutError: if timeout occurred
"""
        cmd = Commands.MGMT_LEAVE.format(short_addr=short_addr)
if device_address is not None:
cmd += f" {device_address:016X}"
if rejoin:
cmd += " --rejoin"
if children:
cmd += " --children"
        self.cli.write_command(cmd)
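
# Example usage of the ZDO wrapper above (a sketch; assumes `cli` is a connected
# ZbCliDevice exposing this wrapper as `cli.zdo`, by analogy with the `cli.zcl`
# and `cli.bdb` wrappers used in the bundled MQTT gateway example):
#
#   matches = cli.zdo.match_desc(input_clusters=[0x0006], output_clusters=[])
#   for endpoint in matches:
#       print(endpoint.src_add, endpoint.id)
#   binding_table = cli.zdo.get_binding_table(short_addr=0x1234)
#   print(binding_table)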
import re
from zb_cli_wrapper.src.utils.communicator import CommandError
from ..base import CommandWrapper as BaseCmdWrapper
class Role:
""" Enumeration describing a Zigbee device role.
"""
COORDINATOR = 'zc'
ROUTER = 'zr'
END_DEVICE = 'zed'
class Commands:
""" CLI Commands to be used with firmware which includes Zigbee CLI component with BDB commands.
"""
# Main command used in order to access BDB subcommands
MAIN = 'bdb'
# Available BDB commands
ROLE = ' '.join([MAIN, 'role {role}'])
START = ' '.join([MAIN, 'start'])
IC_ADD = ' '.join([MAIN, 'ic add {ic:036X} {eui64:016X}'])
IC_SET = ' '.join([MAIN, 'ic set {ic:036X}'])
IC_POLICY = ' '.join([MAIN, 'ic policy {state}'])
PAN_ID = ' '.join([MAIN, 'panid {panid}'])
CHANNEL_GET = ' '.join([MAIN, 'channel'])
CHANNEL_SET = ' '.join([MAIN, 'channel {bitmask:08X}'])
LEGACY = ' '.join([MAIN, 'legacy {state}'])
FACTORY_RESET = ' '.join([MAIN, 'factory_reset'])
CHILD_MAX_SET = ' '.join([MAIN, 'child_max {max_device_children}'])
class CommandWrapper(BaseCmdWrapper):
""" This class adds an interface for sending BDB commands and receiving parsed
responses through Zigbee CLI by calling methods on a device instance.
"""
def start(self):
""" Start top level commissioning.
Raises:
ValueError: if received result with an unknown formatting
CommandError: if CLI returns error
TimeoutError: if timeout occurred
"""
self.cli.write_command(Commands.START)
@property
def role(self):
""" Reads device role.
Raises:
ValueError: if received result with an unknown formatting
CommandError: if CLI returns error
TimeoutError: if timeout occurred
"""
cmd = Commands.ROLE.format(role='')
responses = self.cli.write_command(cmd)
if responses[0] == str(Role.COORDINATOR):
return Role.COORDINATOR
elif responses[0] == str(Role.ROUTER):
return Role.ROUTER
elif responses[0] == str(Role.END_DEVICE):
return Role.END_DEVICE
else:
raise ValueError("Unrecognized Zigbee role received: {}".format(responses[0]))
@role.setter
def role(self, role):
""" Set the device role.
Args:
            role (str): new device role. Can be either 'zc' or 'zr'
Raises:
CommandError: if CLI returns error
TimeoutError: if timeout occurred
Return:
response
"""
cmd = Commands.ROLE.format(role=role)
return self.cli.write_command(cmd)
@property
def panid(self):
""" Reads PAN ID.
Raises:
ValueError: if received result with an unknown formatting
CommandError: if CLI returns error
TimeoutError: if timeout occurred
"""
cmd = Commands.PAN_ID.format(panid='')
responses = self.cli.write_command(cmd)
return responses[0]
@panid.setter
def panid(self, panid):
""" Set the device PAN ID.
The PAN ID must be set before calling start().
Args:
panid (int): device PAN ID
Raises:
CommandError: if CLI returns error
TimeoutError: if timeout occurred
Return:
response
"""
cmd = Commands.PAN_ID.format(panid=panid)
return self.cli.write_command(cmd)
def ic_add(self, ic, eui64):
""" Add the Install Code to the Trust Center.
Args:
ic (int): Install Code of the device to introduce
eui64 (int): EUI64 address of the device to introduce
Raises:
CommandError: if CLI returns error
TimeoutError: if timeout occurred
Return:
response
"""
cmd = Commands.IC_ADD.format(ic=ic, eui64=eui64)
return self.cli.write_command(cmd)
def ic_set(self, ic):
""" Set the Install Code on the device.
Args:
ic (int): Install Code of the device to introduce
Raises:
CommandError: if CLI returns error
TimeoutError: if timeout occurred
Return:
response
"""
cmd = Commands.IC_SET.format(ic=ic)
return self.cli.write_command(cmd)
def ic_set_policy(self, enabled):
""" Set the Install Code policy of the Trust Center.
Args:
enabled (bool): Whether the policy should be enabled.
Raises:
CommandError: if CLI returns error
TimeoutError: if timeout occurred
Return:
response
"""
cmd = Commands.IC_POLICY.format(state="enable" if enabled else "disable")
return self.cli.write_command(cmd)
def factory_reset(self):
""" Perform factory reset by local action as stated in BDB specification chapter 9.5
Raises:
CommandError: if CLI returns error
TimeoutError: if timeout occurred
"""
self.cli.write_command(Commands.FACTORY_RESET)
@property
def legacy(self):
""" Read the state of Legacy mode.
Raises:
CommandError: if CLI returns error
            TimeoutError: if CLI timeout occurred
Return:
True if enabled, False if disabled
"""
cmd = Commands.LEGACY.format(state="")
response = self.cli.write_command(cmd)
if response[0] == "on":
return True
if response[0] == "off":
return False
raise CommandError("Unknown value")
@legacy.setter
def legacy(self, enabled):
""" Set or unset the legacy (pre ZB 3.0) mode
Args:
enabled (bool): Whether the legacy should be enabled.
Raises:
CommandError: if CLI returns error
TimeoutError: if timeout occurred
Return:
response
"""
cmd = Commands.LEGACY.format(state="enable" if enabled else "disable")
return self.cli.write_command(cmd)
@property
def channel(self):
""" Read CLI channels list.
Raises:
CommandError: if CLI returns error
            TimeoutError: if CLI timeout occurred
Return:
channels (tuple) as a tuple with two lists of channels - primary and secondary
"""
response = self.cli.write_command(Commands.CHANNEL_GET)
        primary_channel_rgx = re.compile(r'(Primary\ *channel\ *\(\ *s\ *\)\s*:\ *)((\d{2}\ *)*)')
        secondary_channel_rgx = re.compile(r'(Secondary\ *channel\ *\(\ *s\ *\)\s*:\ *)((\d{2}\ *)*)')
primary_channel_match = primary_channel_rgx.search(response[0]).groups()
secondary_channel_match = secondary_channel_rgx.search(response[1]).groups()
primary_channels = [int(x) for x in primary_channel_match[1].split()]
secondary_channels = [int(x) for x in secondary_channel_match[1].split()]
return (primary_channels, secondary_channels)
@channel.setter
def channel(self, new_channel_list):
""" Set CLI channels.
Args:
new_channel_list (list, int): list of channels or a single channel to set
Raises:
CommandError: if CLI returns error
TimeoutError: if CLI timeout occurred
Return:
response
"""
if not isinstance(new_channel_list, list):
new_channel_list = [new_channel_list]
channel_bitmask = 0
for channel_nbr in new_channel_list:
channel_bitmask |= (2 ** channel_nbr)
return self.cli.write_command(Commands.CHANNEL_SET.format(bitmask=channel_bitmask))
def child_max(self, child_max_nbr):
""" Set number of children which can be connected to the device.
Args:
child_max_nbr (int): Number of children which the device can be parent to
Raises:
CommandError: if CLI returns error
TimeoutError: if CLI timeout occurred
"""
        self.cli.write_command(Commands.CHILD_MAX_SET.format(max_device_children=child_max_nbr))
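
# Example usage of the BDB wrapper above (a sketch; it mirrors the commissioning
# sequence used by the bundled MQTT gateway example):
#
#   cli.bdb.channel = [11, 15, 20, 25]   # channels to use
#   cli.bdb.role = 'zc'                  # create a network as coordinator
#   cli.bdb.start()                      # start top-level commissioning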
from ..base import CommandWrapper as BaseCmdWrapper
class Commands:
""" CLI Commands to be used with firmware which includes Logging module.
"""
# Main command used in order to access LOG subcommands
MAIN = 'log'
# Available LOG commands
ENABLE = ' '.join([MAIN, 'enable {level} {module}'])
class CommandWrapper(BaseCmdWrapper):
""" This class adds an interface for sending LOG commands and receiving parsed
responses through Zigbee CLI by calling methods on a device instance.
"""
def enable_logs(self, module, level="info"):
""" Enable the logs printing in the CLI
Args:
module(str): Module which to turn on the report.
level(str): Level of the logging to turn on.
"""
cmd = Commands.ENABLE.format(module=module, level=level)
self.cli.write_command(cmd, wait_for_success=False)
def gather_logs(self, aquisition_time=0):
""" Gather the logs and also clears the received logs.
Args:
aquisition_time: Time to collect logs
Returns: the list of captured logs. Every log is a dictionary of
module, level and string of the log.
"""
logs = self.cli.retrieved_logs(aquisition_time=aquisition_time)
self.cli.empty_logs()
return logs
def collect_logs_start(self):
""" Start collecting logs for undefined period of time from the time of this function is called,
lets clear buffers first:
- clear buffer
- clear _received_logs
"""
self.cli.clear()
self.cli.empty_logs()
def collect_logs_stop(self):
""" Returns logs collected from the time collect_logs_start was called.
After that, clear also buffers and collected logs.
Returns:
List with received logs.
"""
self.cli._wait_until_true(self.cli._check_if_command_finished, timeout=0.05)
logs = self.cli.received_logs()
self.cli.clear()
self.cli.empty_logs()
        return logs
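
# Example usage of the LOG wrapper above (a sketch; the attribute name `log` on the
# CLI device object and the module name are assumptions):
#
#   cli.log.enable_logs('app', level='info')
#   cli.log.collect_logs_start()
#   # ... exercise the device ...
#   logs = cli.log.collect_logs_stop()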
from binascii import hexlify
import numpy as np
from zb_cli_wrapper.src.utils.cmd_wrappers.zigbee import constants
TYPE_MAP = {
constants.TYPES.UINT8: lambda value: hexlify(np.uint8(value)).decode(),
constants.TYPES.UINT16: lambda value: hexlify(np.uint16(value)).decode(),
constants.TYPES.UINT32: lambda value: hexlify(np.uint32(value)).decode(),
constants.TYPES.UINT64: lambda value: hexlify(np.uint64(value)).decode(),
constants.TYPES.SINT8: lambda value: hexlify(np.int8(value)).decode(),
    constants.TYPES.SINT16: lambda value: hexlify(np.int16(value)).decode(),
    # SINT32 appears to have been omitted from the original map; added for completeness.
    constants.TYPES.SINT32: lambda value: hexlify(np.int32(value)).decode(),
    constants.TYPES.SINT64: lambda value: hexlify(np.int64(value)).decode(),
constants.TYPES.ENUM8: lambda value: hexlify(np.uint8(value)).decode(),
constants.TYPES.STRING: lambda value: f"{len(value):02X}" + hexlify(value.encode("ascii")).decode(),
}
class Attribute(object):
def __init__(self, cluster, id, type, value=0, name="unknown"):
self.cluster = cluster
self.id = id
self.type = type
        # If the value is an unsigned int, signed int or enum8 type
if self.type in range(constants.TYPES.UINT8, constants.TYPES.ENUM8+1):
self.value = int(value)
# If value is bool type
elif self.type is constants.TYPES.BOOL:
self.value = self.to_bool(value)
else:
            # Types other than int and bool are stored without parsing
self.value = value
self.name = name
@property
def formatted_id(self) -> str:
return hexlify(np.uint16(self.id)).decode()
@property
def formatted_value(self) -> str:
to_call = TYPE_MAP.get(self.type)
if to_call:
return to_call(self.value)
raise NotImplementedError(f"Formatting type {self.type} is not implemented")
def __repr__(self):
return "Attribute {}: {}".format(self.name, self.value)
@staticmethod
def to_bool(bool_to_parse):
if str(bool_to_parse).lower() in ['true', 'yes', 'y', '1']:
return True
elif str(bool_to_parse).lower() in ['false', 'no', 'n', '0']:
return False
else:
return bool_to_parse
class StatusRecord:
SUCCESS = 0x00
INSUFFICIENT_SPACE = 0x89
def __init__(self, attribute: Attribute, status_code):
self.attribute = attribute
self.status_code = status_code
def to_hex(self) -> str:
hex_ = f"{self.attribute.formatted_id}{self.status_code:02X}"
if self.status_code == self.SUCCESS:
hex_ += f"{self.attribute.type:02X}{self.attribute.formatted_value}"
        return hex_
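
# Example (a sketch): a Level Control CurrentLevel attribute and the hex strings
# produced by the formatting helpers above (IDs follow the ZCL specification):
#
#   attr = Attribute(cluster=0x0008, id=0x0000, type=constants.TYPES.UINT8,
#                    value=128, name="CurrentLevel")
#   attr.formatted_id     # -> '0000'
#   attr.formatted_value  # -> '80'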
import re
import winreg
import logging
import traceback
from collections import namedtuple
from zb_cli_wrapper.nrf_dev_map.nrfmap_common import Vendor, ComPortMapAbstract, MBED_ID_LENGTH
# Serial number can be SEGGER (eg. 683512372) or CDC serial number (eg. F8C8284B1C1E)
UsbId = namedtuple('UsbId', 'Id SerialNumber')
class ComPortMapWindows(ComPortMapAbstract):
''' See ComPortMapAbstract for public functions documentation '''
VENDOR_IDS = {
Vendor.Segger: '1366',
Vendor.CDC: '1915'
}
''' USB VID hex values '''
@classmethod
def get_registered_boards_ids(cls, vendors):
return [board_id for board_id, _ in cls._get_usb_id_for_serial_num_gen(vendors)]
@classmethod
def get_iter(cls, vendors):
usb_id_for_serial_num = cls._get_usb_id_for_serial_num_gen(vendors)
yield from cls._create_com_port_map_gen(usb_id_for_serial_num)
@classmethod
def _get_usb_id_for_serial_num_gen(cls, vendors):
"""
:return: For example '683512372', UsbId(Id='VID_1366&PID_1015', SerialNumber='000683512372')
"""
if not isinstance(vendors, list):
vendors = [vendors]
regex = re.compile(r'USB\\VID_({vendor_ids})&PID_[\w]+\\([\w]+)'.format(
vendor_ids='|'.join(cls.VENDOR_IDS[v] for v in vendors)))
enum_usbccgp = r'SYSTEM\CurrentControlSet\Services\usbccgp\Enum'
enum_mbedComposite = r'SYSTEM\CurrentControlSet\Services\mbedComposite\Enum'
def get_device(enum_key):
number_of_values = winreg.QueryInfoKey(enum_key)[1]
for i in range(number_of_values):
value_name, value_data, _ = winreg.EnumValue(enum_key, i)
if value_name.isdigit(): # device 0, 1, 2...
m = regex.match(value_data)
if m:
id_parts = value_data.split('\\') # ['USB', 'VID_XXXX&PID_XXXX', 'SERIAL_NUMBER']
vid, board_id = m.groups()
if vid == cls.VENDOR_IDS[Vendor.Segger]:
board_id = board_id.lstrip('0')
yield board_id, UsbId(id_parts[1], id_parts[2])
for enum in [enum_usbccgp, enum_mbedComposite]:
try:
enum_key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, enum)
yield from get_device(enum_key)
except OSError:
logging.debug("Serial service unavailible: {service}".format(service=enum))
@classmethod
def _create_com_port_map_gen(cls, usb_id_for_serial):
for snr, usb_id in usb_id_for_serial:
pid = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
'SYSTEM\\CurrentControlSet\\Enum\\USB\\' + usb_id.Id + '\\' +
usb_id.SerialNumber)
try:
parent_id_prefix = winreg.QueryValueEx(pid, "ParentIdPrefix")[0]
except OSError:
# Assume the ParentIdPrefix is the snr (From the mbedSerial_x64 driver)
parent_id_prefix = usb_id.SerialNumber
com_ports_by_index = {}
com_ports_by_mi = {}
com_ports_count = 0
key_usb = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SYSTEM\CurrentControlSet\Enum\USB')
n_subkeys, _, _ = winreg.QueryInfoKey(key_usb)
for i in range(n_subkeys):
key_name = winreg.EnumKey(key_usb, i)
                m = re.match(usb_id.Id + r'&MI_([\w]+)', key_name)
if m:
(multiple_interface, ) = m.groups()
comPortEntryKey = 'SYSTEM\\CurrentControlSet\\Enum\\USB\\' + usb_id.Id + \
"&MI_" + multiple_interface + "\\" + parent_id_prefix + \
("&00" + multiple_interface if len(parent_id_prefix) < MBED_ID_LENGTH else "")
# If parent_id_prefix is less than 48 chars it was procured from usbccgp rather than
# mbedCompositeEnum. usbccgp keeps the MI in the signature
try:
comPortEntry = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, comPortEntryKey)
comPortValue = str(winreg.QueryValueEx(comPortEntry, "FriendlyName")[0])
com_ports_by_index[com_ports_count] = com_ports_by_mi[multiple_interface] = \
comPortValue[comPortValue.index('(') + 1: comPortValue.index(')')]
com_ports_count += 1
except OSError:
logging.debug('No COM port found for %s (%s)', comPortEntryKey, traceback.format_exc())
except ValueError:
logging.debug('comPortValue.index: COM port not found in "%s"', comPortValue)
com_ports = cls.empty_com_ports_dict()
com_ports.update(com_ports_by_index)
com_ports.update(com_ports_by_mi)
            yield snr, com_ports
import os, re
import logging
import itertools
from pathlib import Path
from zb_cli_wrapper.nrf_dev_map.nrfmap_common import Vendor, ALL_VENDORS, ComPortMapAbstract
class ComPortMapLinux(ComPortMapAbstract):
''' See ComPortMapAbstract for public functions documentation '''
VENDOR_NAMES = {
Vendor.Segger: 'usb-SEGGER_J-Link_',
Vendor.CDC: 'usb-Nordic_Semiconductor_.*_' #note: regex here
}
@classmethod
def get_registered_boards_ids(cls, vendors):
return [board_id for board_id, _ in cls._create_com_port_map_gen(vendors)]
@classmethod
def get_iter(cls, vendors):
yield from cls._create_com_port_map_gen(vendors)
@classmethod
def dev_usb_iter(cls, vendors=ALL_VENDORS):
'''
Get generator for board id - usb device path mapping
:param vendors: One or multiple vendors
:return: pairs like '683756583': '/dev/bus/usb/002/011'
'''
for board_id, com_ports in cls._create_com_port_map_gen(vendors):
sys_class_tty = Path('/sys/class/tty') / Path(com_ports[0]).name # /sys/class/tty/ttyACM0
sys_devices_usb_bus_port = sys_class_tty.resolve() / '../../..' # /sys/devices/pci0000:00/0000:00:0b.0/usb1/1-2
busnum = (sys_devices_usb_bus_port / 'busnum').read_text() # NB: only since Python 3.5
busnum = int(busnum)
devnum = (sys_devices_usb_bus_port / 'devnum').read_text()
devnum = int(devnum)
yield board_id, '/dev/bus/usb/%03d/%03d' % (busnum, devnum)
@classmethod
def _create_com_port_map_flat_gen(cls, vendors):
if not isinstance(vendors, list):
vendors = [vendors]
        regex = re.compile(r'({vendor_names})([\d\w]+)-if([\w]+)'.format(
vendor_names='|'.join(cls.VENDOR_NAMES[v] for v in vendors)))
DEVPATH = '/dev/serial/by-id/'
try:
for dev in sorted(os.listdir(DEVPATH)):
match = regex.match(dev)
if match:
vendor_name, board_id, multiple_interface = match.groups()
if vendor_name == cls.VENDOR_NAMES[Vendor.Segger]:
board_id = board_id.lstrip('0')
com_port = os.path.realpath(DEVPATH + dev) #follow symlink to get target path
yield board_id, multiple_interface, com_port
except IOError as e:
logging.debug('ComPortMap IOError: %s', str(e))
@classmethod
def _create_com_port_map_gen(cls, vendors):
# Convert flat mapping (multiple_interface - com_port) to com_ports dict via
# grouping by board_tuple[0] (which is board_id)
for board_id, com_port_tuple in itertools.groupby(cls._create_com_port_map_flat_gen(vendors), lambda board_tuple: board_tuple[0]):
com_ports_by_index = {}
com_ports_by_mi = {}
for index, (_, multiple_interface, com_port) in enumerate(com_port_tuple, 0):
com_ports_by_index[index] = com_ports_by_mi[multiple_interface] = com_port
com_ports = cls.empty_com_ports_dict()
com_ports.update(com_ports_by_index)
com_ports.update(com_ports_by_mi)
            yield board_id, com_ports
from enum import Enum
from abc import ABC, abstractmethod
from collections import defaultdict
from zb_cli_wrapper.nrf_dev_map.case_insensitive_dict import CaseInsensitiveDict
Vendor = Enum('Vendor', ['Segger', 'CDC'])
ALL_VENDORS = [v for v in Vendor]
MBED_ID_LENGTH = 48
class ComPortMapAbstract(ABC):
@classmethod
@abstractmethod
def get_registered_boards_ids(cls, vendors) -> 'List[str]':
'''
Get list of ids for devices registered in the system
:param: vendors: One or multiple vendors
:return: a list like ['683756583', '683011151'] or
['683512372', 'F8C8284B1C1E', '1102000044203120334c3941313034203431303397969903']
'''
pass #To be implemented in descendants
@classmethod
def get_com_ports_by_id(cls, board_id, vendors=ALL_VENDORS):
'''
Get COM port(s) for device with given id
:param vendors: One or multiple vendors
:return: a dict like {0: 'COM4', '00': 'COM4'} or
{0: 'COM10', 1: 'COM8', 2: 'COM9', '00': 'COM10', '02': 'COM8', '04': 'COM9'}.
Note that the dict is double-indexed by integer and two-digit value which corresponds to Multiple Interface
'''
id_lower = board_id.lower()
return next((v for k, v in cls.get_iter(vendors) if k.lower() == id_lower),
cls.empty_com_ports_dict())
@classmethod
def get(cls, vendors):
'''
Get full device id - COM port mapping
:param vendors: One or multiple vendors
:return: a dict like
{
'683756583': {0: 'COM8', '00': 'COM8'},
'960014618': {0: 'COM10', 1: 'COM8', 2: 'COM9', '00': 'COM10', '02': 'COM8', '04': 'COM9'},
1102000044203120334c3941313034203431303397969903': {0: 'COM5', '01': 'COM5'}
}
'''
result = CaseInsensitiveDict()
for board_id, com_ports in cls.get_iter(vendors):
result[board_id] = com_ports
return result
@classmethod
@abstractmethod
def get_iter(cls, vendors) -> 'Iterator[Tuple[str, dict]]':
'''
Similar to `get`, but returns generator
'''
pass #To be implemented in descendants
@classmethod
def empty_com_ports_dict(cls):
''' Empty dict for when no com ports were found (yet) '''
        return defaultdict(type(None))
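
# Example usage (a sketch; pick the platform-specific subclass of ComPortMapAbstract):
#
#   from zb_cli_wrapper.nrf_dev_map.nrfmap_windows import ComPortMapWindows
#   ids = ComPortMapWindows.get_registered_boards_ids(ALL_VENDORS)
#   ports = ComPortMapWindows.get_com_ports_by_id(ids[0])
#   ports[0]  # first COM port of the board, e.g. 'COM10'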
# MQTT Zigbee gateway application
The MQTT Zigbee Gateway application uses Zigbee CLI example to control Zigbee devices through the MQTT protocol.
The connection and communication with CLI is handled through the zb_cli_wrapper module.
The gateway maps the basic Zigbee clusters of devices to the MQTT topics.
- The devices are specified in the [configuration file](#configuration-file).
- The MQTT topics follow a specific [topic structure pattern](#topic-structure-pattern).
They are used to store resources that are then passed to the gateway application.
## Configuration file
The application reads the following configuration data from the `config.yaml` file:
- MQTT connection parameters
- List of devices to control with the device parameters
- MQTT topics to subscribe to
- CLI configuration
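
A minimal sketch of such a file is shown below. The key names match what `MQTT_Zigbee_gateway.py` reads; all values are placeholders, and only one of `segger`, `cdc_serial`, or `com_port` needs to be given under `CLIDevice`:
```yaml
MQTTClient:
  BROKER_ADDRESS: 127.0.0.1
  BROKER_PORT: 1883
  BIND_ADDRESS: ''
  MQTT_CLIENT_NAME: gateway
  MQTT_CLIENT_PASSWD: password
  MQTT_RECONNECT_TRIES: 5
CLIDevice:
  segger: 683512372
  channels: [11, 15, 20, 25]
  role: 'zc'
device:
  - alias: light1
    eui64: 0x0B010E2F79E69DBF
    ep: 10
    subscribe:
      - home/light1/on_off/state
      - home/light1/lvl_ctrl/lvl
```
Note that the gateway source reads the brightness topic as `home/<alias>/lvl_ctrl/lvl`, so that attribute name is used here.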
## Topic structure pattern
The MQTT topics have the following pattern:
```
home/device_alias/cluster_name/attribute_name
```
In this pattern, the following values are used in the gateway application:
- `device_alias` is the unique name for the device
    - The device is specified with a long EUI64 address and an endpoint. The same address with a different endpoint is treated as a separate device.
- `cluster_name` is the name of the Zigbee cluster, and `attribute_name` is the name of the Zigbee attribute:
- `on_off` represents the On/Off cluster.
- `state` is the OnOff attribute.
- `lvl_ctrl` represents the Level cluster.
- `level` is the CurrentLevel attribute.
- `color_ctrl` represents the Color Control cluster.
- MQTT stores color-related information in the RGB color space, and the application translates it to the corresponding Zigbee HSV value.
For this reason, MQTT stores color values in three separate attribute names: `r`, `g`, `b` (red, green, and blue, respectively).
- `door_lock` represents the Door Lock cluster.
    - `lock_state` is the LockState attribute.
The application subscribes to the MQTT topics specified in the `config.yaml` file. For each topic value change, the application sends a ZCL command.
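
For example, publishing the value `1` to `home/light1/on_off/state` (with `light1` configured as a device alias) makes the gateway queue a ZCL On command (On/Off cluster, command ID `0x01`) for that device. With the Mosquitto client tools this could look as follows:
```
mosquitto_pub -t home/light1/on_off/state -m 1
```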
## Prerequisites
The application requires the following software:
- Python (version 3.7 or newer) with several Python modules.
- To install the required Python modules, run the following command:
```
pip install paho-mqtt pyyaml setuptools
```
- zb-cli-wrapper module. To install the zb-cli-wrapper module, run the `setup.py` script from the zb-cli-wrapper package directory:
```
python setup.py install
```
## Configuring the application behavior
See the following table for the default application behavior and recommended actions for configuring it.
| Step | Application behavior | Recommended actions |
|-------|---------------------------|---------------------|
| 1 | Tries to connect to the broker present on the local host. It closes if it fails to connect to the broker. | Make sure the broker is running at the local host, or configure the broker address in `config.yaml`. |
| 2 | Tries to connect to the CLI device by its SEGGER number, CDC serial number, or the com port number specified in `config.yaml`. It closes if it fails to open the com port. | Set the correct value of your board SEGGER number or serial number, or of the com port your board is connected to.|
| 3 | Uses the Zigbee channels list in `config.yaml`. | Set the correct channels in range from 11 to 26. If you are not sure which channel to use, set all of them.|
| 4 | Uses the Zigbee role defined in `config.yaml`. | Set the role of the CLI device you need: if you have a Zigbee network, use the router role: `'zr'`; if you do not have a Zigbee network, use the coordinator role to create network: `'zc'`. |
| 5 | Uses the pre-defined Zigbee devices. | Provide the correct long addresses and endpoints of devices you want to control. Check `readme.md` of the `zb_cli_wrapper` module to learn how to detect Zigbee devices and get their addresses and endpoints.<br>For every device you want to control provide topics to use. Check [this section](#topic-structure-pattern) to learn how to generate topics. |
## Running the application
You can run the application by calling Python with the application file name as argument:
```bash
python MQTT_Zigbee_gateway.py
``` | zb-cli-wrapper | /zb-cli-wrapper-0.3.zip/zb-cli-wrapper-0.3/example/readme.md | readme.md |
import os
import sys
import time
import queue
import serial
import logging
import colorsys
import threading
import paho.mqtt.client as mqtt
from yaml import safe_load
from math import ceil
from zb_cli_wrapper.zb_cli_dev import ZbCliDevice
from zb_cli_wrapper.src.utils.cmd_wrappers.zigbee.constants import *
# Configure logging for standard output with message formatted.
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(filename)s: %(levelname)s: %(message)s')
ROOT_PATH = os.path.dirname(os.path.realpath(__file__))
# Class to store data for ZCL commands, with auxiliary methods.
class ZCLRequestData:
def __init__(self, eui64, ep, cluster, cmd_id, def_resp, payload, profile=DEFAULT_ZIGBEE_PROFILE_ID):
self.eui64 = eui64
self.ep = ep
self.cluster = cluster
self.cmd_id = cmd_id
self.def_resp = def_resp
self.payload = payload
self.profile = profile
def __str__(self):
return ("EUI64={:016X} EP={} CLUSTER={:04X} CMD={:04X} RESP={} PAYLOAD={}".format(self.eui64, self.ep,
self.cluster,
self.cmd_id, self.def_resp,
self.payload))
@staticmethod
def set_on_off_state(device, state):
"""
Parse specified arguments and return the object ZCLRequestData with command to send.
Args:
device (dict): Dictionary containing 'eui64' and 'ep' as key-value pairs.
state (int): State of the Zigbee On/Off attribute to be set.
Return:
Object (ZCLRequestData) with the ZCL data.
"""
# Check if state is int in range 0-1
assert state in range(2), "Argument value out of range"
return ZCLRequestData(eui64=device['eui64'], ep=device['ep'], cluster=ON_OFF_CLUSTER, cmd_id=int(state),
def_resp=True, payload=None)
@staticmethod
def move_to_level_control(device, level):
"""
Parse specified arguments and return the object ZCLRequestData with command to send.
Args:
device (dict): Dictionary containing 'eui64' and 'ep' as key-value pairs.
level (int): Level of the Zigbee Level attribute to be set.
Return:
Object (ZCLRequestData) with the ZCL data.
"""
# Check if level is int in range 0-255
assert level in range(256), "Argument value out of range"
return ZCLRequestData(eui64=device['eui64'], ep=device['ep'], cluster=LVL_CTRL_CLUSTER,
cmd_id=LVL_CTRL_MV_TO_LVL_CMD, def_resp=True, payload=[(int(level), TYPES.UINT8), (1, TYPES.UINT16)])
@staticmethod
def move_to_hue_saturation(device, hue, saturation):
"""
Parse specified arguments and return the object ZCLRequestData with command to send.
Args:
device (dict): Dictionary containing 'eui64' and 'ep' as key-value pairs.
hue (int): Value of the current_hue attribute to be set.
saturation (int): Value of the current_saturation attribute to be set.
Return:
Object (ZCLRequestData) with the ZCL data.
"""
# Check if hue, saturation is int in range 0-254
assert hue in range(255), "Argument value out of range"
assert saturation in range(255), "Argument value out of range"
return ZCLRequestData(eui64=device['eui64'], ep=device['ep'],
cluster=COLOR_CTRL_CLUSTER, cmd_id=COLOR_CTRL_MV_TO_HUE_SAT_CMD, def_resp=True,
payload=[(int(hue), TYPES.UINT8), (int(saturation), TYPES.UINT8), (1, TYPES.UINT16)])
@staticmethod
def set_lock_state(device, lock_state):
"""
Parse specified arguments and return the object ZCLRequestData with command to send.
Args:
device (dict): Dictionary containing 'eui64' and 'ep' as key-value pairs.
lock_state (int): Value of the lock_state attribute to be set.
Return:
Object (ZCLRequestData) with the ZCL data.
"""
# Check if lock_state is int in range 0-1
assert lock_state in range(2), "Argument value out of range"
return ZCLRequestData(eui64=device['eui64'], ep=device['ep'], cluster=DOOR_LOCK_CLUSTER,
cmd_id=int(lock_state), def_resp=False, payload=None)
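
# Example (a sketch): building and queueing a request by hand, outside the MQTT
# callback; the address and endpoint below are placeholders:
#
#   device = {'eui64': 0x0B010E2F79E69DBF, 'ep': 10}
#   cli_thread.zcl_data_queue.put(ZCLRequestData.set_on_off_state(device, 1))  # 1 = On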
class ZBCLIThread(threading.Thread):
def __init__(self, cli_device):
threading.Thread.__init__(self)
# Queue to store ZCL command data. By default, maxsize=0 (that is, the queue size is infinite).
self.zcl_data_queue = queue.Queue()
# Create dictionary to store the last published value of each topic (key).
self.attr_value = {}
self.CLI_THREAD_SLEEP_S = 0.073
self.cli = cli_device
def run(self):
"""
Thread worker function for sending Zigbee commands.
"""
while True:
item = self.zcl_data_queue.get(block=True)
if item is None:
logging.info("Item taken from the queue is None. Trying to close CLI.")
self.cli.close_cli()
sys.exit()
            # If the queue object is not of the ZCLRequestData type, close CLI and terminate the thread.
if not isinstance(item, ZCLRequestData):
logging.info("Item taken from the queue is not of the ZCLRequestData type. Closing CLI.")
self.cli.close_cli()
sys.exit()
logging.info("CLI cmd send : {}".format(str(item)))
self.cli.zcl.generic(eui64=item.eui64, ep=item.ep, cluster=item.cluster,
cmd_id=item.cmd_id, payload=item.payload, profile=item.profile)
self.zcl_data_queue.task_done()
# Sleep after every item taken from the queue.
time.sleep(self.CLI_THREAD_SLEEP_S)
def stop(self):
"""
Stop worker thread and CLI thread.
"""
# Put None to stop worker thread and CLI thread.
self.zcl_data_queue.put(None)
# Wait for CLI to close and for worker thread to stop.
self.cli.get_cli().join(timeout=5)
self.join(timeout=5)
def on_message(*args):
"""
Handler for on message MQTT callback.
Args:
            args (list): list of arguments; 'self' is bound to the instance, while 'client', 'user_data' and 'msg' are supplied by the MQTT client.
"""
self, client, user_data, msg = args
# Check whether the last value for specified topic exists.
if msg.topic not in self.attr_value:
self.attr_value[msg.topic] = float(-1)
logging.info("No last value stored for topic: {}".format(msg.topic))
# Exit earlier if topic value has not changed.
if self.attr_value[msg.topic] == float(msg.payload):
return
self.attr_value[msg.topic] = float(msg.payload)
try:
prefix, device_alias, cluster_name, topic_rest = msg.topic.split('/', maxsplit=3)
except ValueError:
try:
prefix, device_alias, cluster_name = msg.topic.split('/')
except ValueError:
logging.info("Problem splitting topic: {}. Topic is omitted".format(msg.topic))
return
device = get_device_by_alias(device_alias)
# Exit if no known device is found.
if device is None:
return
logging.info("Message - Topic: {} ,Payload: {}".format(msg.topic, msg.payload))
if cluster_name == "lvl_ctrl":
self.zcl_data_queue.put(
ZCLRequestData.move_to_level_control(device, ceil(self.attr_value[msg.topic] * 255.0 / 100.0)))
elif cluster_name == "on_off":
self.zcl_data_queue.put(
ZCLRequestData.set_on_off_state(device, ceil(self.attr_value[msg.topic])))
        # By default, the command is sent with no payload (works only with locks that do not require a PIN).
elif cluster_name == "door_lock":
self.zcl_data_queue.put(
ZCLRequestData.set_lock_state(device, ceil(self.attr_value[msg.topic])))
elif cluster_name == "color_ctrl":
red = self.attr_value["home/{}/{}/r".format(device_alias, cluster_name)]
green = self.attr_value["home/{}/{}/g".format(device_alias, cluster_name)]
blue = self.attr_value["home/{}/{}/b".format(device_alias, cluster_name)]
brightness = self.attr_value["home/{}/lvl_ctrl/lvl".format(device_alias)]
# Convert color from rgb color_space to hsv color_space, scaled to the specified brightness.
hue, saturation, value = colorsys.rgb_to_hsv(red / 100.0 * brightness / 100.0,
green / 100.0 * brightness / 100.0,
blue / 100.0 * brightness / 100.0)
self.zcl_data_queue.put(ZCLRequestData.move_to_hue_saturation(device, ceil(hue * 254.0),
ceil(saturation * 254.0)))
def get_device_by_alias(alias):
"""
Find and return dictionary with device parameters. Search based on the specified alias.
Args:
alias (string): Alias of the device parameters to find.
Return:
Dictionary (dict) with device parameters.
"""
for device in config['device']:
if device['alias'] == alias:
return device
return None
def connect_to_broker(client, broker_address, broker_port, bind_address, reconnect_tries):
"""
Connect to MQTT broker with multiple reconnect attempts (tries).
Args:
client (object): MQTT client object.
broker_address (string): Broker address to connect to.
broker_port (int): Broker port to connect to.
bind_address (string): IP address of a local network interface to bind to if multiple interfaces exist.
reconnect_tries (int): Number of the reconnect attempts (tries).
"""
for x in range(reconnect_tries):
try:
client.connect(broker_address, port=broker_port, bind_address=bind_address)
except ConnectionRefusedError:
if x >= (reconnect_tries - 1):
logging.info('Can not connect to broker')
return False
else:
logging.info("Trying to connect to broker. Attempts left: {:d}".format(reconnect_tries - x - 1))
time.sleep(1)
else:
# If connected to broker, break.
return True
def create_cli_device(config_dict):
"""
Create CLI device with parameters specified as argument. Return the created CLI device object.
Args:
config_dict (dict): Dictionary with configuration parameters.
Return:
CLI device object (ZbCliDevice), created and connected. If error occurs, return None.
"""
# Read config_dict and prepare parameter for creating ZbCliDevice
param = {}
for cli_id in ['segger', 'cdc_serial', 'com_port']:
if cli_id in config_dict['CLIDevice']:
param[cli_id] = config_dict['CLIDevice'][cli_id]
    cli_dev = None
    try:
        cli_dev = ZbCliDevice(**param)
        cli_dev.bdb.channel = config_dict['CLIDevice']['channels']
        cli_dev.bdb.role = config_dict['CLIDevice']['role']
    except serial.serialutil.SerialException:
        logging.info('Can not create CLI device')
        # Close the CLI only if the device object was actually created; calling
        # close_cli() on an unbound name would raise a NameError here.
        if cli_dev is not None:
            cli_dev.close_cli()
        return None
logging.info("CLI device created, trying to connect ...")
# Start commissioning.
cli_dev.bdb.start()
return cli_dev.wait_until_connected()
def main(config_dict):
"""
Application main function.
Args:
config_dict (dict): Dictionary with configuration parameters.
"""
# Create the MQTT client.
client = mqtt.Client(client_id='MQTT Zigbee gateway', userdata='')
client.username_pw_set(config_dict['MQTTClient']['MQTT_CLIENT_NAME'],
password=config_dict['MQTTClient']['MQTT_CLIENT_PASSWD'])
# Try to connect to broker.
if not connect_to_broker(client,
config_dict['MQTTClient']['BROKER_ADDRESS'],
config_dict['MQTTClient']['BROKER_PORT'],
config_dict['MQTTClient']['BIND_ADDRESS'],
config_dict['MQTTClient']['MQTT_RECONNECT_TRIES']):
logging.info("Exiting...")
sys.exit()
logging.info("Connected to broker. Starting CLI...")
# If connected to broker, try to create the CLI communicator.
cli_dev = create_cli_device(config)
if not cli_dev:
logging.info("Problem with creating the CLI communicator. Exiting...")
sys.exit()
# Create the ZBCLIThread object.
try:
cli_thread = ZBCLIThread(cli_dev)
cli_thread.start()
except RuntimeError:
logging.info("Error while creating thread.")
cli_thread.stop()
sys.exit()
# Assign the cli_thread.on_message handle function to MQTT client on message function.
client.on_message = cli_thread.on_message
# Subscribe to topics stored in the configuration dictionary.
for device in config['device']:
for topic in device['subscribe']:
client.subscribe(topic)
logging.info("Client started.")
"""
Start handling MQTT messages. Periodically check if CLI threads are alive.
The except for Keyboard Interrupt allows for closing CLI before closing the application.
"""
try:
while True:
client.loop(timeout=0.5)
if not cli_dev.get_cli().is_alive() or not cli_thread.is_alive():
logging.info('Problem with CLI communication - thread not alive. Closing...')
# Try to close the communicator and wait for threads to close.
cli_thread.stop()
break
except KeyboardInterrupt:
logging.info("Keyboard interrupt. Closing...")
# Try to close the communicator and wait for threads to close.
cli_thread.stop()
return
if __name__ == '__main__':
# Load the device configuration from config.yaml into config.
with open(os.path.join(ROOT_PATH, 'config.yaml')) as f:
        config = safe_load(f.read())
    main(config)
# zb
[`zb`](https://pypi.org/project/zb/) is a Python module that contains some
tools and algorithms I have implemented.
If you are interested in this module, you can install it with `pip install zb`.
## Tools
* Attribute-style dicts
```python
from zb import AttrDict, OrderedAttrDict
# AttrDict is a dict that can get attribute by dot
d1 = AttrDict(x=1, y=2)
print(d1, '\n', d1.x, d1.y)
# OrderedAttrDict is same as AttrDict, but items are ordered
d2 = OrderedAttrDict(x=1, y=2)
print(d2, '\n', d2.x, d2.y)
```
* Extract text from a PDF file - `zb.tools.pdf.pdf2text`
```python
from zb.tools import pdf2text
pdf_path = "test.pdf"
pdf_url = "http://www.cninfo.com.cn/cninfo-new/disclosure/szse/download/1205276701?announceTime=2018-08-11"
text = pdf2text(pdf_path)
```
from __future__ import absolute_import, division, print_function, unicode_literals
import time  # standard-library time; the original imported time from a local utils package
import tensorflow as tf
import os
class GRU(object):
    '''
    GRU-based generative model.
    '''
def __init__(self, vocab_size, embedding_dim, rnn_units, batch_size, buffer_size=10000,
checkpoint_dir='./training_checkpoints'):
        '''
        Create the model.
        :param vocab_size: vocabulary size, i.e. the total number of distinct tokens/features
        :param embedding_dim: dimension of the word embeddings
        :param rnn_units: number of hidden units in the GRU layer
        :param batch_size: batch size
        :param buffer_size: size of the dataset shuffle buffer
        '''
self.vocab_size = vocab_size
self.embedding_dim = embedding_dim
self.rnn_units = rnn_units
self.batch_size = batch_size
self.buffer_size = buffer_size
        # Default checkpoint location
self.checkpoint_dir = checkpoint_dir
self.checkpoint_prefix = os.path.join(self.checkpoint_dir, 'ckpt_{epoch}')
def loss(self, labels, logits):
return tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)
def __call__(self, dataset):
self.dataset = dataset
self.optimizer = tf.keras.optimizers.Adam()
self.model = self.build_model()
return self.model
def build_model(self, vocab_size='', embedding_dim='', rnn_units='', batch_size=0):
"""构建模型并返回"""
        # Fall back to the values provided at construction time.
vocab_size = vocab_size or self.vocab_size
embedding_dim = embedding_dim or self.embedding_dim
rnn_units = rnn_units or self.rnn_units
batch_size = batch_size or self.batch_size
        model = tf.keras.Sequential([
            # Embedding layer
            tf.keras.layers.Embedding(vocab_size, embedding_dim, batch_input_shape=[batch_size, None]),
            # GRU layer
            tf.keras.layers.GRU(rnn_units, return_sequences=True, stateful=True,
                                recurrent_initializer='glorot_uniform'),
            # Dense layer projecting back to the vocabulary size
            tf.keras.layers.Dense(vocab_size)
        ])
return model
def train_step(self, inp, target):
with tf.GradientTape() as tape:
predictions = self.model(inp)
loss = tf.reduce_mean(
tf.keras.losses.sparse_categorical_crossentropy(target, predictions, from_logits=True))
grads = tape.gradient(loss, self.model.trainable_variables)
self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
return loss
def train(self, epochs: int = 10):
for epoch in range(epochs):
start = time.time()
            self.model.reset_states()  # reset the stateful GRU between epochs
for (batch_n, (inp, target)) in enumerate(self.dataset):
loss = self.train_step(inp, target)
if batch_n % 100 == 0:
template = 'Epoch {} Batch {} Loss {}'
print(template.format(epoch + 1, batch_n, loss))
if (epoch + 1) % 5 == 0:
self.model.save_weights(self.checkpoint_prefix.format(epoch=epoch + 1))
print('Epoch {} Loss {:.4f}'.format(epoch + 1, loss))
print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
        self.model.save_weights(self.checkpoint_prefix.format(epoch=epoch + 1))  # save the final checkpoint
return self.batch_size
def loadModel(self, vocab_size='', embedding_dim='', rnn_units='', batch_size=0):
vocab_size = vocab_size or self.vocab_size
embedding_dim = embedding_dim or self.embedding_dim
rnn_units = rnn_units or self.rnn_units
batch_size = batch_size or self.batch_size
print(batch_size)
self.model = self.build_model(vocab_size, embedding_dim, rnn_units, batch_size)
        # Load the weights from the latest checkpoint.
self.model.load_weights(tf.train.latest_checkpoint(checkpoint_dir=self.checkpoint_dir))
return self.model | zbAI | /zbAI-0.0.1.tar.gz/zbAI-0.0.1/zbmain/models/tf_gru.py | tf_gru.py |
import hashlib
import http.client
import json
import random
import urllib
from urllib.request import urlopen
__all__ = ['__getBaiduTranslateConfigFromJson','BaiduTranslate','YoudaoTranslate','GoogleTranslate']
def __getBaiduTranslateConfigFromJson(configUrl=''):
'''
    Expected JSON format:
{
"translate":{
"baidu":{
"appid":"",
"secretKey":""
},
"google":{
"appid":"",
"secretKey":""
},"youdao":{
"appid":"",
"secretKey":""
}
}
}
    :param configUrl: URL of the JSON config file; a built-in default is used when empty
    :return: (appid, secretKey) for the Baidu translate API
'''
configJSON = configUrl or 'https://zbmain.com/files/others/config.json'
resultJSON = json.loads(urlopen(configJSON).read())
return resultJSON['translate']['baidu']['appid'], resultJSON['translate']['baidu']['secretKey']
class BaiduTranslate():
def __init__(self, appid, secretKey, fromLang='en', toLang='cn', apiUrl=''):
        '''
        Register with Baidu to obtain your own appid and secretKey.
        :param appid:
        :param secretKey:
        :param fromLang: source language of the translator, defaults to English
        :param toLang: target language of the translator, defaults to Chinese
        :param apiUrl: API path, empty by default; set it if the official endpoint changes
        '''
self.apiUrl = apiUrl or '/api/trans/vip/translate'
self.appid = appid
self.secretKey = secretKey
self.fromLang = fromLang
self.toLang = toLang
def __call__(self, text, fromLang='', toLang=''):
        '''
        :param text: text to translate
        :param fromLang: temporary source language override [optional]
        :param toLang: temporary target language override [optional]
        :return: (success, translated text, source text)
        '''
fromLang = fromLang or self.fromLang
toLang = toLang or self.toLang
salt = str(random.randint(32768, 65536))
sign = self.appid + text + salt + self.secretKey
sign = hashlib.md5(sign.encode(encoding='utf-8')).hexdigest()
requestUrl = self.apiUrl + '?appid=' + self.appid + '&q=' + urllib.parse.quote(
text) + '&from=' + fromLang + '&to=' + toLang + '&salt=' + salt + '&sign=' + sign
        httpClient = None
        try:
httpClient = http.client.HTTPConnection('api.fanyi.baidu.com')
httpClient.request('GET', requestUrl)
            # response is an http.client.HTTPResponse object
response = httpClient.getresponse()
result_all = response.read().decode("utf-8")
result = json.loads(result_all)
return True, result["trans_result"][0]["dst"], result["trans_result"][0]["src"]
except Exception as e:
return False, e
finally:
if httpClient:
httpClient.close()
class YoudaoTranslate():
def __init__(self):
print('To be updated')
class GoogleTranslate():
def __init__(self):
print('To be updated')
if __name__ == "__main__":
appid, secretKey = __getBaiduTranslateConfigFromJson()
baiduTranslate = BaiduTranslate(appid, secretKey, 'auto', 'en')
print(baiduTranslate('你好,世界!')) | zbAI | /zbAI-0.0.1.tar.gz/zbAI-0.0.1/zbmain/utils/translate.py | translate.py |
from numpy import argmax
class OneHot(object):
    '''
    One-hot encoder/decoder.
    Encode: __call__() or encode()
    Decode: decode()
    '''
def __init__(self):
        # full list of class features
        self.__class_lst = []
        # char -> index mapping
        self.__char_to_num = []
        # index -> char mapping
        self.__num_to_char = []
        # list of one-hot encodings
        self.__onehot_encoded = []
def __call__(self, sourceList:list, classList:list=None):
        '''
        Encode a list into one-hot vectors.
        :param sourceList: source list
        :param classList: full feature list for the source list; default None (falls back to sourceList)
        :return: list of one-hot encodings
        '''
return self.encode(sourceList, classList)
def encode(self, sourceList:list, classList:list=None):
        '''
        Encode a list into one-hot vectors (equivalent to __call__).
        :param sourceList: source list
        :param classList: full feature list for the source list; default None (falls back to sourceList)
        :return: list of one-hot encodings
        '''
        self.__class_lst = classList or sourceList  # fall back to the source list when no class list is given
self.__char_to_num = dict((c, n) for n, c in enumerate(self.__class_lst))
self.__num_to_char = dict((n, c) for n, c in enumerate(self.__class_lst))
integer_encoded = [self.__char_to_num[char] for char in sourceList]
        # build the one-hot encoded array
self.__onehot_encoded = []
for value in integer_encoded:
letter = [0 for _ in range(len(self.__class_lst))]
letter[value] = 1
self.__onehot_encoded.append(letter)
return self.__onehot_encoded
def decode(self, onehotNode:list):
        '''
        Decode a one-hot vector back to the source list element.
        :param onehotNode: a single one-hot vector
        :return: source list element
        :example: decode([1,0,0])
        '''
return self.__num_to_char[argmax(onehotNode)]
def getNodeOneHot(self, char:str):
        '''
        Get the one-hot vector for a source list element.
        :param char: source element
        :return: the element's one-hot vector
        '''
return self.__onehot_encoded[self.__char_to_num[char]]
@property
def onehotCode(self):
        '''Return the full list of one-hot encodings.'''
return self.__onehot_encoded
if __name__ == "__main__":
onehot = OneHot()
source = ['a', 'b', 'c', 'd']
onehot_list = onehot(source)
print(onehot_list)
print(onehot.getNodeOneHot(source[1]))
print(onehot.decode(onehot_list[1])) | zbAI | /zbAI-0.0.1.tar.gz/zbAI-0.0.1/zbmain/utils/onehot.py | onehot.py |
zbar-lite
----------
This module provides an easy way to package the zbar Python binding as a wheel, and also offers some Pythonic APIs that make zbar easy to use.
It only supports image-related functions.
For now, it is only designed for *Python*.
# How to build and install
## 1. get zbar
```
git clone https://github.com/mchehab/zbar.git
```
**It is recommended to run the commands below first to generate the right config.h.**
```
cd zbar
autoreconf -vfi
./configure --without-java --without-gtk --without-qt --without-imagemagick --disable-video --without-python
```
## 2. copy the source files we need
```
sh preparation.sh
```
There are two template config.h files in `zbar_lite/config_template`.
If you did not generate a config.h, one of them will be copied to `./src` according to your OS during setup.
*Before building, make sure that your gcc compiler matches your OS.*
On Windows you can install a gcc build environment from `https://sourceforge.net/projects/mingw-w64/files/`.
*Notice that mingw64 and mingw-w64 are not the same thing.*
It is recommended to install setuptools before building and installing.
```
pip install setuptools wheel
```
### Windows
I selected `x86_64-posix-seh-rev0` to build the wheel on Windows.

To build:
```
python setup.py build -c mingw32
```
To build a wheel:
```
python setup.py build -c mingw32 bdist_wheel
```
To install:
```
python setup.py build -c mingw32 install
```
### Linux
To build a wheel:
```
CC="gcc -std=gnu99" python setup.py bdist_wheel
```
To install:
```
CC="gcc -std=gnu99" python setup.py install
```
## Some errors you might encounter:
### 1. Cannot find -lmsvcr140
If you build this wheel on Windows with `python setup.py build_ext --compiler=mingw32`,
you may hit a `cannot find -lmsvcr140` error, as described in <https://stackoverflow.com/questions/43873604/where-is-msvcr140-dll-does-it-exist>.
*This is already fixed in setup.py.*
### 2. Must be built with std99 or gnu99
```
CC="gcc -std=gnu99" python setup.py bdist_wheel
```
### 3. Inverted Codes are not supported
When the background is darker than the QR Code's foreground, it's called an inverted Code.
These types of Codes typically have a dark background such as black, navy or dark grey.
While a few scanners can read an inverted Code, some apps are not able to scan them, including this one.
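As a workaround, you can invert the image yourself before decoding. A minimal sketch with OpenCV, assuming the `decode` helper from example2 below (the file name is just a placeholder):
```
import cv2
from zbar_helper.utils import decode

img = cv2.imread('inverted_code.png', cv2.IMREAD_GRAYSCALE)
inverted = cv2.bitwise_not(img)  # flip dark/light so the code looks like a normal one
print(decode(inverted))
```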
### 4. Cannot detect the barcode
You can do some preprocessing before decoding it.
It is recommended to try the approaches below (a sketch follows the list):
1. Convert the image to grayscale
2. Split the color channels and try the ```b, g or r``` channels separately
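A minimal preprocessing sketch with OpenCV, again assuming the `decode` helper from example2 (the file name is just a placeholder):
```
import cv2
from zbar_helper.utils import decode

img = cv2.imread('hard_to_read.png')
# 1. try grayscale first
results = decode(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
# 2. fall back to the individual color channels
if not results:
    for channel in cv2.split(img):  # b, g, r
        results = decode(channel)
        if results:
            break
print(results)
```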
# How to use
### *Several prebuilt wheels are provided right now. You can install one via `pip install zbar-lite`.*
#### example1
```
import zbar
import cv2
img_path='./test.jpg'
# create a reader
scanner = zbar.ImageScanner()
# configure the reader
scanner.parse_config('enable')
# obtain image data
pil = cv2.imread(img_path,cv2.IMREAD_GRAYSCALE)
height, width = pil.shape[:2]
raw = pil.tobytes()
# wrap image data
image = zbar.Image(width, height, 'Y800', raw)
# scan the image for barcodes
scanner.scan(image)
# extract results
for symbol in image:
# do something useful with results
print('decoded', symbol.type, 'text', '"%s"' % symbol.data)
print('type {} text {} location {} quality {}'.format( symbol.type, symbol.data,symbol.location,symbol.quality))
# clean up
del(image)
```
#### example2
```
from zbar_helper.utils import decode, show_info
import cv2
image_path = "test.png"
img = cv2.imread(image_path)
print(decode(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)))
show_info(decode(cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)), img)
```
For more documentation you can visit <https://github.com/mchehab/zbar/tree/master/python>.
For tests of more barcode tools, see <https://github.com/xulihang/Barcode-Reading-Performance-Test>.
| zbar-lite | /zbar-lite-0.23.93.tar.gz/zbar-lite-0.23.93/README.md | README.md |
set -e
if [[ $1 = "--help" ]] || [[ $1 = "-h" ]]
then
echo "this is a script to prepare for zbar_lite build"
help="Usage: $0 [ZBAR_SOURCE_HOME] [WORKDIR]
-h, --help print this help, then exit
ZBAR_SOURCE_HOME the zbar source code home dir which should contain dir zbar and include
WORKDIR the work dir,default is the dir which this script in
"
echo "${help}"
exit 0
fi
DEFAULT_WORKDIR=$(dirname $(readlink -f "$0"))
WORKDIR=${2:-$DEFAULT_WORKDIR}
ZBAR_SOURCE_HOME=${1:-$WORKDIR}
echo " WORKDIR is $WORKDIR"
echo " ZBAR_SOURCE_HOME is $ZBAR_SOURCE_HOME"
echo "copy zbar source code"
mkdir -p ${WORKDIR}/src/zbar
mkdir -p ${WORKDIR}/src/zbar/video
cp ${ZBAR_SOURCE_HOME}/zbar/video/null.c ${WORKDIR}/src/zbar/video/
cp -r ${ZBAR_SOURCE_HOME}/zbar/decoder ${WORKDIR}/src/zbar/
cp -r ${ZBAR_SOURCE_HOME}/zbar/qrcode ${WORKDIR}/src/zbar/
cp -r ${ZBAR_SOURCE_HOME}/zbar/processor ${WORKDIR}/src/zbar/
cp -r ${ZBAR_SOURCE_HOME}/zbar/window ${WORKDIR}/src/zbar/
cp ${ZBAR_SOURCE_HOME}/include/zbar.h ${WORKDIR}/src/
if [ -f "${ZBAR_SOURCE_HOME}/include/config.h" ]
then
cp ${ZBAR_SOURCE_HOME}/include/config.h ${WORKDIR}/src/
fi
cp ${ZBAR_SOURCE_HOME}/zbar/config.c ${WORKDIR}/src/zbar/
cp ${ZBAR_SOURCE_HOME}/zbar/convert.c ${WORKDIR}/src/zbar/
cp ${ZBAR_SOURCE_HOME}/zbar/debug.h ${WORKDIR}/src/zbar/
cp ${ZBAR_SOURCE_HOME}/zbar/decoder.c ${WORKDIR}/src/zbar/
cp ${ZBAR_SOURCE_HOME}/zbar/decoder.h ${WORKDIR}/src/zbar/
cp ${ZBAR_SOURCE_HOME}/zbar/error.c ${WORKDIR}/src/zbar/
cp ${ZBAR_SOURCE_HOME}/zbar/error.h ${WORKDIR}/src/zbar/
cp ${ZBAR_SOURCE_HOME}/zbar/event.h ${WORKDIR}/src/zbar/
cp ${ZBAR_SOURCE_HOME}/zbar/image.c ${WORKDIR}/src/zbar/
cp ${ZBAR_SOURCE_HOME}/zbar/image.h ${WORKDIR}/src/zbar/
cp ${ZBAR_SOURCE_HOME}/zbar/img_scanner.c ${WORKDIR}/src/zbar/
cp ${ZBAR_SOURCE_HOME}/zbar/img_scanner.h ${WORKDIR}/src/zbar/
cp ${ZBAR_SOURCE_HOME}/zbar/mutex.h ${WORKDIR}/src/zbar/
cp ${ZBAR_SOURCE_HOME}/zbar/qrcode.h ${WORKDIR}/src/zbar/
cp ${ZBAR_SOURCE_HOME}/zbar/refcnt.c ${WORKDIR}/src/zbar/
cp ${ZBAR_SOURCE_HOME}/zbar/refcnt.h ${WORKDIR}/src/zbar/
cp ${ZBAR_SOURCE_HOME}/zbar/scanner.c ${WORKDIR}/src/zbar/
cp ${ZBAR_SOURCE_HOME}/zbar/sqcode.c ${WORKDIR}/src/zbar/
cp ${ZBAR_SOURCE_HOME}/zbar/sqcode.h ${WORKDIR}/src/zbar/
cp ${ZBAR_SOURCE_HOME}/zbar/svg.h ${WORKDIR}/src/zbar/
cp ${ZBAR_SOURCE_HOME}/zbar/symbol.c ${WORKDIR}/src/zbar/
cp ${ZBAR_SOURCE_HOME}/zbar/symbol.h ${WORKDIR}/src/zbar/
cp ${ZBAR_SOURCE_HOME}/zbar/thread.h ${WORKDIR}/src/zbar/
cp ${ZBAR_SOURCE_HOME}/zbar/timer.h ${WORKDIR}/src/zbar/
cp ${ZBAR_SOURCE_HOME}/zbar/video.c ${WORKDIR}/src/zbar/
cp ${ZBAR_SOURCE_HOME}/zbar/video.h ${WORKDIR}/src/zbar/
cp ${ZBAR_SOURCE_HOME}/zbar/window.h ${WORKDIR}/src/zbar/
cp ${ZBAR_SOURCE_HOME}/zbar/window.c ${WORKDIR}/src/zbar/
cp ${ZBAR_SOURCE_HOME}/zbar/processor.c ${WORKDIR}/src/zbar/
cp ${ZBAR_SOURCE_HOME}/zbar/processor.h ${WORKDIR}/src/zbar/
echo "copy zbar python module source code"
zbar_source_for_python=${WORKDIR}/zbar_python/
mkdir -p ${zbar_source_for_python}
cp ${ZBAR_SOURCE_HOME}/python/*.c ${zbar_source_for_python}
cp ${ZBAR_SOURCE_HOME}/python/*.h ${zbar_source_for_python}
echo "set output encoding to UTF-8"
sed -i 's/"ISO8859-1"/"UTF-8"/g' ${WORKDIR}/src/zbar/qrcode/qrdectxt.c
sed -i 's/"BIG-5"/"UTF-8"/g' ${WORKDIR}/src/zbar/qrcode/qrdectxt.c
sed -i 's/"SJIS"/"UTF-8"/g' ${WORKDIR}/src/zbar/qrcode/qrdectxt.c | zbar-lite | /zbar-lite-0.23.93.tar.gz/zbar-lite-0.23.93/preparation.sh | preparation.sh |
import math
from dataclasses import dataclass, field
from typing import List, Tuple, Union
import zbar
try:
import cv2
except ModuleNotFoundError:
print("Warning,func show_info can not be used when cv2 is not available")
@dataclass
class Position:
"""
    Position used to construct a result in the same layout as ZXing.
"""
left_top: Union[List, Tuple] = field(default_factory=list)
left_bottom: Union[List, Tuple] = field(default_factory=list)
right_bottom: Union[List, Tuple] = field(default_factory=list)
right_top: Union[List, Tuple] = field(default_factory=list)
is_valid: bool = False
def __post_init__(self):
self.is_valid = bool(self.left_top and self.left_bottom and self.right_bottom and self.right_top)
class BarcodeRes:
"""
BarcodeRes
text : text of utf-8
type : barcode type
location : barcode point list
rect : bounding box of location
    ori_orientation : zbar's internal orientation (only used to order the points)
orientation : orientation degree
position : Position class with fields ["left_top", "left_bottom", "right_bottom", "right_top"]
"""
def __init__(self, x: zbar.Symbol):
self.text = x.data
self.type = str(x.type)
self.location = x.location
self.rect = get_bbox(x.location)
self.ori_orientation = str(x.orientation)
if len(self.location) != 4:
self.position = Position()
else:
if self.ori_orientation == "LEFT": # for LEFT
self.position = Position(*[self.location[0], self.location[3], self.location[2], self.location[1]])
else:
self.position = Position(*self.location)
self.orientation = get_clockwise_orientation(self.position.left_bottom, self.position.left_top, "degree")
def __repr__(self):
return str(self.__dict__)
def get_clockwise_orientation(start_p, end_p, return_format="degree"):
"""
calc clockwise orientation
:param start_p: start point
:param end_p: end point
:param return_format: degree or radian
:return:
"""
if len(start_p) != 2 or len(end_p) != 2:
return 0
    d_x = end_p[0] - start_p[0]
d_y = end_p[1] - start_p[1]
if d_y == 0:
if d_x >= 0:
res = math.pi / 2
else:
res = -math.pi / 2
else:
res = math.atan(d_x / d_y)
if return_format == "degree":
res = res / math.pi * 180
return round(res)
def get_bbox(p_list):
"""
:param p_list:
:return:
"""
x_list, y_list = [item[0] for item in p_list], [item[1] for item in p_list]
p_left_top = (min(x_list), min(y_list))
p_right_bottom = (max(x_list), max(y_list))
x_center = (p_left_top[0] + p_right_bottom[0]) / 2
y_center = (p_left_top[1] + p_right_bottom[1]) / 2
width = p_right_bottom[0] - p_left_top[0]
height = p_right_bottom[1] - p_left_top[1]
return x_center, y_center, width, height
def decode(img):
"""
get BarCode decode result
:param img: cv2 image (np array)(gray is better)
:return:
"""
scanner = zbar.ImageScanner()
scanner.parse_config('enable')
height, width = img.shape[:2]
raw = img.tobytes()
image = zbar.Image(width, height, 'Y800', raw)
scanner.scan(image)
res = [BarcodeRes(x) for x in image]
return res
def show_info(barcode_list: List[BarcodeRes], image):
"""
:param barcode_list:
:param image:
:return:
"""
for barcode in barcode_list:
(x, y, w, h) = barcode.rect
cv2.rectangle(image, (int(x - w / 2), int(y - h / 2)), (int(x + w / 2), int(y + h / 2)),
(255, 255, 0), 10)
cv2.putText(image, barcode.text, (int(x - w / 2), int(y - h / 2)),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
cv2.imshow("res", image)
cv2.waitKey() | zbar-lite | /zbar-lite-0.23.93.tar.gz/zbar-lite-0.23.93/zbar_helper/utils.py | utils.py |
# zbar-py
## Introduction
Author: [Zachary Pincus](http://zplab.wustl.edu) <[email protected]>
Contributions: Rounak Singh <[email protected]> (example code and zbar.misc).
zbar-py is a module (compatible with both Python 2.7 and 3+) that provides an interface to the [zbar](http://zbar.sourceforge.net) bar-code reading library, which can read most barcode formats as well as QR codes. Input images must be 2D numpy arrays of type uint8 (i.e. 2D greyscale images).
The zbar library itself is packaged along with zbar-py (it's built as a python extension), so no external dependencies are required. Building zbar requires the iconv library to be present, which you almost certainly have, unless you're on Windows. In that case you will probably need to download or build the iconv DLL. [Here](http://mlocati.github.io/articles/gettext-iconv-windows.html) are pre-built 32- and 64-bit binaries.
The python code is under the MIT license, and zbar itself is licensed under the GNU LGPL version 2.1.
## Prerequisites:
* iconv -- c library required for building zbar-py; see above
* numpy -- for running zbar-py
* pygame -- for examples using a webcam
## Simple examples:
More sophisticated examples can be found in 'examples' directory.
* Scan for barcodes in a 2D numpy array:
```python
import zbar
image = read_image_into_numpy_array(...) # whatever function you use to read an image file into a numpy array
scanner = zbar.Scanner()
results = scanner.scan(image)
for result in results:
print(result.type, result.data, result.quality, result.position)
```
* Scan for UPC-A barcodes and perform checksum validity test:
```python
import zbar
import zbar.misc
image = read_image_into_numpy_array(...) # get an image into a numpy array
scanner = zbar.Scanner()
results = scanner.scan(image)
for result in results:
if result.type == 'UPC-A':
print(result.data, zbar.misc.upca_is_valid(result.data.decode('ascii')))
```
| zbar-py | /zbar-py-1.0.4.tar.gz/zbar-py-1.0.4/README.md | README.md |
import requests
import json
import pathlib
import zbar
import zbar.misc
def imread(image_filename):
'''Example image-reading function that tries to use freeimage, skimage, scipy or pygame to read in an image'''
try:
from freeimage import read as read_image
except ImportError:
read_image = None
if read_image is None:
try:
from skimage.io import imread as read_image
except ImportError:
pass
if read_image is None:
try:
from scipy.misc import imread as read_image
except ImportError:
pass
if read_image is None:
try:
import pygame.image
import pygame.surfarray
def read_image(image_filename):
image_pygame_surface = pygame.image.load(image_filename)
return pygame.surfarray.array3d(image_pygame_surface)
except ImportError:
raise ImportError('for this example freeimage, skimage, scipy, or pygame are required for image reading')
image = read_image(image_filename)
if len(image.shape) == 3:
image = zbar.misc.rgb2gray(image)
return image
def print_book_name(isbn):
print('Requesting openlibrary.org for book name of ISBN: '+isbn)
url = 'http://openlibrary.org/api/books?bibkeys=ISBN:{}&format=json&jscmd=data'.format(isbn)
r = requests.get(url)
print('Request Status:', r.status_code)
if r.status_code == 200:
res=json.loads(r.text)
if len(res)!=0:
book_details=res['ISBN:' + isbn]
try:
print('Title:', book_details['title'])
            except KeyError:
print('Title not found in data received from openlibrary.org')
else:
print('Not Found in database')
else:
print('error requesting API')
def print_product_name(product_id):
data_int=int(product_id)
print('Requesting opendatasoft.com for name of product GTID {0:013d}'.format(data_int))
url = 'http://pod.opendatasoft.com/api/records/1.0/search/?dataset=pod_gtin&q={0:013d}&facet=gpc_s_nm&facet=brand_nm&facet=owner_nm&facet=gln_nm&facet=prefix_nm'.format(data_int)
r = requests.get(url)
print('Request Status:',r.status_code)
if r.status_code ==200:
#print(r.text)
res=json.loads(r.text)
#print(res)
records=res['records']
if res['nhits'] == 0:
print('Product Not found')
else:
print('NumHits:', res['nhits'] )
for record in records:
#print(record)
data_fields=record['fields']
try:
print('Product Name:', data_fields['gtin_nm'])
print('Product GTID:', data_fields['gtin_cd'])
print('Company Name:', data_fields['brand_nm'])
                except KeyError:
print('Key not found in data received from opendatasoft.com')
else:
print('error requesting API')
def lookup_barcodes(results):
"""Look up barcodes from a list of barcode results returned by zbar.Scanner.scan"""
for result in results:
if result.type.startswith('ISBN'):
print_book_name(result.data.decode("ascii"))
elif result.type == 'UPC-E':
            converted_id = zbar.misc.upce_2_upca(result.data.decode("ascii"))
print_product_name(converted_id)
else:
print_product_name(result.data.decode("ascii"))
barcode_dir = pathlib.Path(__file__).parent / 'barcodes'
scanner = zbar.Scanner()
for image in sorted(barcode_dir.glob('*')):
print('Scanning image ' + image.name)
image_as_numpy_array = imread(image)
results = scanner.scan(image_as_numpy_array)
lookup_barcodes(results) | zbar-py | /zbar-py-1.0.4.tar.gz/zbar-py-1.0.4/examples/barcode_lookup.py | barcode_lookup.py |
import zbar
import zbar.misc
import numpy
import time
import pygame
import pygame.camera
import pygame.image
import pygame.surfarray
def get_image_array_from_cam(cam_name, cam_resolution):
    '''Get an image ndarray from the webcam using pygame.'''
pygame.init()
pygame.camera.init()
pygame.camera.list_cameras()
cam = pygame.camera.Camera(cam_name, cam_resolution)
screen = pygame.display.set_mode(cam.get_size())
    print('Take a picture of the barcode. If the picture does not look good, press enter at the terminal \
and the camera will take another one. When done, press q and enter to quit camera mode.')
while True:
cam.start()
time.sleep(0.5) # You might need something higher in the beginning
pygame_screen_image = cam.get_image()
screen.blit(pygame_screen_image, (0,0))
pygame.display.flip() # update the display
cam.stop()
if input() == 'q':
break
pygame.display.quit()
image_ndarray = pygame.surfarray.array3d(pygame_screen_image)
if len(image_ndarray.shape) == 3:
image_ndarray = zbar.misc.rgb2gray(image_ndarray)
return image_ndarray
#----------------------------------------------------------------------------------
# Get the pic
# To get pic from cam or video, packages like opencv or simplecv can also be used.
#----------------------------------------------------------------------------------
# Cam name might vary depending on your PC.
cam_name='/dev/video1'
cam_resolution=(640,480) # A general cam resolution
img_ndarray = get_image_array_from_cam(cam_name, cam_resolution)
#-------------------------------------------------------------------------
# Read the Barcode
#-------------------------------------------------------------------------
# Detect all
scanner = zbar.Scanner()
results = scanner.scan(img_ndarray)
if not results:
print("No Barcode found.")
else:
for result in results:
# By default zbar returns barcode data as byte array, so decode byte array as ascii
print(result.type, result.data.decode("ascii"), result.quality) | zbar-py | /zbar-py-1.0.4.tar.gz/zbar-py-1.0.4/examples/barcodes_from_webcam.py | barcodes_from_webcam.py |
import numpy
def rgb2gray(rgb):
'''
converts rgb to grayscale image
rgb is of type numpy.ndarray
'''
return numpy.dot(rgb[...,:3], [0.299, 0.587, 0.114]).astype(numpy.uint8)
def upca_to_ean13(upca):
'''
Takes unicode UPC-A.
Returns unicode EAN-13
'''
# Check length and type of ean8
if len(upca)!=12:
raise ValueError("full UPC-A should be of length 12")
else:
try:
upca=int(upca)
except ValueError:
raise ValueError('UPC-A should be numerical digits')
return '{0:013d}'.format(upca)
def ean8_to_ean13(ean8):
'''
Takes unicode EAN-8.
Returns unicode EAN-13
'''
# Check length and type of ean8
if len(ean8)!=8:
raise ValueError("EAN-8 should be of length 8")
else:
try:
ean8=int(ean8)
except ValueError:
raise ValueError('EAN-8 should be numerical digits')
return '{0:013d}'.format(ean8)
def _upca_checksum(digits):
odd_digits = digits[0::2]
even_digits = digits[1::2]
return (sum(odd_digits)*3 + sum(even_digits)) % 10
def upca_get_check_digit(upca):
'''
calculates the checksum of upca
UPC-A code must be passed as str.
Check Digit is returned as int.
Error: returns None
'''
# return a list of digits from a number
try:
digits = list(map(int, upca))
except ValueError:
raise ValueError("UPC-A should be numerical digits")
if len(digits) == 12:
digits = digits[:-1]
elif len(digits) != 11:
raise ValueError("UPC-A should be of length 11 (without optional check digit)")
checksum = _upca_checksum(digits)
check_digit = 0 if checksum == 0 else 10 - checksum
return check_digit
def upca_is_valid(upca):
'''
verifies that the checksum of full upca (12 digits) is valid.
UPC-A must be passed as str
return type is Boolean
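    Example: upca_is_valid('036000291452') -> True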
'''
if len(upca) != 12:
raise ValueError("UPC-A should be of length 12 (with check digit)")
try:
digits = list(map(int, upca))
except ValueError:
raise ValueError("UPC-A should be numerical digits")
checksum = _upca_checksum(digits)
return checksum == 0
def upce_2_upca(upc_e):
'''
This function converts a UPC-E code into UPC-A
UPC-E must be passed as str.
UPC-A is returned as str
if any error then None is returned.
Ref:
http://www.taltech.com/barcodesoftware/symbologies/upc
http://stackoverflow.com/questions/31539005/how-to-convert-a-upc-e-barcode-to-a-upc-a-barcode
'''
# converting to strings
upc_e=str(upc_e)
# Checking if the barcodes have numbers only
try:
int(upc_e)
except ValueError:
raise ValueError("UPC-E should be numerical digits")
# If the first digit of UPC-E is not 0
if upc_e[0] != '0':
raise ValueError("First digit of UPC-E should be zero")
upc_a='0'+upc_e[1]+upc_e[2]
zeros='0000'
if upc_e[6] == '0' or upc_e[6] == '1' or upc_e[6] == '2':
upc_a+=upc_e[6]+zeros+upc_e[3:-2]
elif upc_e[6]== '3':
upc_a+=upc_e[3]+zeros+'0'+upc_e[4:-2]
elif upc_e[6]== '4':
upc_a+=upc_e[3:5]+zeros+'0'+upc_e[5]
else:
upc_a+=upc_e[3:6]+zeros+upc_e[6]
# Add checksum digit
upc_a+=upc_e[-1]
# verify UPC-E code if valid using Checksum
if upca_is_valid(upc_a):
return upc_a
else:
        msg = 'UPC-E is invalid. Please verify the checksum digit. \nValid checksum digit = ' + str(upca_get_check_digit(upc_a)) + \
            '\nSo, valid UPC-A is ' + upc_a[:-1] + str(upca_get_check_digit(upc_a))
raise ValueError(msg) | zbar-py | /zbar-py-1.0.4.tar.gz/zbar-py-1.0.4/zbar/misc.py | misc.py |
import ctypes
import numpy
import sys
import os.path
import glob
import collections
__all__ = ['ZBAR_SYMBOLS', 'ZBAR_CONFIGS', 'Scanner', 'Symbol']
def load_zbar():
if sys.platform == 'win32':
loader = ctypes.windll
functype = ctypes.WINFUNCTYPE
else:
loader = ctypes.cdll
functype = ctypes.CFUNCTYPE
zbar = None
errors = []
possible_zbar_libs = glob.glob(os.path.join(os.path.dirname(__file__), '_zbar.*'))
for lib in possible_zbar_libs:
try:
zbar = loader.LoadLibrary(lib)
break
except Exception:
# Get exception instance in Python 2.x/3.x compatible manner
e_type, e_value, e_tb = sys.exc_info()
del e_tb
errors.append((lib, e_value))
if zbar is None:
if errors:
# No zbar library loaded, and load-errors reported for some
# candidate libs
err_txt = ['%s:\n%s' % (l, str(e.args[0])) for l, e in errors]
            raise RuntimeError('One or more zbar libraries were found, but '
                               'could not be loaded due to the following errors:\n' +
                               '\n\n'.join(err_txt))
else:
# No errors, because no potential libraries found at all!
raise RuntimeError('Could not find a zbar library in ' + __file__)
return zbar
_ZB = load_zbar()
API = {
'zbar_image_scanner_create': (ctypes.c_void_p, ()),
'zbar_image_scanner_destroy': (None, (ctypes.c_void_p,)),
'zbar_image_scanner_set_config': (None, (ctypes.c_void_p, ctypes.c_uint, ctypes.c_uint, ctypes.c_int)),
'zbar_scan_image': (ctypes.c_int, (ctypes.c_void_p, ctypes.c_uint, ctypes.c_uint, ctypes.c_void_p)),
'zbar_image_scanner_first_symbol': (ctypes.c_void_p, (ctypes.c_void_p,)),
'zbar_symbol_next': (ctypes.c_void_p, (ctypes.c_void_p,)),
'zbar_symbol_get_type': (ctypes.c_uint, (ctypes.c_void_p,)),
'zbar_get_symbol_name': (ctypes.c_char_p, (ctypes.c_uint,)),
'zbar_symbol_get_data': (ctypes.c_void_p, (ctypes.c_void_p,)),
'zbar_symbol_get_data_length': (ctypes.c_uint, (ctypes.c_void_p,)),
'zbar_symbol_get_quality': (ctypes.c_int, (ctypes.c_void_p,)),
'zbar_symbol_get_loc_size': (ctypes.c_uint, (ctypes.c_void_p,)),
'zbar_symbol_get_loc_x': (ctypes.c_int, (ctypes.c_void_p, ctypes.c_uint)),
'zbar_symbol_get_loc_y': (ctypes.c_int, (ctypes.c_void_p,ctypes.c_uint)),
}
def register_api(lib, api):
for f, (restype, argtypes) in api.items():
func = getattr(lib, f)
func.restype = restype
func.argtypes = argtypes
register_api(_ZB, API)
ZBAR_SYMBOLS = {
'ZBAR_NONE' : 0, # /**< no symbol decoded */
'ZBAR_PARTIAL' : 1, # /**< intermediate status */
'ZBAR_EAN8' : 8, # /**< EAN-8 */
'ZBAR_UPCE' : 9, # /**< UPC-E */
'ZBAR_ISBN10' : 10, # /**< ISBN-10 (from EAN-13). @since 0.4 */
'ZBAR_UPCA' : 12, # /**< UPC-A */
'ZBAR_EAN13' : 13, # /**< EAN-13 */
'ZBAR_ISBN13' : 14, # /**< ISBN-13 (from EAN-13). @since 0.4 */
'ZBAR_I25' : 25, # /**< Interleaved 2 of 5. @since 0.4 */
'ZBAR_CODE39' : 39, # /**< Code 39. @since 0.4 */
'ZBAR_PDF417' : 57, # /**< PDF417. @since 0.6 */
'ZBAR_QRCODE' : 64, # /**< QR Code. @since 0.10 */
'ZBAR_CODE128' : 128, # /**< Code 128 */
'ZBAR_SYMBOL' : 0x00ff, # /**< mask for base symbol type */
'ZBAR_ADDON2' : 0x0200, # /**< 2-digit add-on flag */
'ZBAR_ADDON5' : 0x0500, # /**< 5-digit add-on flag */
'ZBAR_ADDON' : 0x0700, # /**< add-on flag mask */
}
ZBAR_CONFIGS = {
'ZBAR_CFG_ENABLE': 0, #/**< enable symbology/feature */
'ZBAR_CFG_ADD_CHECK': 1, #/**< enable check digit when optional */
'ZBAR_CFG_EMIT_CHECK': 2, #/**< return check digit when present */
'ZBAR_CFG_ASCII': 3, #/**< enable full ASCII character set */
'ZBAR_CFG_NUM': 4, #/**< number of boolean decoder configs */
'ZBAR_CFG_MIN_LEN': 0x20, #/**< minimum data length for valid decode */
'ZBAR_CFG_MAX_LEN': 0x21, #/**< maximum data length for valid decode */
'ZBAR_CFG_POSITION': 0x80, #/**< enable scanner to collect position data */
'ZBAR_CFG_X_DENSITY':0x100, #/**< image scanner vertical scan density */
'ZBAR_CFG_Y_DENSITY':0x101, #/**< image scanner horizontal scan density */
}
Symbol = collections.namedtuple('Symbol', ['type', 'data', 'quality', 'position'])
class Scanner(object):
def __init__(self, config=None):
"""Create a barcode-scanner object.
By default, scanning for all barcode types is enabled, and reporting of
their locations is enabled. This can be controlled by the config parameter.
Parameters:
config: None or a list of (symbol_type, config_type, value) triples.
* symbol_type must be one of ZBAR_SYMBOLS, which refers to a
class of barcodes. ZBAR_NONE will cause the configuration
option to apply to all barcode types.
* config_type must be one of ZBAR_CONFIGS, defined in zbar.h.
Of particular interest are ZBAR_CFG_ENABLE (enable specific
symbol type), ZBAR_CFG_ADD_CHECK (enable check-digit
verification) and ZBAR_CFG_MIN_LEN and ZBAR_CFG_MAX_LEN (only
return decoded barcodes with the specified data length).
NB: Enabling/disabling specific barcode types is complex and
not particularly well supported by zbar (some barcode types
will be scanned-for by default unless disabled; some require
specific enablement; some types like ISBN and UPC that are
subclasses of EAN barcodes require EAN to also be enabled).
Thus is is STRONGLY recommended to use the default config
and filter for barcode types after the fact.
* value should be 1 for boolean options, or an integer for the
other options.
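
        Example (a sketch using the default configuration shown below):
            scanner = Scanner(config=[
                ('ZBAR_NONE', 'ZBAR_CFG_ENABLE', 1),
                ('ZBAR_NONE', 'ZBAR_CFG_POSITION', 1),
            ])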
"""
self._scanner = _ZB.zbar_image_scanner_create()
if config is None:
config = [('ZBAR_NONE', 'ZBAR_CFG_ENABLE', 1), ('ZBAR_NONE', 'ZBAR_CFG_POSITION', 1)]
for symbol_type, config_type, value in config:
_ZB.zbar_image_scanner_set_config(self._scanner, ZBAR_SYMBOLS[symbol_type], ZBAR_CONFIGS[config_type], value)
def __del__(self):
_ZB.zbar_image_scanner_destroy(self._scanner)
del self._scanner
def scan(self, image):
"""Scan an image and return a list of barcodes identified.
Parameters:
image: must be a 2-dimensional numpy array of dtype uint8.
Returns: list of Symbol namedtuples.
Each Symbol has 'type', 'data', 'quality', and 'position' attributes.
* 'type' refers to the barcode's type (e.g. 'QR-Code')
* 'data' is a bytes instance containing the barcode payload
* 'quality' is a numerical score
* 'position' is either an empty list (if position recording was
disabled), or a list of (x, y) indices into the image that define
the barcode's location.
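
        Example (a sketch; `image` is any 2D uint8 array):
            results = Scanner().scan(image)
            for symbol in results:
                print(symbol.type, symbol.data)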
"""
image = numpy.asarray(image)
        if not (image.dtype == numpy.uint8 and image.ndim == 2):
raise ValueError('Image must be 2D uint8 type')
if image.flags.c_contiguous:
height, width = image.shape
else:
image = numpy.asfortranarray(image)
width, height = image.shape
num_symbols = _ZB.zbar_scan_image(self._scanner, width, height, image.ctypes.data)
symbols = []
symbol = _ZB.zbar_image_scanner_first_symbol(self._scanner)
        while symbol:
sym_type = _ZB.zbar_symbol_get_type(symbol)
sym_name = _ZB.zbar_get_symbol_name(sym_type).decode('ascii')
sym_data_ptr = _ZB.zbar_symbol_get_data(symbol)
sym_data_len = _ZB.zbar_symbol_get_data_length(symbol)
sym_data = ctypes.string_at(sym_data_ptr, sym_data_len)
sym_quality = _ZB.zbar_symbol_get_quality(symbol)
sym_loc = []
for i in range(_ZB.zbar_symbol_get_loc_size(symbol)):
x = _ZB.zbar_symbol_get_loc_x(symbol, i)
y = _ZB.zbar_symbol_get_loc_y(symbol, i)
sym_loc.append((x, y))
symbols.append(Symbol(sym_name, sym_data, sym_quality, sym_loc))
symbol = _ZB.zbar_symbol_next(symbol)
assert len(symbols) == num_symbols
return symbols | zbar-py | /zbar-py-1.0.4.tar.gz/zbar-py-1.0.4/zbar/zbar.py | zbar.py |
# OpenCV
## Linux Camera support
In order to be able to use the camera on Linux, you need to compile OpenCV.
Simply installing `opencv-python` from pypi is not enough.
Use the [Makefile](Makefile) provided to compile and install OpenCV library.
```
make system_dependencies
make opencv
```
This builds OpenCV and deploys it to your virtualenv.
## Troubleshooting
### Makefile `cp: cannot stat 'opencv-4.0.1/build/lib/python3/cv2*.so': No such file or directory`
Log:
```
make[2]: Leaving directory '/tmp/trash/zbarcam/opencv-4.0.1/build'
make[1]: Leaving directory '/tmp/trash/zbarcam/opencv-4.0.1/build'
cp opencv-4.0.1/build/lib/python3/cv2*.so venv/lib/python3.7/site-packages
cp: cannot stat 'opencv-4.0.1/build/lib/python3/cv2*.so': No such file or directory
Makefile:97: recipe for target 'venv/lib/python3.7/site-packages/cv2*.so' failed
make: *** [venv/lib/python3.7/site-packages/cv2*.so] Error 1
```
Most likely you need to `pip install numpy`, delete your OpenCV build directory (`opencv-4.0.1/build`) and build again.
| zbarcam | /zbarcam-2019.902.tar.gz/zbarcam-2019.902/OpenCV.md | OpenCV.md |
# zbarcam
[](https://travis-ci.org/kivy-garden/zbarcam)
Real time Barcode and QR Code scanner using the camera.
It's built on top of [Kivy](https://github.com/kivy/kivy) and [pyzbar](https://github.com/NaturalHistoryMuseum/pyzbar).
<img src="https://raw.githubusercontent.com/AndreMiras/garden.zbarcam/develop/screenshot.gif" align="right" width="256" alt="screenshot" />
## How to use
Simply import and instantiate `ZBarCam` in your kvlang file and access its `symbols` property.
```yaml
#:import ZBarCam kivy_garden.zbarcam.ZBarCam
#:import ZBarSymbol pyzbar.pyzbar.ZBarSymbol
BoxLayout:
orientation: 'vertical'
ZBarCam:
id: zbarcam
# optional, by default checks all types
code_types: ZBarSymbol.QRCODE, ZBarSymbol.EAN13
Label:
size_hint: None, None
size: self.texture_size[0], 50
text: ', '.join([str(symbol.data) for symbol in zbarcam.symbols])
```
A full working demo is available in [kivy_garden/zbarcam/main.py](kivy_garden/zbarcam/main.py).
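From Python you can read the same `symbols` property; a minimal sketch (the `zbarcam` id comes from the kvlang snippet above, and the surrounding `App` code is assumed):
```python
zbarcam = self.root.ids['zbarcam']  # the id declared in the kv snippet
for symbol in zbarcam.symbols:  # updates as codes are detected
    print(symbol.data)
```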
## Install
### Ubuntu
Install system requirements (Ubuntu 18.04):
```sh
sudo apt install libzbar-dev
```
Install garden requirements:
```sh
garden install --upgrade xcamera
```
Install zbarcam:
```sh
pip install --upgrade https://github.com/kivy-garden/zbarcam/archive/develop.zip
```
Then import it in your Python code via:
```python
from kivy_garden.zbarcam import ZBarCam
```
You may also need to compile/install OpenCV manually, see [OpenCV.md](OpenCV.md).
### Android
Build for Android via buildozer, see [buildozer.spec](buildozer.spec).
## Contribute
To play with the project, install system dependencies and Python requirements using the [Makefile](Makefile).
```sh
make
```
Then verify everything is OK by running tests.
```sh
make test
make uitest
```
## Troubleshooting
### Install `Unable to import package 'kivy.garden.xcamera.XCamera'`
You're missing the `xcamera` dependency. Install it as described in the install instructions.
### Android `ValueError: Empty module name`
More likely an import issue in your `.kv` file.
Try to `from zbarcam import ZBarCam` in your `main.py` to see the exact error.
It's common to forget `Pillow` in `buildozer.spec` `requirements` section.
### OpenCV related
See [OpenCV.md](OpenCV.md).
## Credits
I borrowed a lot of code from [tito/android-zbar-qrcode](https://github.com/tito/android-zbar-qrcode).
| zbarcam | /zbarcam-2019.902.tar.gz/zbarcam-2019.902/README.md | README.md |
# Change Log
## [20190902]
- Update Cython for Python3.7 support, refs #35
- Make garden.zbarcam a package again, refs #36
- Don't ship opencv directory to APK, refs #37
- Migrate to new garden structure, refs #17
- Publish documentation to readthedocs, refs #18
- Publish to PyPI, refs #19
## [20190303]
- Add Python3.6 support, refs #5
- Fully migrated Android from PIL to Pillow, refs #13
- Handle Android runtime permissions, refs #30
- Fix codes not detected on Android, refs #32
- Migrate from zbarlight to pyzbar, refs #32
- Migrate to Python3.6 and opencv 4, refs #33
## [20190223]
- Fix zbarlight dependency in setup.py, refs #28
- Migrate to zbarlight 2.1, refs #18
## [20190222]
- Provide Makefile, refs #15
- Setup continuous integration testing, refs #6, #14
- Speedup OpenCV compilation time, refs #16
- Migrated to zbarlight, refs #5, #13
- Introduced UI tests, refs #4
- Using non-root Docker container, refs #27
- Run UI tests from Travis, refs #26
## [20171220]
- Full screen camera
- Kvlang refactoring
- File tree refactoring
## [20171117]
- Integrated to kivy-garden
## [20171102]
- Add camera start/stop
## [20171020]
- Add Android autofocus, refs #2
- Fix Android rotation, refs #3
- Add animated demo gif
## [20171019]
- Add ZBar Android support, refs #1
- Add PIL/Pillow Android workaround
## [20171016]
- Initial release
| zbarcam | /zbarcam-2019.902.tar.gz/zbarcam-2019.902/CHANGELOG.md | CHANGELOG.md |
ZbarLight
=========
``zbarlight`` is a simple wrapper for the zbar library. For now, it can read all zbar supported codes. Contributions,
suggestions and pull requests are welcome.
``zbarlight`` is hosted on Github at <https://github.com/Polyconseil/zbarlight/>.
Installation
============
You need to install ZBar Bar Code Reader <http://zbar.sourceforge.net/> and its headers before installing ``zbarlight``.
On Debian
~~~~~~~~~
.. code-block:: console
$ apt-get install libzbar0 libzbar-dev
$ pip install zbarlight # you can also use setuptools directly
On Mac OS X
~~~~~~~~~~~
.. code-block:: console
$ brew install zbar
$ export LDFLAGS="-L$(brew --prefix zbar)/lib"
$ export CFLAGS="-I$(brew --prefix zbar)/include"
$ pip install zbarlight
On Windows
~~~~~~~~~~
Instructions can be found at <https://gist.github.com/Zephor5/aea563808d80f488310869b69661f330>.
How To use ZbarLight
====================
.. code-block:: python
from PIL import Image
import zbarlight
file_path = './tests/fixtures/two_qr_codes.png'
with open(file_path, 'rb') as image_file:
image = Image.open(image_file)
image.load()
codes = zbarlight.scan_codes(['qrcode'], image)
print('QR codes: %s' % codes)
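
``scan_codes()`` takes a list of symbologies, so you can look for several kinds of
codes in one pass. A minimal sketch (the extra symbology name is an assumption
following the ``qrcode`` example above):

.. code-block:: python

    codes = zbarlight.scan_codes(['qrcode', 'ean13'], image)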
Troubleshooting
===============
In some cases ``zbarlight`` will not be able to detect the 1D or 2D code in an image. One known cause is that the
image background color is the same as the foreground color after conversion to grey scale (this happens on images with an
alpha channel). You can use the ``copy_image_on_background`` function to add a background color to your image.
.. code-block:: python
from PIL import Image
import zbarlight
file_path = './tests/fixtures/two_qr_codes.png'
with open(file_path, 'rb') as image_file:
image = Image.open(image_file)
image.load()
new_image = zbarlight.copy_image_on_background(image, color=zbarlight.WHITE) # <<<<<<<<<<<<<<<< Add this line <<<<
codes = zbarlight.scan_codes(['qrcode'], new_image)
print('QR codes: %s' % codes)
Some other cases without known solutions are shown in the ``scan_codes()`` tests (search for the expected failures). Any
clues on these cases are welcome.
| zbarlight | /zbarlight-3.0.tar.gz/zbarlight-3.0/README.rst | README.rst |
ChangeLog
=========
3.0 (2020-01-02)
----------------
- Add Python 3.8 support
- Drop Python 2.7 support (end-of-life 2020-01-01) **breaking change**
- Drop Python 3.4 support (end-of-life 2019-03-18) **breaking change**
2.3 (2019-03-21)
----------------
- Support https://github.com/mchehab/zbar zbar fork (used by ArchLinux)
2.2 (2019-01-21)
----------------
- Add official support for Python 3.7
- Deprecate Python 3.4 (end-of-life 2019-03-16)
2.1 (2018-06-04)
----------------
- Allow to search for more than one kind of bar code at once
- **deprecate** scan_codes(str, Image) in favor of scan_codes(list, Image)
2.0 (2018-01-24)
----------------
- Drop deprecated qr_code_scanner() method
- Add helper to add background color on image
1.2 (2017-03-09)
----------------
- Only return the asked symbology
1.1.0 (2017-01-24)
------------------
- Officially support Python 3.6.
- Drop Python 2.6 support
1.0.2 (2016-08-03)
------------------
- Fix Install for setuptools < 22.0
1.0.1 (2016-06-17)
------------------
* Use zest.releaser
* Use tox
* Do not include tests and docs in package
1.0.0 (2015-09-25)
------------------
* Add generic ``scan_codes()`` function (which can scan multiple codes in the same image)
* Fix Python 2.6 tests
* Exclude tests from package
0.1.1 (2014-12-16)
------------------
* Minor fixes on Readme
* Include requirements-dev.txt in Manifest
0.1.0 (2014-12-12)
------------------
* First public version.
| zbarlight | /zbarlight-3.0.tar.gz/zbarlight-3.0/Changelog.rst | Changelog.rst |
import os, sys
DEFAULT_VERSION = "0.6c7"
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
md5_data = {
'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
}
def _validate_md5(egg_name, data):
if egg_name in md5_data:
from md5 import md5
digest = md5(data).hexdigest()
if digest != md5_data[egg_name]:
print >>sys.stderr, (
"md5 validation of %s failed! (Possible download problem?)"
% egg_name
)
sys.exit(2)
return data
# The following code to parse versions is copied from pkg_resources.py so that
# we can parse versions without importing that module.
import re
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {'pre':'c', 'preview':'c','-':'final-','rc':'c','dev':'@'}.get
def _parse_version_parts(s):
for part in component_re.split(s):
part = replace(part,part)
if not part or part=='.':
continue
if part[:1] in '0123456789':
yield part.zfill(8) # pad for numeric comparison
else:
yield '*'+part
yield '*final' # ensure that alpha/beta/candidate are before final
def parse_version(s):
parts = []
for part in _parse_version_parts(s.lower()):
if part.startswith('*'):
if part<'*final': # remove '-' before a prerelease tag
while parts and parts[-1]=='*final-': parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1]=='00000000':
parts.pop()
parts.append(part)
return tuple(parts)
def setuptools_is_new_enough(required_version):
"""Return True if setuptools is already installed and has a version
number >= required_version."""
if 'pkg_resources' in sys.modules:
import pkg_resources
try:
pkg_resources.require('setuptools >= %s' % (required_version,))
except pkg_resources.VersionConflict:
# An insufficiently new version is installed.
return False
else:
return True
else:
try:
import pkg_resources
except ImportError:
# Okay it is not installed.
return False
else:
try:
pkg_resources.require('setuptools >= %s' % (required_version,))
except pkg_resources.VersionConflict:
# An insufficiently new version is installed.
pkg_resources.__dict__.clear() # "If you want to be absolutely sure... before deleting it." --said PJE on IRC
del sys.modules['pkg_resources']
return False
else:
pkg_resources.__dict__.clear() # "If you want to be absolutely sure... before deleting it." --said PJE on IRC
del sys.modules['pkg_resources']
return True
def use_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
min_version=None, download_delay=15
):
"""Automatically find/download setuptools and make it available on sys.path
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end with
a '/'). `to_dir` is the directory where setuptools will be downloaded, if
it is not already available. If `download_delay` is specified, it should
be the number of seconds that will be paused before initiating a download,
should one be required. If an older version of setuptools is installed,
this routine will print a message to ``sys.stderr`` and raise SystemExit in
an attempt to abort the calling script.
"""
if min_version is None:
min_version = version
if not setuptools_is_new_enough(min_version):
egg = download_setuptools(version, min_version, download_base, to_dir, download_delay)
sys.path.insert(0, egg)
import setuptools; setuptools.bootstrap_install_from = egg
def download_setuptools(
version=DEFAULT_VERSION, min_version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
delay = 15
):
"""Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download attempt.
"""
import urllib2, shutil
egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
url = download_base + egg_name
saveto = os.path.join(to_dir, egg_name)
src = dst = None
if not os.path.exists(saveto): # Avoid repeated downloads
try:
from distutils import log
if delay:
log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version >= %s to run (even to display
help). I will attempt to download setuptools for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
min_version, download_base, delay, url
); from time import sleep; sleep(delay)
log.warn("Downloading %s", url)
src = urllib2.urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = _validate_md5(egg_name, src.read())
dst = open(saveto,"wb"); dst.write(data)
finally:
if src: src.close()
if dst: dst.close()
return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall"""
if setuptools_is_new_enough(version):
if argv:
from setuptools.command.easy_install import main
main(argv)
else:
print "Setuptools version",version,"or greater has been installed."
print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
else:
egg = None
try:
egg = download_setuptools(version, min_version=version, delay=0)
sys.path.insert(0,egg)
from setuptools.command.easy_install import main
return main(list(argv)+[egg]) # we're done here
finally:
if egg and os.path.exists(egg):
os.unlink(egg)
def update_md5(filenames):
"""Update our built-in md5 registry"""
import re
from md5 import md5
for name in filenames:
base = os.path.basename(name)
f = open(name,'rb')
md5_data[base] = md5(f.read()).hexdigest()
f.close()
data = [" %r: %r,\n" % it for it in md5_data.items()]
data.sort()
repl = "".join(data)
import inspect
srcfile = inspect.getsourcefile(sys.modules[__name__])
f = open(srcfile, 'rb'); src = f.read(); f.close()
match = re.search("\nmd5_data = {\n([^}]+)}", src)
if not match:
print >>sys.stderr, "Internal error!"
sys.exit(2)
src = src[:match.start(1)] + repl + src[match.end(1):]
f = open(srcfile,'w')
f.write(src)
f.close()
if __name__=='__main__':
if '--md5update' in sys.argv:
sys.argv.remove('--md5update')
update_md5(sys.argv[1:])
else:
main(sys.argv[1:]) | zbase32 | /zbase32-1.1.5.tar.gz/zbase32-1.1.5/ez_setup.py | ez_setup.py |
LICENCE
You may use this package under the GNU General Public License, version
2 or, at your option, any later version. You may use this package
under the Transitive Grace Period Public Licence, version 1.0 or, at
your option, any later version. You may use this package under the
Simple Permissive Licence.
(You may choose to use this package under the terms of any of these
licences, at your option.)
See the file COPYING.GPL for the terms of the GNU General Public
License, version 2. See the file COPYING.TGPPL.html for the terms of
the Transitive Grace Period Public Licence, version 1.0. See the file
COPYING.SPL.txt for the terms of the Simple Permissive Licence.
| zbase62 | /zbase62-1.2.0.tar.gz/zbase62-1.2.0/README.txt | README.txt |
darcsver - generate version numbers from darcs revision control history
=======================================================================
What Does It Do
---------------
Create files containing version numbers, based upon the latest darcs
release tag.
If your source tree is coming from darcs (i.e. it is in a darcs
repository), this tool will determine the most recent release tag,
count the patches that have been applied since then, and compute a
version number to be written into _version.py (and optionally other
version files). This version number will be available by doing:
from your_package_name import __version__
Source trees that do not come from darcs (e.g. release tarballs, nightly
tarballs) and are not within a darcs repository should instead come with
a _version.py that was generated before the tarball was produced. In this
case, this tool will quietly exit without modifying the existing
_version.py.
'Release tags' are tags in the source repository that match the following
regexp:
^your_package_name-(\d+)(\.(\d+)(\.(\d+))?)?((a|b|c|rc)(\d+))?
For example, a tag named "your_package_name-1.2.3a1" marks release 1.2.3a1.
Installation
------------
With easy_install:
easy_install darcsver
Alternative manual installation:
tar -zxvf darcsver-X.Y.Z.tar.gz
cd darcsver-X.Y.Z
python setup.py install
Where X.Y.Z is a version number.
Alternatively, to make a specific package use darcsver without
installing darcsver system-wide:
Put "setup_requires=['darcsver']" in the call to setup() in the
package's setup.py file.
Usage
-----
There are two ways to use this: the command-line tool and the
setuptools plugin.
To use the command-line tool, execute it as:
darcsver $PACKAGE_NAME $PATH_TO_VERSION_PY
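For example, with placeholder names:

    darcsver my_package my_package/_version.py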
To use the setuptools plugin (which enables you to write "./setup.py
darcsver" and which cleverly figures out where the _version.py file
ought to go), you must first package your Python module with
`setup.py` and use setuptools.
Packaging with `setup.py` is well documented in the distutils manual:
http://docs.python.org/dist/dist.html
To use setuptools instead of distutils, just edit `setup.py` and
change
from distutils.core import setup
to
from setuptools import setup
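For instance, a minimal setup.py using the plugin might look like the
following sketch (the package name is a placeholder):

    from setuptools import setup

    setup(
        name='your_package_name',
        packages=['your_package_name'],
        setup_requires=['darcsver'],
    )

Running "./setup.py darcsver" will then compute the version from the
darcs history and write it into your_package_name/_version.py.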
References
----------
How to distribute Python modules with Distutils:
http://docs.python.org/dist/dist.html
Setuptools complete manual:
http://peak.telecommunity.com/DevCenter/setuptools
Thanks to Yannick Gingras for providing the prototype for this
README.txt.
| zbase62 | /zbase62-1.2.0.tar.gz/zbase62-1.2.0/darcsver-1.7.0.egg/share/doc/python-darcsver/README.txt | README.txt |
import os, string, sys, re
import xml.dom.minidom
import subprocess
PIPE=subprocess.PIPE
from distutils import log
# all() fallback for Python 2.4, which lacks the builtin.
def all(iterable):
for thing in iterable:
if not thing:
return False
return True
OUR_VERSION_BASE_RE_STR="(\d+)(\.(\d+)(\.(\d+))?)?((a|b|c)(\d+))?(\.dev(\d+))?"
try:
# If we can import pyutil.version_class then use its regex.
from pyutil import version_class
VERSION_BASE_RE_STR = version_class.VERSION_BASE_RE_STR
except (ImportError, AttributeError):
    # Else (perhaps a bootstrapping problem), use this regex, which was
    # copied from the pyutil source code on 2010-09-02.
VERSION_BASE_RE_STR=OUR_VERSION_BASE_RE_STR
def get_text(nodelist):
rc = ""
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc = rc + node.data
return rc
VERSION_BODY = '''
# This is the version of this tree, as created by %(versiontool)s from the darcs patch
# information: the main version number is taken from the most recent release
# tag. If some patches have been added since the last release, this will have a
# -NN "build number" suffix, or else a -rNN "revision number" suffix. Please see
# pyutil.version_class for a description of what the different fields mean.
__pkgname__ = "%(pkgname)s"
verstr = "%(pkgversion)s"
try:
from pyutil.version_class import Version as pyutil_Version
__version__ = pyutil_Version(verstr)
except (ImportError, ValueError):
# Maybe there is no pyutil installed.
from distutils.version import LooseVersion as distutils_Version
__version__ = distutils_Version(verstr)
'''
def write_version_py(verstr, outfname, EXE_NAME, version_body, pkgname):
f = open(outfname, "wt+")
f.write(version_body % {
'versiontool': EXE_NAME,
'pkgversion': verstr,
'pkgname': pkgname,
})
f.close()
def read_version_py(infname):
try:
verstrline = open(infname, "rt").read()
except EnvironmentError:
return None
else:
VSRE = r"^verstr = ['\"]([^'\"]*)['\"]"
mo = re.search(VSRE, verstrline, re.M)
if mo:
return mo.group(1)
def update(pkgname, verfilename, revision_number=False, loud=False, abort_if_snapshot=False, EXE_NAME="darcsver", version_body=VERSION_BODY):
"""
@param revision_number If true, count the total number of patches in all
history. If false, count the total number of patches since the most recent
release tag.
Returns a tuple of (exit code, new version string).
"""
if isinstance(verfilename, basestring):
verfilenames = [verfilename]
else:
verfilenames = verfilename
assert all([isinstance(vfn, basestring) for vfn in verfilenames]), [vfn for vfn in verfilenames if not isinstance(vfn, basestring)]
if isinstance(version_body, basestring):
verbodies = [version_body]
else:
verbodies = version_body
rc = -1
# First we try "darcs query repo" because if that fails then we
# won't try "darcs changes" at all, because "darcs changes" emits
# an ugly error message when run in not-a-repo.
try:
p = subprocess.Popen(["darcs", 'query', 'repo'], stdout=PIPE, stderr=PIPE, universal_newlines=True)
except OSError, ose:
if ose.errno == 2 and '~' in os.environ['PATH']:
expanded_path = os.environ['PATH'].replace('~', os.path.expanduser('~'))
msg = ("WARNING: 'darcs' was not found. However '~' was found in your PATH. \n"
"Please note that bugs in python cause it to fail to traverse '~' in \n"
"the user's PATH. Please fix your path, e.g. \nPATH=%s" )
log.warn(msg % (expanded_path,))
pass
else:
(output, errput) = p.communicate()
rc = p.returncode
if rc == 0:
cmd = ["changes", "--xml-output"]
if not revision_number:
cmd.append("--from-tag=^%s" % (pkgname,))
try:
p = subprocess.Popen(["darcs"] + cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True)
except OSError:
pass
else:
(output, errput) = p.communicate()
rc = p.returncode
if rc != 0 and errput:
log.info("%s: darcs wrote to stderr: '%s'" % (EXE_NAME, errput,))
errput = None
else:
if all([os.path.exists(vfn) for vfn in verfilenames]):
log.info("%s: using extant version file %s" % (EXE_NAME, verfilenames))
return (0, read_version_py(verfilenames[0]))
else:
log.warn("%s: didn't find version tags with darcs, and %s don't exist." % (EXE_NAME, verfilenames))
return (rc, None)
# Filter out bad chars that can cause the XML parser to give up in despair.
# (Thanks to lelit of the tailor project and ndurner and warner for this hack.)
allbadchars = "".join([chr(i) for i in range(0x0a) + [0x0b, 0x0c] + range(0x0e, 0x20) + range(0x7f,0x100)])
tt = string.maketrans(allbadchars, "-"*len(allbadchars))
output = output.translate(tt)
regexstr = "^TAG %s-(%s)$" % (pkgname, VERSION_BASE_RE_STR)
last_tag = None
# strip off trailing warning messages that darcs 2.3.1 writes to stdout
    i = output.find("</changelog>")
    if i != -1:
        output = output[:i + len("</changelog>")]
try:
doc = xml.dom.minidom.parseString(output)
except xml.parsers.expat.ExpatError:
# Okay maybe this is an error message instead of an XML output.
pass
else:
changelog = doc.getElementsByTagName("changelog")[0]
patches = changelog.getElementsByTagName("patch")
version_re = re.compile(regexstr)
count_since_last_patch = 0
if abort_if_snapshot:
for patch in patches:
name = get_text(patch.getElementsByTagName("name")[0].childNodes)
m = version_re.match(name)
if m:
last_tag = m.group(1)
last_tag = last_tag.encode("utf-8")
break
else:
sys.exit(0) # because abort_if_snapshot
else:
for patch in patches:
name = get_text(patch.getElementsByTagName("name")[0].childNodes)
m = version_re.match(name)
if m:
last_tag = m.group(1)
last_tag = last_tag.encode("utf-8")
break
else:
count_since_last_patch += 1
if not last_tag:
if errput:
log.info("%s: darcs wrote to stderr: '%s'" % (EXE_NAME, errput,))
errput = None
assert all([isinstance(vfn, basestring) for vfn in verfilenames]), [vfn for vfn in verfilenames if not isinstance(vfn, basestring)]
if all([os.path.exists(vfn) for vfn in verfilenames]):
log.warn("%s: I'm unable to find a tag in the darcs history matching \"%s\", so I'm leaving %s alone." % (EXE_NAME, regexstr, verfilenames,))
return (0, read_version_py(verfilenames[0]))
else:
log.warn("%s: I'm unable to find a tag in the darcs history matching \"%s\", and %s don't exist." % (EXE_NAME, regexstr, verfilenames,))
return (-1, None)
if revision_number:
if count_since_last_patch:
# this is an interim version
verstr = "%s-r%d" % (last_tag, len(patches))
else:
# this is a release
verstr = last_tag
else:
if count_since_last_patch:
# this is an interim version
verstr = "%s-%d" % (last_tag, count_since_last_patch)
else:
# this is a release
verstr = last_tag
for verfn, verbod in zip(verfilenames, verbodies):
write_version_py(verstr, verfn, EXE_NAME, verbod, pkgname)
log.info("%s: wrote '%s' into %s" % (EXE_NAME, verstr, verfn,))
return (0, verstr) | zbase62 | /zbase62-1.2.0.tar.gz/zbase62-1.2.0/darcsver-1.7.0.egg/darcsver/darcsvermodule.py | darcsvermodule.py |
import os
import setuptools
from darcsver import darcsvermodule
from distutils.errors import DistutilsSetupError
def validate_string_or_iter_of_strings(dist, attr, value):
# value is required to be a string or else a list of strings
if isinstance(value, basestring):
return
try:
for thing in value:
if not isinstance(thing, basestring):
raise DistutilsSetupError("%r is required to be a string or an iterable of strings (got %r)" % (attr, value))
except TypeError:
raise DistutilsSetupError("%r is required to be a string or an iterable of strings (got %r)" % (attr, value))
def validate_versionfiles(dist, attr, value):
return validate_string_or_iter_of_strings(dist, attr, value)
def validate_versionbodies(dist, attr, value):
return validate_string_or_iter_of_strings(dist, attr, value)
PYTHON_VERSION_BODY='''
# This is the version of this tree, as created by %(versiontool)s from the darcs patch
# information: the main version number is taken from the most recent release
# tag. If some patches have been added since the last release, this will have a
# -NN "build number" suffix, or else a -rNN "revision number" suffix. Please see
# pyutil.version_class for a description of what the different fields mean.
__pkgname__ = "%(pkgname)s"
verstr = "%(pkgversion)s"
try:
from pyutil.version_class import Version as pyutil_Version
__version__ = pyutil_Version(verstr)
except (ImportError, ValueError):
# Maybe there is no pyutil installed, or this may be an older version of
# pyutil.version_class which does not support SVN-alike revision numbers.
from distutils.version import LooseVersion as distutils_Version
__version__ = distutils_Version(verstr)
'''
class DarcsVer(setuptools.Command):
description = "generate a version number from darcs history"
user_options = [
        ('project-name', None, "name of the project as it appears in the project's release tags (defaults to the distribution name)"),
('filename', None, "path to file into which the version number should be written (defaults to the package directory's _version.py)"),
('count-all-patches', None, "If true, count the total number of patches in all history. If false, count the total number of patches since the most recent release tag."),
        ('abort-if-snapshot', None, "If true, and the current version is a snapshot (not a release tag), then immediately exit the process with exit code 0."),
]
def initialize_options(self):
self.project_name = None
self.filename = None
self.count_all_patches = None
self.abort_if_snapshot = None
def finalize_options(self):
if self.project_name is None:
self.project_name = self.distribution.get_name()
# If the user passed --filename on the cmdline, override
# the setup.py's versionfiles argument.
if self.filename is not None:
if not isinstance(self.filename, basestring):
raise TypeError("filename is required to be a string, not %s, filename: %s" % (type(self.filename), self.filename))
self.distribution.versionfiles = [self.filename]
if self.abort_if_snapshot is None:
self.abort_if_snapshot=False
def run(self):
if self.distribution.versionfiles is None:
toppackage = ''
# If there is a package with the same name as the project name and
# there is a directory by that name then use that.
packagedir = None
if self.distribution.packages and self.project_name in self.distribution.packages:
toppackage = self.project_name
srcdir = ''
if self.distribution.package_dir:
srcdir = self.distribution.package_dir.get(toppackage)
                    if srcdir is None:
srcdir = self.distribution.package_dir.get('', '')
packagedir = os.path.join(srcdir, toppackage)
if packagedir is None or not os.path.isdir(packagedir):
# Else, if there is a singly-rooted tree of packages, use the
# root of that.
if self.distribution.packages:
for package in self.distribution.packages:
if not toppackage:
toppackage = package
else:
if toppackage.startswith(package+"."):
toppackage = package
else:
if not package.startswith(toppackage+"."):
# Not singly-rooted
toppackage = ''
break
srcdir = ''
if self.distribution.package_dir:
srcdir = self.distribution.package_dir.get(toppackage)
if srcdir is None:
srcdir = self.distribution.package_dir.get('', '')
packagedir = os.path.join(srcdir, toppackage)
self.distribution.versionfiles = [os.path.join(packagedir, '_version.py')]
if self.distribution.versionbodies is None:
self.distribution.versionbodies = [PYTHON_VERSION_BODY]
assert all([isinstance(vfn, basestring) for vfn in self.distribution.versionfiles]), self.distribution.versionfiles
(rc, verstr) = darcsvermodule.update(self.project_name, self.distribution.versionfiles, self.count_all_patches, abort_if_snapshot=self.abort_if_snapshot, EXE_NAME="setup.py darcsver", version_body=self.distribution.versionbodies)
if rc == 0:
self.distribution.metadata.version = verstr | zbase62 | /zbase62-1.2.0.tar.gz/zbase62-1.2.0/darcsver-1.7.0.egg/darcsver/setuptools_command.py | setuptools_command.py |
# zbaseball-client
[](https://badge.fury.io/py/zbaseballdata)
A python client for the [zBaseballData](https://www.zbaseballdata.com/) API.
*"Retrosheet Data as a Service"*
### Note
This is a simple client that is still under development. It may have bugs, and test coverage is light for now; both will improve.
### Getting Started
1. Create a free account @ [zBaseballData](https://www.zbaseballdata.com/) & confirm your email
2. Install the Python Client
```bash
pip install zbaseballdata
```
3. Initialize a client
```python
from zbaseballdata.client import ZBaseballDataClient
# Supply the credentials you used during the sign-up process
client = ZBaseballDataClient(username="USERNAME", password="PASSWORD")
```
4. Begin Pulling Data
```python
from pprint import pprint
players = list(client.get_players(search='jeter'))
pprint(players)
"""
[{'retro_id': 'jeted001',
'first_name': 'Derek',
'last_name': 'Jeter',
'debut': datetime.date(1995, 5, 29),
'throw': 'R',
'bat': 'R'},
{'retro_id': 'jetej101',
'first_name': 'Johnny',
'last_name': 'Jeter',
'debut': datetime.date(1969, 6, 14),
'throw': 'R',
'bat': 'R'},
{'retro_id': 'jetes001',
'first_name': 'Shawn',
'last_name': 'Jeter',
'debut': datetime.date(1992, 6, 13),
'throw': 'R',
'bat': 'L'}]
"""
```
### Example Code
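A short sketch building on the `get_players` call shown above. The `search`
parameter and the player fields come straight from the sample output; the
filtering itself is plain Python.

```python
from zbaseballdata.client import ZBaseballDataClient

client = ZBaseballDataClient(username="USERNAME", password="PASSWORD")

# Collect every left-handed batter matching a search term
lefties = [
    player for player in client.get_players(search="jeter")
    if player["bat"] == "L"
]
for player in lefties:
    print(player["first_name"], player["last_name"], player["debut"].year)
```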
| zbaseballdata | /zbaseballdata-0.2.0.tar.gz/zbaseballdata-0.2.0/README.md | README.md |
import ast
import gzip
import io
import os
import platform
import re
import sys
def header(title):
"""Print title in between 2 lines of 72 "-" """
print(72*"-")
print(title)
print(72*"-")
def get_kernel_config_info():
""" Find zone arguments
Scan through specific modules for zone arguments
using the the config file/s,
"""
long_uname = platform.uname()
printed_kernel_version = long_uname.release.strip('.#')
file_path_check = r'/proc/config.gz'
flag = os.path.isfile(file_path_check)
if flag:
file_name = '/proc/config.gz'
gz_open_file = gzip.open(file_name, 'rb')
open_file = io.TextIOWrapper(gz_open_file, encoding='utf-8')
else:
long_uname = platform.uname()
printed_kernel_version = long_uname.release.strip('.#')
file_name = '/boot/config-' + printed_kernel_version
open_file = open(file_name)
    for line in open_file:
if line.startswith('#'):
continue
        if txt_pattern := re.match(r'([0-9a-zA-Z_]+)(.*)', line):
            # Yields pairs such as ('CONFIG_BLK_DEV_ZONED', '=y').
            name, val = txt_pattern.groups()
            yield name, val
def get_distro_general_information():
"""Check the distro to use os-release for pretty name
a) Checks to see if the file exits in either location
b) When found strip the whitespace skip the commented lines
c) Match the pattern that is being asked in the pretty name
using regex
d) if no matched are found show the unmatched lines.
"""
try:
file_name = '/etc/os-release'
open_file = open(file_name)
except FileNotFoundError:
file_name = '/usr/lib/os-release'
open_file = open(file_name)
for each_line, line in enumerate(open_file):
line = line.rstrip()
if not line or line.startswith('#'):
continue
if txt_pattern := re.match(r'([a-zA-Z0-9_]+)=(.*)', line):
name, val = txt_pattern.groups()
if val and val[0] in '"\'':
val = ast.literal_eval(val)
yield name, val
else:
print(f'{file_name}:{each_line + 1}: bad line {line!r}',
file=sys.stderr)
def get_kernel_general_information():
""" Kernel Version information.
Formatted, and determine if the system kernel is supported.
"""
characters = 4
uname = platform.uname()
kernel_version = uname.release
format_kernel = (kernel_version[:characters]) + "#"
display_kernel = (format_kernel).strip('.#')
numeric_value = float(display_kernel)
min_numeric_value = float('4.10')
strdisplay = '- Kernel Version: ' + display_kernel
if numeric_value >= min_numeric_value:
print(
strdisplay
)
else:
print(
strdisplay
) | zbd-tools | /zbd_tools-1.0.1-py3-none-any.whl/check/helpers.py | helpers.py |
import glob
import argparse
import os
import sys
import shutil
from check import helpers
def evaluate_using_modinfo():
""" Review the current modinfo data.
Check for null_blk and scsi_debug Zoned support
"""
cmd_scsi_debug = os.popen('/sbin/modinfo scsi_debug | grep zbc')
cmd_null_blk = os.popen('/sbin/modinfo null_blk | grep zoned:')
scsi_debug_result = str(cmd_scsi_debug.read())
null_blk_reslut = str(cmd_null_blk.read())
check_scsi_debug_result = len(scsi_debug_result)
check_null_blk_reslut = len(null_blk_reslut)
if check_scsi_debug_result == 0:
print(
"\n *modinfo indicates current loaded kernel does not"
" support zoned scsi_debug")
else:
pass
if check_null_blk_reslut == 0:
print(
"\n *modinfo indicates current loaded kernel does not support"
" zoned null_blk")
else:
pass
def evaluate_kernel_config_features():
""" Gather the kernel features available.
Then evaluate the statues and display the result to the user
"""
# get the results from the function that reads the config file
search_kernel_config = dict(helpers.get_kernel_config_info())
config_data_values = {
"=m": "",
"=y": "",
"=n": "not ",
" is not set": "not "
}
# Core support
core_support = config_data_values.get(
search_kernel_config.get('CONFIG_BLK_DEV_ZONED'),
"not ")
print("- Zoned block devices: " + core_support + "supported")
if core_support == "not ":
print("This system does not support zoned block devices.")
print("Only applications using passthrough direct access")
print("devices will work.")
sys.exit()
# Device types
print("- Devices types:")
print(" - SAS and SATA SMR hard-disks: supported")
print(" - NVMe ZNS devices: " +
config_data_values.get(
search_kernel_config.get('CONFIG_BLK_DEV_NVME'),
"not ") + "supported")
print(" - SCSI debug device ZBC emulation: " +
config_data_values.get(
search_kernel_config.get('CONFIG_SCSI_DEBUG'),
"not ") + "supported")
print(" - null_blk device zoned mode: " +
config_data_values.get(
search_kernel_config.get('CONFIG_BLK_DEV_NULL_BLK'),
"not ") + "supported")
# File systems
print("- file systems:")
print(" - zonefs: " +
config_data_values.get(
search_kernel_config.get('CONFIG_ZONEFS_FS'),
"not ") + "supported")
print(" - f2fs zoned mode: " +
config_data_values.get(
search_kernel_config.get('CONFIG_F2FS_FS'),
"not ") + "supported")
print(" - btrfs zoned mode: " +
config_data_values.get(
search_kernel_config.get('CONFIG_BTRFS_FS'),
"not ") + "supported")
# Device mapper targets
print("- Device mapper targets:")
print(" - dm-linear: " +
config_data_values.get(
search_kernel_config.get('CONFIG_BLK_DEV_DM'),
"not ") + "supported")
print(" - dm-flakey: " +
config_data_values.get(
search_kernel_config.get('CONFIG_DM_FLAKEY'),
"not ") + "supported")
print(" - dm-crypt: " +
config_data_values.get(
search_kernel_config.get('CONFIG_DM_CRYPT'),
"not ") + "supported")
print(" - dm-zoned: " +
config_data_values.get(
search_kernel_config.get('CONFIG_DM_ZONED'),
"not ") + "supported")
def evaluate_kernel_api():
"""blkzoned.h"""
if os.path.exists('/usr/include/linux/blkzoned.h'):
print("- Zone management kernel API header file: installed")
else:
print("- Zone management kernel API header: not installed")
print(" WARNING: the kernel zone management API header file")
print(" /usr/include/linux/blkzoned.h was not found."
" User libraries")
print(" and applications using the kernel zone management"
" API will")
print(" not compile correctly or will be compiled without"
" zoned")
print(" block device support.")
def evaluate_fio():
"""Determine if fio is installed"""
if shutil.which("fio") is None:
print("- fio: not installed")
return
ver = os.popen('fio --version | head -n 1')
ver_text = str(ver.read())
print("- fio: installed, version " + ver_text.rstrip())
def evaluate_nvme():
"""Determine if nvme-cli is installed"""
if shutil.which("nvme") is None:
print("- nvme-cli: not installed")
return
ver = os.popen('nvme --version | head -n 1 | cut -f3 -d" "')
ver_text = str(ver.read())
print("- nvme-cli: installed, version " + ver_text.rstrip())
def evaluate_dm_zoned_tools():
"""Determine if dm-zoned-tools is installed"""
if shutil.which("dmzadm") is None:
print("- dm-zoned-tools: not installed")
return
ver = os.popen('dmzadm --version')
ver_text = str(ver.read())
print("- dm-zoned-tools: installed, version " + ver_text.rstrip())
def evaluate_zonefs_tools():
"""Determine if zonefs-tools is installed"""
if shutil.which("mkzonefs") is None:
print("- zonefs-tools: not installed")
return
ver = os.popen('mkzonefs --version | head -n 1 | cut -f3 -d" "')
ver_text = str(ver.read())
print("- zonefs-tools: installed, version " + ver_text.rstrip())
def evaluate_packages():
"""Determine what packages are installed"""
evaluate_fio()
evaluate_nvme()
evaluate_dm_zoned_tools()
evaluate_zonefs_tools()
def evaluate_library_dynamic(lib):
"""Determine if a dynamic library is installed"""
libso = lib + ".so"
lib64path = r'/usr/lib64/' + libso + '.*'
libpath = r'/usr/lib/' + libso + '.*'
if os.path.exists("/usr/lib64/" + libso) or os.path.exists(
"/usr/lib/" + libso):
ver = os.popen("pkg-config --modversion " + lib)
ver_text = str(ver.read())
print(" - Dynamic library installed, version " + ver_text.rstrip())
elif glob.glob(lib64path) or glob.glob(libpath):
print(" - Dynamic library installed")
else:
print(" - Dynamic library not installed")
def evaluate_library_static(lib):
"""Determine if a static library is installed"""
liba = lib + ".a"
if os.path.exists("/usr/lib64/" + liba) or os.path.exists(
"/usr/lib/" + liba):
print(" - Static library installed")
else:
print(" - Static library not installed")
def evaluate_library_header(header):
"""Determine if a library development header files are installed"""
if os.path.exists("/usr/include/" + header):
print(" - Development header files installed")
else:
print(" - Development header files not installed")
def evaluate_libraries():
""" Determine if libraries are installed """
print("- libzbc:")
evaluate_library_dynamic("libzbc")
evaluate_library_static("libzbc")
evaluate_library_header("libzbc/zbc.h")
print("- libzbd:")
evaluate_library_dynamic("libzbd")
evaluate_library_static("libzbd")
evaluate_library_header("libzbd/zbd.h")
print("- libnvme:")
evaluate_library_dynamic("libnvme")
evaluate_library_static("libnvme")
evaluate_library_header("libnvme.h")
def main():
"""Main Script execution"""
# Get General Information about the host OS
helpers.header("System Information:")
os_release = dict(helpers.get_distro_general_information())
pretty_name = os_release.get('PRETTY_NAME')
print(f'- Distribution: {pretty_name}')
helpers.get_kernel_general_information()
print()
# Get the kernel configuration information
helpers.header("Kernel features:")
evaluate_kernel_config_features()
evaluate_using_modinfo()
print()
# User kernel API
helpers.header("User Kernel zone management API:")
evaluate_kernel_api()
print()
    # Check to see what libraries exist
helpers.header("User Libraries:")
evaluate_libraries()
print()
    # Check to see what packages exist
helpers.header("User Applications:")
evaluate_packages()
print()
def version_information():
""" Show the Version of the script"""
print("1.0.1")
def cli_arguments():
"""Command Line Arguyments used"""
cli_parser = argparse.ArgumentParser()
cli_parser.add_argument("--version", help="show the version of zbd-check",
action="store_true")
args = cli_parser.parse_args()
if args.version:
version_information()
if len(sys.argv) == 1:
main() | zbd-tools | /zbd_tools-1.0.1-py3-none-any.whl/check/__init__.py | __init__.py |
# `zbench` <img align="right" src="https://badge.fury.io/py/zbench.svg">
This package benchmarks lossless compression algorithms like `gzip` and `bzip2`,
facilitating quick comparison of the trade-offs between disk space and CPU time on _your_ file(s).
It is implemented in Python and provides a command line tool, `zbench`.
Install from [PyPI](https://pypi.org/project/zbench/):
```sh
pip install zbench
```
## Example
```console
$ zbench setup.cfg
path algorithm size percentage time
setup.cfg raw_py 853 100.000% 0.000
setup.cfg gzip_py 488 57.210% 0.000
setup.cfg bz2_py 547 64.127% 0.000
setup.cfg lzma_py 572 67.057% 0.011
setup.cfg zlib_py 476 55.803% 0.000
```
## License
Copyright 2020 Christopher Brown.
[MIT Licensed](https://chbrown.github.io/licenses/MIT/#2020).
| zbench | /zbench-0.1.0.tar.gz/zbench-0.1.0/README.md | README.md |
zbg
===
Python library for zbg encoding.
Installation
===
pip install zbg
Use
===
Please see the ZBG standard for details on what objects are supported. The short version: you may serialize anything consisting only of:
+ Lists
+ Dictionaries
+ Arbitrary binary
In this particular implementation, to be correctly interpreted, the corresponding Python entities should be compatible with:
+ Mutable sequence (see containers.abc.MutableSequence)
+ Mapping (see containers.abc.Mapping)
+ Bytes-like objects (support the buffer protocol)
```dump(obj)```: Serializes the entire ```obj```. The object must be as described above. Returns bytes.
```dumpf(obj, f)```: Serializes the ```obj``` to a standalone file. ```f``` is the *open* file handle to output to. ```dumpf``` will not call close(). ```f``` should be opened with mode ```'w+b'```.
```load(zbg)```: Loads a zbg-serialized bytes-like object into memory. Returns an object corresponding to whatever was contained within ```zbg```.
```loadf(f)```: Loads a standalone zbg-serialized file from ```f```. Returns an object corresponding to whatever was contained within the file. ```f``` should be opened with mode ```'rb'``` and will not be closed by the load.
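A minimal round-trip sketch, assuming only the API described above (the
object and the file name are placeholders):

```python
import zbg

obj = {b'name': b'zbg', b'values': [b'\x00\x01', b'more binary']}

data = zbg.dump(obj)          # serialize to bytes
roundtrip = zbg.load(data)    # deserialize from bytes

with open('example.zbg', 'w+b') as f:
    zbg.dumpf(obj, f)         # write a standalone zbg file
with open('example.zbg', 'rb') as f:
    roundtrip = zbg.loadf(f)  # read it back
```
| zbg | /zbg-0.1.1.zip/zbg-0.1.1/README.rst | README.rst |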
import os
import shutil
import sys
import tempfile
import zipfile
import optparse
import subprocess
import platform
import textwrap
import contextlib
from distutils import log
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
DEFAULT_VERSION = "14.3"
DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"
DEFAULT_SAVE_DIR = os.curdir
def _python_cmd(*args):
"""
Execute a command.
Return True if the command succeeded.
"""
args = (sys.executable,) + args
return subprocess.call(args) == 0
def _install(archive_filename, install_args=()):
"""Install Setuptools."""
with archive_context(archive_filename):
# installing
log.warn('Installing Setuptools')
if not _python_cmd('setup.py', 'install', *install_args):
log.warn('Something went wrong during the installation.')
log.warn('See the error message above.')
# exitcode will be 2
return 2
def _build_egg(egg, archive_filename, to_dir):
"""Build Setuptools egg."""
with archive_context(archive_filename):
# building an egg
log.warn('Building a Setuptools egg in %s', to_dir)
_python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
# returning the result
log.warn(egg)
if not os.path.exists(egg):
raise IOError('Could not build the egg.')
class ContextualZipFile(zipfile.ZipFile):
"""Supplement ZipFile class to support context manager for Python 2.6."""
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __new__(cls, *args, **kwargs):
"""Construct a ZipFile or ContextualZipFile as appropriate."""
if hasattr(zipfile.ZipFile, '__exit__'):
return zipfile.ZipFile(*args, **kwargs)
return super(ContextualZipFile, cls).__new__(cls)
@contextlib.contextmanager
def archive_context(filename):
"""
Unzip filename to a temporary directory, set to the cwd.
The unzipped target is cleaned up after.
"""
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
with ContextualZipFile(filename) as archive:
archive.extractall()
# going in the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
yield
finally:
os.chdir(old_wd)
shutil.rmtree(tmpdir)
def _do_download(version, download_base, to_dir, download_delay):
"""Download Setuptools."""
egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
% (version, sys.version_info[0], sys.version_info[1]))
if not os.path.exists(egg):
archive = download_setuptools(version, download_base,
to_dir, download_delay)
_build_egg(egg, archive, to_dir)
sys.path.insert(0, egg)
# Remove previously-imported pkg_resources if present (see
# https://bitbucket.org/pypa/setuptools/pull-request/7/ for details).
if 'pkg_resources' in sys.modules:
del sys.modules['pkg_resources']
import setuptools
setuptools.bootstrap_install_from = egg
def use_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=DEFAULT_SAVE_DIR, download_delay=15):
"""
Ensure that a setuptools version is installed.
Return None. Raise SystemExit if the requested version
or later cannot be installed.
"""
to_dir = os.path.abspath(to_dir)
# prior to importing, capture the module state for
# representative modules.
rep_modules = 'pkg_resources', 'setuptools'
imported = set(sys.modules).intersection(rep_modules)
try:
import pkg_resources
pkg_resources.require("setuptools>=" + version)
# a suitable version is already installed
return
except ImportError:
# pkg_resources not available; setuptools is not installed; download
pass
except pkg_resources.DistributionNotFound:
# no version of setuptools was found; allow download
pass
except pkg_resources.VersionConflict as VC_err:
if imported:
_conflict_bail(VC_err, version)
# otherwise, unload pkg_resources to allow the downloaded version to
# take precedence.
del pkg_resources
_unload_pkg_resources()
return _do_download(version, download_base, to_dir, download_delay)
def _conflict_bail(VC_err, version):
"""
Setuptools was imported prior to invocation, so it is
unsafe to unload it. Bail out.
"""
conflict_tmpl = textwrap.dedent("""
The required version of setuptools (>={version}) is not available,
and can't be installed while this script is running. Please
install a more recent version first, using
'easy_install -U setuptools'.
(Currently using {VC_err.args[0]!r})
""")
msg = conflict_tmpl.format(**locals())
sys.stderr.write(msg)
sys.exit(2)
def _unload_pkg_resources():
del_modules = [
name for name in sys.modules
if name.startswith('pkg_resources')
]
for mod_name in del_modules:
del sys.modules[mod_name]
def _clean_check(cmd, target):
"""
Run the command to download target.
If the command fails, clean up before re-raising the error.
"""
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
if os.access(target, os.F_OK):
os.unlink(target)
raise
def download_file_powershell(url, target):
"""
Download the file at url to target using Powershell.
Powershell will validate trust.
Raise an exception if the command cannot complete.
"""
target = os.path.abspath(target)
ps_cmd = (
"[System.Net.WebRequest]::DefaultWebProxy.Credentials = "
"[System.Net.CredentialCache]::DefaultCredentials; "
"(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)"
% vars()
)
cmd = [
'powershell',
'-Command',
ps_cmd,
]
_clean_check(cmd, target)
def has_powershell():
"""Determine if Powershell is available."""
if platform.system() != 'Windows':
return False
cmd = ['powershell', '-Command', 'echo test']
with open(os.path.devnull, 'wb') as devnull:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except Exception:
return False
return True
download_file_powershell.viable = has_powershell
def download_file_curl(url, target):
    """Download the file at url to target using curl."""
    cmd = ['curl', url, '--silent', '--output', target]
    _clean_check(cmd, target)
def has_curl():
    """Determine if curl is available."""
cmd = ['curl', '--version']
with open(os.path.devnull, 'wb') as devnull:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except Exception:
return False
return True
download_file_curl.viable = has_curl
def download_file_wget(url, target):
    """Download the file at url to target using wget."""
    cmd = ['wget', url, '--quiet', '--output-document', target]
    _clean_check(cmd, target)
def has_wget():
    """Determine if wget is available."""
cmd = ['wget', '--version']
with open(os.path.devnull, 'wb') as devnull:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except Exception:
return False
return True
download_file_wget.viable = has_wget
def download_file_insecure(url, target):
"""Use Python to download the file, without connection authentication."""
src = urlopen(url)
try:
# Read all the data in one block.
data = src.read()
finally:
src.close()
# Write all the data in one block to avoid creating a partial file.
with open(target, "wb") as dst:
dst.write(data)
download_file_insecure.viable = lambda: True
def get_best_downloader():
    """Return the first viable download function, in order of preference."""
downloaders = (
download_file_powershell,
download_file_curl,
download_file_wget,
download_file_insecure,
)
viable_downloaders = (dl for dl in downloaders if dl.viable())
return next(viable_downloaders, None)
def download_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=DEFAULT_SAVE_DIR, delay=15,
downloader_factory=get_best_downloader):
"""
Download setuptools from a specified location and return its filename.
`version` should be a valid setuptools version number that is available
as an sdist for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download
attempt.
``downloader_factory`` should be a function taking no arguments and
returning a function for downloading a URL to a target.
"""
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
zip_name = "setuptools-%s.zip" % version
url = download_base + zip_name
saveto = os.path.join(to_dir, zip_name)
if not os.path.exists(saveto): # Avoid repeated downloads
log.warn("Downloading %s", url)
downloader = downloader_factory()
downloader(url, saveto)
return os.path.realpath(saveto)
def _build_install_args(options):
"""
Build the arguments to 'python setup.py install' on the setuptools package.
Returns list of command line arguments.
"""
return ['--user'] if options.user_install else []
def _parse_args():
"""Parse the command line for options."""
parser = optparse.OptionParser()
parser.add_option(
'--user', dest='user_install', action='store_true', default=False,
help='install in user site package (requires Python 2.6 or later)')
parser.add_option(
'--download-base', dest='download_base', metavar="URL",
default=DEFAULT_URL,
help='alternative URL from where to download the setuptools package')
parser.add_option(
'--insecure', dest='downloader_factory', action='store_const',
const=lambda: download_file_insecure, default=get_best_downloader,
help='Use internal, non-validating downloader'
)
parser.add_option(
'--version', help="Specify which version to download",
default=DEFAULT_VERSION,
)
parser.add_option(
'--to-dir',
help="Directory to save (and re-use) package",
default=DEFAULT_SAVE_DIR,
)
options, args = parser.parse_args()
# positional arguments are ignored
return options
def _download_args(options):
"""Return args for download_setuptools function from cmdline args."""
return dict(
version=options.version,
download_base=options.download_base,
downloader_factory=options.downloader_factory,
to_dir=options.to_dir,
)
def main():
"""Install or upgrade setuptools and EasyInstall."""
options = _parse_args()
archive = download_setuptools(**_download_args(options))
return _install(archive, _build_install_args(options))
if __name__ == '__main__':
sys.exit(main()) | zbg | /zbg-0.1.1.zip/zbg-0.1.1/ez_setup.py | ez_setup.py |