import collections
import threading
import sys
import grpc
from . import health_pb2 as _health_pb2
from . import health_pb2_grpc as _health_pb2_grpc
if sys.version_info >= (3, 6):
# Exposes AsyncHealthServicer as public API.
from . import _async as aio # pylint: disable=unused-import
# The service name of the health checking servicer.
SERVICE_NAME = _health_pb2.DESCRIPTOR.services_by_name['Health'].full_name
# The entry of overall health for the entire server.
OVERALL_HEALTH = ''
class _Watcher():
def __init__(self):
self._condition = threading.Condition()
self._responses = collections.deque()
self._open = True
def __iter__(self):
return self
def _next(self):
with self._condition:
while not self._responses and self._open:
self._condition.wait()
if self._responses:
return self._responses.popleft()
else:
raise StopIteration()
def next(self):
return self._next()
def __next__(self):
return self._next()
def add(self, response):
with self._condition:
self._responses.append(response)
self._condition.notify()
def close(self):
with self._condition:
self._open = False
self._condition.notify()
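
# A minimal sketch of the blocking-iterator behaviour above (illustrative only;
# in real use, responses are pushed by the servicer's send_response_callback):
#
#     >>> w = _Watcher()
#     >>> w.add("first response")
#     >>> next(w)
#     'first response'
#     >>> w.close()  # an empty, closed watcher makes next() raise StopIteration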
def _watcher_to_send_response_callback_adapter(watcher):
def send_response_callback(response):
if response is None:
watcher.close()
else:
watcher.add(response)
return send_response_callback
class HealthServicer(_health_pb2_grpc.HealthServicer):
"""Servicer handling RPCs for service statuses."""
def __init__(self,
experimental_non_blocking=True,
experimental_thread_pool=None):
self._lock = threading.RLock()
self._server_status = {"": _health_pb2.HealthCheckResponse.SERVING}
self._send_response_callbacks = {}
self.Watch.__func__.experimental_non_blocking = experimental_non_blocking
self.Watch.__func__.experimental_thread_pool = experimental_thread_pool
self._gracefully_shutting_down = False
def _on_close_callback(self, send_response_callback, service):
def callback():
with self._lock:
self._send_response_callbacks[service].remove(
send_response_callback)
send_response_callback(None)
return callback
def Check(self, request, context):
with self._lock:
status = self._server_status.get(request.service)
if status is None:
context.set_code(grpc.StatusCode.NOT_FOUND)
return _health_pb2.HealthCheckResponse()
else:
return _health_pb2.HealthCheckResponse(status=status)
# pylint: disable=arguments-differ
def Watch(self, request, context, send_response_callback=None):
blocking_watcher = None
if send_response_callback is None:
# The server does not support the experimental_non_blocking
# parameter. For backwards compatibility, return a blocking response
# generator.
blocking_watcher = _Watcher()
send_response_callback = _watcher_to_send_response_callback_adapter(
blocking_watcher)
service = request.service
with self._lock:
status = self._server_status.get(service)
if status is None:
status = _health_pb2.HealthCheckResponse.SERVICE_UNKNOWN # pylint: disable=no-member
send_response_callback(
_health_pb2.HealthCheckResponse(status=status))
if service not in self._send_response_callbacks:
self._send_response_callbacks[service] = set()
self._send_response_callbacks[service].add(send_response_callback)
context.add_callback(
self._on_close_callback(send_response_callback, service))
return blocking_watcher
def set(self, service, status):
"""Sets the status of a service.
Args:
service: string, the name of the service.
status: HealthCheckResponse.status enum value indicating the status of
the service
"""
with self._lock:
if self._gracefully_shutting_down:
return
else:
self._server_status[service] = status
if service in self._send_response_callbacks:
for send_response_callback in self._send_response_callbacks[
service]:
send_response_callback(
_health_pb2.HealthCheckResponse(status=status))
def enter_graceful_shutdown(self):
"""Permanently sets the status of all services to NOT_SERVING.
This should be invoked when the server is entering a graceful shutdown
period. After this method is invoked, future attempts to set the status
of a service will be ignored.
This is an EXPERIMENTAL API.
"""
with self._lock:
if self._gracefully_shutting_down:
return
else:
for service in self._server_status:
self.set(service,
_health_pb2.HealthCheckResponse.NOT_SERVING) # pylint: disable=no-member
                self._gracefully_shutting_down = True
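
A minimal server-side usage sketch for the servicer above (import paths follow this package's layout; the service name and port are arbitrary):

```python
from concurrent import futures

import grpc
from zdppy_grpc.health.v1 import health, health_pb2, health_pb2_grpc

server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
servicer = health.HealthServicer()
health_pb2_grpc.add_HealthServicer_to_server(servicer, server)
server.add_insecure_port("[::]:50051")
server.start()

# Mark a service as healthy; active Watch streams are notified immediately.
servicer.set("my.package.MyService", health_pb2.HealthCheckResponse.SERVING)

# On shutdown, flip every registered service to NOT_SERVING and
# ignore any further set() calls.
servicer.enter_graceful_shutdown()
server.stop(5)
```

--- end of file: zdppy_grpc/health/v1/health.py (package: zdppy-grpc) ---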
import json
import os
from typing import Union, Tuple, List, Dict
from .encoder import DecimalEncoder
def loads(json_str: str):
    """
    Parse a JSON string into a Python object.
    :param json_str: the JSON string to parse
    :return: the parsed Python object
    """
    return json.loads(json_str)


def dumps(py_obj):
    """
    Serialize a Python object to a JSON string.
    :param py_obj: the Python object to serialize
    :return: the JSON string
    """
    return json.dumps(py_obj, cls=DecimalEncoder)
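
# A minimal usage sketch (assumes DecimalEncoder renders decimal.Decimal
# values as JSON numbers; the exact rendering depends on its implementation):
#
#     >>> from decimal import Decimal
#     >>> s = dumps({"price": Decimal("19.99"), "qty": 3})
#     >>> loads(s)["qty"]
#     3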
def load(file_name: str):
    """
    Load JSON data from a file.
    :param file_name: path of the JSON file to read
    :return: the parsed Python object
    """
    with open(file_name, "r", encoding="UTF-8") as f:
        return json.load(f)


def dump(file_name: str, data):
    """
    Serialize data to a JSON file.
    :param file_name: path of the JSON file to write
    :param data: the Python object to serialize
    """
    with open(file_name, "w+", encoding="UTF-8") as f:
        json.dump(data, f, ensure_ascii=False, indent=2)
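
# File round-trip sketch (the path is illustrative):
#
#     >>> dump("demo.json", {"name": "zdppy", "ok": True})
#     >>> load("demo.json")
#     {'name': 'zdppy', 'ok': True}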
class Json:
    def __init__(self,
                 config: str = "config/config.json",
                 config_secret: str = "config/secret/.config.json"
                 ):
        """
        Initialize the Json object.
        :param config: path of the public config file
        :param config_secret: path of the private (secret) config file
        """
        self.__data = None
        self.__config_file = config
        self.__config_secret_file = config_secret
        self.config = {}  # the merged configuration
        self.__init_config()  # load the initial configuration

    def __init_config(self):
        """
        Load the initial configuration.
        :return:
        """
        # Read the public config
        if os.path.exists(self.__config_file):
            with open(self.__config_file, "r") as f:
                config = json.load(f)
                self.config.update(config)
        # Read the secret config (its values override the public ones)
        if os.path.exists(self.__config_secret_file):
            with open(self.__config_secret_file, "r") as f:
                config = json.load(f)
                self.config.update(config)
def set(self, data):
self.__data = data
    def __read_config(self, config: str):
        """
        Read a single config file and merge it into self.config.
        :param config: path of the config file
        :return:
        """
        if os.path.exists(config):
            with open(config, "r") as f:
                c = json.load(f)
                self.config.update(c)

    def read_config(self, config: Union[str, List, Tuple]):
        """
        Read one or more config files.
        :param config: a single file path, or a list/tuple of file paths
        :return:
        """
        # A single file
        if isinstance(config, str):
            self.__read_config(config)
        # Multiple files
        elif isinstance(config, (tuple, list)):
            for c in config:
                self.__read_config(c)
    def save_config(self, config: str = "config/zdppy_json_config.json"):
        """
        Save the current configuration to a file.
        :param config: path of the target config file
        :return:
        """
        with open(config, "w") as f:
            json.dump(self.config, f, ensure_ascii=False)
    def update_config(self, config: Union[Dict, str, List, Tuple]):
        """
        Update the configuration.
        :param config: a dict of config values, or one or more config file paths
        :return:
        """
        if isinstance(config, dict):
            self.config.update(config)
        else:
            self.read_config(config)
def __str__(self):
        return json.dumps(self.config)
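
A short sketch of the layered config loading implemented above (the paths are the constructor defaults; the ``db_host`` key is hypothetical):

```python
from zdppy_json.zdppy_json import Json  # import path taken from this dump

cfg = Json(config="config/config.json",
           config_secret="config/secret/.config.json")
# Keys from the secret file override the public ones.
print(cfg.config.get("db_host"))

# Merge extra files on demand, then persist the merged result.
cfg.read_config(["config/dev.json", "config/local.json"])
cfg.save_config("config/zdppy_json_config.json")
```

--- end of file: zdppy_json/zdppy_json.py (package: zdppy-json) ---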
# zdppy_log

A logging library for Python.

Project home: https://github.com/zhangdapeng520/zdppy_log

## Version History

- v0.1.2 2022/2/19 Added a debug mode; JSON logging now defaults to False
- v0.1.3 2022/3/4 Added recording of the source file, function, and line number of each log call
- v0.1.4 2022/3/5 Removed third-party dependencies
- v0.1.5 2022/3/5 Added a switch controlling whether the full log path is recorded
- v0.1.6 2022/3/16 Added a console-only switch and refactored the internals
- v0.1.7 2022/3/16 Bug fixes and improved documentation
- v0.1.8 2022/5/17 Optimization: refactored the underlying code structure
- v0.1.9 Added: automatic logging of positional and keyword arguments
- v0.2.0 Added: code cleanup and richer examples
## Installation

```shell
pip install zdppy_log
```
## Usage
```python
from zdppy_log import Log
log1 = Log("logs/zdppy/zdppy_log1.log")
@log1.catch()
def my_function(x, y, z):
# An error? It's caught anyway!
return 1 / (x + y + z)
my_function(0, 0, 0)
# logger.add("out.log", backtrace=True, diagnose=True) # Caution, may leak sensitive data in prod
log2 = Log("logs/zdppy/zdppy_log2.log")
log2.debug("log2 message")
log2.info("log2 message")
log2.warning("log2 message")
log2.error("log2 message")
log2.critical("log2 message")
log3 = Log("logs/zdppy/zdppy_log3.log", debug=False)
log3.debug("log3 message")
log3.info("log3 message")
log3.warning("log3 message")
log3.error("log3 message")
log3.critical("log3 message")
```
## Examples

### Example 1: Basic usage
```python
from zdppy_log import Log
log1 = Log("logs/zdppy/zdppy_log1.log")
log2 = Log("logs/zdppy/zdppy_log2.log")
log2.debug("log2 message")
log2.info("log2 message")
log2.warning("log2 message")
log2.error("log2 message")
log2.critical("log2 message")
log3 = Log("logs/zdppy/zdppy_log3.log", debug=False)
log3.debug("log3 message")
log3.info("log3 message")
log3.warning("log3 message")
log3.error("log3 message")
log3.critical("log3 message")
```
### Example 2: Catching function errors
```python
from zdppy_log import Log
log1 = Log("logs/zdppy/zdppy_log1.log")
@log1.catch()
def my_function(x, y, z):
return 1 / (x + y + z)
my_function(0, 0, 0)
```
### Example 3: Console-only output
```python
from zdppy_log import Log
# Log all levels to the console
log1 = Log(debug=True, is_only_console=True)
log1.debug("log1 debug")
log1.info("log1 info")
log1.warning("log1 warning")
log1.error("log1 error")
log1.critical("log1 critical")
# Log INFO and above to the console
log2 = Log(debug=False, is_only_console=True)
log2.debug("log2 debug")
log2.info("log2 info")
log2.warning("log2 warning")
log2.error("log2 error")
log2.critical("log2 critical")
# Log ERROR and above to the console
log3 = Log(debug=False, level="ERROR", is_only_console=True)
log3.debug("log3 debug")
log3.info("log3 info")
log3.warning("log3 warning")
log3.error("log3 error")
log3.critical("log3 critical")
```
### Example 4: Logging to both console and file
```python
from zdppy_log import Log
# Record INFO-level logs and echo all levels to the console
log1 = Log(debug=True)
log1.debug("log1 debug")
log1.info("log1 info")
log1.warning("log1 warning")
log1.error("log1 error")
log1.critical("log1 critical")
# Record INFO and above, without console output
log2 = Log(debug=False)
log2.debug("log2 debug")
log2.info("log2 info")
log2.warning("log2 warning")
log2.error("log2 error")
log2.critical("log2 critical")
# Record ERROR and above, without console output
log3 = Log(debug=False, level="ERROR")
log3.debug("log3 debug")
log3.info("log3 info")
log3.warning("log3 warning")
log3.error("log3 error")
log3.critical("log3 critical")
```
### Example 5: Serializing logs as JSON
```python
from zdppy_log import Log
# Record INFO-level logs and echo all levels to the console
log1 = Log(serialize=True, debug=True)
log1.debug("log1 debug")
log1.info("log1 info")
log1.warning("log1 warning")
log1.error("log1 error")
log1.critical("log1 critical")
# Record INFO and above, without console output
log2 = Log(serialize=True, debug=False)
log2.debug("log2 debug")
log2.info("log2 info")
log2.warning("log2 warning")
log2.error("log2 error")
log2.critical("log2 critical")
```

--- end of file: README.md (package: zdppy-log) ---
import pickle
from collections import namedtuple
class RecordLevel:
__slots__ = ("name", "no", "icon")
def __init__(self, name, no, icon):
self.name = name
self.no = no
self.icon = icon
def __repr__(self):
return "(name=%r, no=%r, icon=%r)" % (self.name, self.no, self.icon)
def __format__(self, spec):
return self.name.__format__(spec)
class RecordFile:
__slots__ = ("name", "path")
def __init__(self, name, path):
self.name = name
self.path = path
def __repr__(self):
return "(name=%r, path=%r)" % (self.name, self.path)
def __format__(self, spec):
return self.name.__format__(spec)
class RecordThread:
__slots__ = ("id", "name")
def __init__(self, id_, name):
self.id = id_
self.name = name
def __repr__(self):
return "(id=%r, name=%r)" % (self.id, self.name)
def __format__(self, spec):
return self.id.__format__(spec)
class RecordProcess:
__slots__ = ("id", "name")
def __init__(self, id_, name):
self.id = id_
self.name = name
def __repr__(self):
return "(id=%r, name=%r)" % (self.id, self.name)
def __format__(self, spec):
return self.id.__format__(spec)
class RecordException(namedtuple("RecordException", ("type", "value", "traceback"))):
def __repr__(self):
return "(type=%r, value=%r, traceback=%r)" % (self.type, self.value, self.traceback)
    def __reduce__(self):
        # The traceback is not picklable, so we need to remove it. Also, some custom exception
        # values aren't picklable either. For user convenience, we first try to serialize the
        # value and remove it in case of error. As an optimization, we could have re-used the
        # dumped value during unpickling, but this requires using "pickle.loads()", which is
        # flagged as insecure by some security tools.
try:
pickle.dumps(self.value)
except pickle.PickleError:
return (RecordException, (self.type, None, None))
else:
            return (RecordException, (self.type, self.value, None))
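
A minimal sketch of the pickling behaviour implemented by ``__reduce__`` above (the import path follows this dump's layout): the traceback is always dropped, while a picklable exception value survives the round trip.

```python
import pickle

from zdppy_log._recattrs import RecordException

exc_info = RecordException(ValueError, ValueError("boom"), "<fake traceback>")
clone = pickle.loads(pickle.dumps(exc_info))

assert clone.type is ValueError
assert isinstance(clone.value, ValueError)  # picklable values are kept
assert clone.traceback is None              # the traceback never survives
```

--- end of file: zdppy_log/_recattrs.py (package: zdppy-log) ---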
import builtins
import contextlib
import functools
import itertools
import logging
import re
import sys
import warnings
from collections import namedtuple
from inspect import isclass, iscoroutinefunction, isgeneratorfunction
from multiprocessing import current_process
from os.path import basename, splitext
from threading import current_thread
from . import _colorama, _defaults, _asyncio_loop, _filters
from ._better_exceptions import ExceptionFormatter
from ._colorizer import Colorizer
from ._contextvars import ContextVar
from ._datetime import aware_now
from ._error_interceptor import ErrorInterceptor
from ._file_sink import FileSink
from ._get_frame import get_frame
from ._handler import Handler
from ._locks_machinery import create_logger_lock
from ._recattrs import RecordException, RecordFile, RecordLevel, RecordProcess, RecordThread
from ._simple_sinks import AsyncSink, CallableSink, StandardSink, StreamSink
if sys.version_info >= (3, 6):
from os import PathLike
else:
from pathlib import PurePath as PathLike
Level = namedtuple("Level", ["name", "no", "color", "icon"])
start_time = aware_now()
context = ContextVar("loguru_context", default={})
class Core:
"""
    The logging core class, holding the levels, handlers, and state shared by all loggers.
"""
def __init__(self):
        levels = [  # the built-in log levels
Level(
"TRACE",
_defaults.LOGURU_TRACE_NO,
_defaults.LOGURU_TRACE_COLOR,
_defaults.LOGURU_TRACE_ICON,
),
Level(
"DEBUG",
_defaults.LOGURU_DEBUG_NO,
_defaults.LOGURU_DEBUG_COLOR,
_defaults.LOGURU_DEBUG_ICON,
),
Level(
"INFO",
_defaults.LOGURU_INFO_NO,
_defaults.LOGURU_INFO_COLOR,
_defaults.LOGURU_INFO_ICON,
),
Level(
"SUCCESS",
_defaults.LOGURU_SUCCESS_NO,
_defaults.LOGURU_SUCCESS_COLOR,
_defaults.LOGURU_SUCCESS_ICON,
),
Level(
"WARNING",
_defaults.LOGURU_WARNING_NO,
_defaults.LOGURU_WARNING_COLOR,
_defaults.LOGURU_WARNING_ICON,
),
Level(
"ERROR",
_defaults.LOGURU_ERROR_NO,
_defaults.LOGURU_ERROR_COLOR,
_defaults.LOGURU_ERROR_ICON,
),
Level(
"CRITICAL",
_defaults.LOGURU_CRITICAL_NO,
_defaults.LOGURU_CRITICAL_COLOR,
_defaults.LOGURU_CRITICAL_ICON,
),
]
self.levels = {level.name: level for level in levels}
self.levels_ansi_codes = {
name: Colorizer.ansify(level.color) for name, level in self.levels.items()
}
self.levels_ansi_codes[None] = ""
self.handlers_count = itertools.count()
self.handlers = {}
self.extra = {}
self.patcher = None
self.min_level = float("inf")
self.enabled = {}
self.activation_list = []
self.activation_none = True
self.lock = create_logger_lock()
def __getstate__(self):
state = self.__dict__.copy()
state["lock"] = None
return state
def __setstate__(self, state):
self.__dict__.update(state)
self.lock = create_logger_lock()
class Logger:
"""An object to dispatch logging messages to configured handlers.
    The |Logger| is the core object of ``loguru``; every logging configuration and usage passes
    through a call to one of its methods. There is only one logger, so there is no need to retrieve
one before usage.
Once the ``logger`` is imported, it can be used to write messages about events happening in your
code. By reading the output logs of your application, you gain a better understanding of the
flow of your program and you more easily track and debug unexpected behaviors.
Handlers to which the logger sends log messages are added using the |add| method. Note that you
can use the |Logger| right after import as it comes pre-configured (logs are emitted to
|sys.stderr| by default). Messages can be logged with different severity levels and they can be
formatted using curly braces (it uses |str.format| under the hood).
When a message is logged, a "record" is associated with it. This record is a dict which contains
information about the logging context: time, function, file, line, thread, level... It also
    contains the ``__name__`` of the module; this is why you don't need named loggers.
You should not instantiate a |Logger| by yourself, use ``from loguru import logger`` instead.
"""
def __init__(self, core, exception, depth, record, lazy, colors, raw, capture, patcher, extra):
self._core = core
self._options = (exception, depth, record, lazy, colors, raw, capture, patcher, extra)
def __repr__(self):
return "<loguru.logger handlers=%r>" % list(self._core.handlers.values())
def add(
self,
sink,
*,
level=_defaults.LOGURU_LEVEL,
format=_defaults.LOGURU_FORMAT,
filter=_defaults.LOGURU_FILTER,
colorize=_defaults.LOGURU_COLORIZE,
serialize=_defaults.LOGURU_SERIALIZE,
backtrace=_defaults.LOGURU_BACKTRACE,
diagnose=_defaults.LOGURU_DIAGNOSE,
enqueue=_defaults.LOGURU_ENQUEUE,
catch=_defaults.LOGURU_CATCH,
**kwargs
):
r"""Add a handler sending log messages to a sink adequately configured.
Parameters
----------
        sink : |file-like object|_, |str|, |Path|, |callable|_, |coroutine function|_ or |Handler|
            An object in charge of receiving formatted logging messages and propagating them to an
            appropriate endpoint. It can be a file object (e.g. ``sys.stderr`` or
            ``open('file.log', 'w')``), a |str| or |Path| file path (the corresponding log file
            is then created automatically and written to), a plain function implementing the
            output itself, a |logging| module ``Handler`` (e.g. ``FileHandler`` or
            ``StreamHandler``), or a custom class.
level : |int| or |str|, optional
The minimum severity level from which logged messages should be sent to the sink.
format : |str| or |callable|_, optional
The template used to format logged messages before being sent to the sink.
filter : |callable|_, |str| or |dict|, optional
A directive optionally used to decide for each logged message whether it should be sent
to the sink or not.
colorize : |bool|, optional
Whether the color markups contained in the formatted message should be converted to ansi
codes for terminal coloration, or stripped otherwise. If ``None``, the choice is
automatically made based on the sink being a tty or not.
serialize : |bool|, optional
Whether the logged message and its records should be first converted to a JSON string
before being sent to the sink.
backtrace : |bool|, optional
Whether the exception trace formatted should be extended upward, beyond the catching
point, to show the full stacktrace which generated the error.
diagnose : |bool|, optional
            Whether the exception trace should display the values of variables to ease debugging.
This should be set to ``False`` in production to avoid leaking sensitive data.
enqueue : |bool|, optional
Whether the messages to be logged should first pass through a multiprocess-safe queue
before reaching the sink. This is useful while logging to a file through multiple
processes. This also has the advantage of making logging calls non-blocking.
catch : |bool|, optional
            Whether errors occurring while the sink handles log messages should be automatically
            caught. If ``True``, an exception message is displayed on |sys.stderr| but the
            exception is not propagated to the caller, preventing your app from crashing.
**kwargs
Additional parameters that are only valid to configure a coroutine or file sink (see
below).
If and only if the sink is a coroutine function, the following parameter applies:
Parameters
----------
loop : |AbstractEventLoop|, optional
The event loop in which the asynchronous logging task will be scheduled and executed. If
``None``, the loop used is the one returned by |asyncio.get_running_loop| at the time of
the logging call (task is discarded if there is no loop currently running).
If and only if the sink is a file path, the following parameters apply:
Parameters
----------
rotation : |str|, |int|, |time|, |timedelta| or |callable|_, optional
A condition indicating whenever the current logged file should be closed and a new one
started.
retention : |str|, |int|, |timedelta| or |callable|_, optional
A directive filtering old files that should be removed during rotation or end of
program.
compression : |str| or |callable|_, optional
A compression or archive format to which log files should be converted at closure.
delay : |bool|, optional
Whether the file should be created as soon as the sink is configured, or delayed until
first logged message. It defaults to ``False``.
mode : |str|, optional
The opening mode as for built-in |open| function. It defaults to ``"a"`` (open the
file in appending mode).
buffering : |int|, optional
The buffering policy as for built-in |open| function. It defaults to ``1`` (line
buffered file).
encoding : |str|, optional
The file encoding as for built-in |open| function. It defaults to ``"utf8"``.
**kwargs
            Other parameters are passed to the built-in |open| function.
Returns
-------
:class:`int`
An identifier associated with the added sink and which should be used to
|remove| it.
Raises
------
ValueError
If any of the arguments passed to configure the sink is invalid.
Notes
-----
Extended summary follows.
.. _sink:
.. rubric:: The sink parameter
        The ``sink`` handles incoming log messages and proceeds to write them somewhere and
        somehow. A sink can take many forms:
- A |file-like object|_ like ``sys.stderr`` or ``open("somefile.log", "w")``. Anything with
a ``.write()`` method is considered as a file-like object. Custom handlers may also
implement ``flush()`` (called after each logged message), ``stop()`` (called at sink
termination) and ``complete()`` (awaited by the eponymous method).
- A file path as |str| or |Path|. It can be parametrized with some additional parameters,
see below.
        - A |callable|_ (such as a simple function) like ``lambda msg: print(msg)``. This
          allows for a logging procedure entirely defined by user preferences and needs.
        - An asynchronous |coroutine function|_ defined with the ``async def`` statement. The
coroutine object returned by such function will be added to the event loop using
|loop.create_task|. The tasks should be awaited before ending the loop by using
|complete|.
- A built-in |Handler| like ``logging.StreamHandler``. In such a case, the `Loguru` records
are automatically converted to the structure expected by the |logging| module.
Note that the logging functions are not `reentrant`_. This means you should avoid using
the ``logger`` inside any of your sinks or from within |signal| handlers. Otherwise, you
may face deadlock if the module's sink was not explicitly disabled.
.. _message:
.. rubric:: The logged message
The logged message passed to all added sinks is nothing more than a string of the
formatted log, to which a special attribute is associated: the ``.record`` which is a dict
containing all contextual information possibly needed (see below).
Logged messages are formatted according to the ``format`` of the added sink. This format
is usually a string containing braces fields to display attributes from the record dict.
If fine-grained control is needed, the ``format`` can also be a function which takes the
record as parameter and return the format template string. However, note that in such a
case, you should take care of appending the line ending and exception field to the returned
format, while ``"\n{exception}"`` is automatically appended for convenience if ``format`` is
a string.
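
        For example, a dynamic ``format`` function must handle the line ending and exception
        field itself (a minimal sketch):

        >>> def formatter(record):
        ...     end = "\n{exception}" if record["exception"] else "\n"
        ...     return "[{time}] {message}" + end
        ...
        >>> logger.add(sys.stderr, format=formatter)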
The ``filter`` attribute can be used to control which messages are effectively passed to the
        sink and which ones are ignored. A function can be used, accepting the record as an
argument, and returning ``True`` if the message should be logged, ``False`` otherwise. If
a string is used, only the records with the same ``name`` and its children will be allowed.
        One can also pass a ``dict`` mapping module names to minimum required levels. In such a
        case, each log record will search for its closest parent in the ``dict`` and use the associated
level as the filter. The ``dict`` values can be ``int`` severity, ``str`` level name or
``True`` and ``False`` to respectively authorize and discard all module logs
unconditionally. In order to set a default level, the ``""`` module name should be used as
it is the parent of all modules (it does not suppress global ``level`` threshold, though).
Note that while calling a logging method, the keyword arguments (if any) are automatically
added to the ``extra`` dict for convenient contextualization (in addition to being used for
formatting).
.. _levels:
.. rubric:: The severity levels
Each logged message is associated with a severity level. These levels make it possible to
prioritize messages and to choose the verbosity of the logs according to usages. For
example, it allows to display some debugging information to a developer, while hiding it to
the end user running the application.
The ``level`` attribute of every added sink controls the minimum threshold from which log
messages are allowed to be emitted. While using the ``logger``, you are in charge of
configuring the appropriate granularity of your logs. It is possible to add even more custom
levels by using the |level| method.
Here are the standard levels with their default severity value, each one is associated with
a logging method of the same name:
+----------------------+------------------------+------------------------+
| Level name | Severity value | Logger method |
+======================+========================+========================+
| ``TRACE`` | 5 | |logger.trace| |
+----------------------+------------------------+------------------------+
| ``DEBUG`` | 10 | |logger.debug| |
+----------------------+------------------------+------------------------+
| ``INFO`` | 20 | |logger.info| |
+----------------------+------------------------+------------------------+
| ``SUCCESS`` | 25 | |logger.success| |
+----------------------+------------------------+------------------------+
| ``WARNING`` | 30 | |logger.warning| |
+----------------------+------------------------+------------------------+
| ``ERROR`` | 40 | |logger.error| |
+----------------------+------------------------+------------------------+
| ``CRITICAL`` | 50 | |logger.critical| |
+----------------------+------------------------+------------------------+
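
        For example, a custom level can be registered with the |level| method and then used
        through ``logger.log()`` (the name, severity, and color below are arbitrary):

        >>> logger.level("FATAL", no=60, color="<red>", icon="!")
        >>> logger.log("FATAL", "A fatal event just happened")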
.. _record:
.. rubric:: The record dict
The record is just a Python dict, accessible from sinks by ``message.record``. It contains
all contextual information of the logging call (time, function, file, line, level, etc.).
Each of its key can be used in the handler's ``format`` so the corresponding value is
properly displayed in the logged message (e.g. ``"{level}"`` -> ``"INFO"``). Some record's
values are objects with two or more attributes, these can be formatted with ``"{key.attr}"``
(``"{key}"`` would display one by default). `Formatting directives`_ like ``"{key: >3}"``
also works and is particularly useful for time (see below).
+------------+---------------------------------+----------------------------+
| Key | Description | Attributes |
+============+=================================+============================+
| elapsed | The time elapsed since the | See |timedelta| |
| | start of the program | |
+------------+---------------------------------+----------------------------+
| exception | The formatted exception if any, | ``type``, ``value``, |
| | ``None`` otherwise | ``traceback`` |
+------------+---------------------------------+----------------------------+
| extra | The dict of attributes | None |
| | bound by the user (see |bind|) | |
+------------+---------------------------------+----------------------------+
| file | The file where the logging call | ``name`` (default), |
| | was made | ``path`` |
+------------+---------------------------------+----------------------------+
| function | The function from which the | None |
| | logging call was made | |
+------------+---------------------------------+----------------------------+
| level | The severity used to log the | ``name`` (default), |
| | message | ``no``, ``icon`` |
+------------+---------------------------------+----------------------------+
| line | The line number in the source | None |
| | code | |
+------------+---------------------------------+----------------------------+
| message | The logged message (not yet | None |
| | formatted) | |
+------------+---------------------------------+----------------------------+
| module | The module where the logging | None |
| | call was made | |
+------------+---------------------------------+----------------------------+
| name | The ``__name__`` where the | None |
| | logging call was made | |
+------------+---------------------------------+----------------------------+
| process | The process in which the | ``name``, ``id`` (default) |
| | logging call was made | |
+------------+---------------------------------+----------------------------+
| thread | The thread in which the | ``name``, ``id`` (default) |
| | logging call was made | |
+------------+---------------------------------+----------------------------+
| time | The aware local time when the | See |datetime| |
| | logging call was made | |
+------------+---------------------------------+----------------------------+
.. _time:
.. rubric:: The time formatting
To use your favorite time representation, you can set it directly in the time formatter
specifier of your handler format, like for example ``format="{time:HH:mm:ss} {message}"``.
Note that this datetime represents your local time, and it is also made timezone-aware,
so you can display the UTC offset to avoid ambiguities.
The time field can be formatted using more human-friendly tokens. These constitute a subset
of the one used by the `Pendulum`_ library of `@sdispater`_. To escape a token, just add
square brackets around it, for example ``"[YY]"`` would display literally ``"YY"``.
If you prefer to display UTC rather than local time, you can add ``"!UTC"`` at the very end
of the time format, like ``{time:HH:mm:ss!UTC}``. Doing so will convert the ``datetime``
to UTC before formatting.
If no time formatter specifier is used, like for example if ``format="{time} {message}"``,
the default one will use ISO 8601.
+------------------------+---------+----------------------------------------+
| | Token | Output |
+========================+=========+========================================+
| Year | YYYY | 2000, 2001, 2002 ... 2012, 2013 |
| +---------+----------------------------------------+
| | YY | 00, 01, 02 ... 12, 13 |
+------------------------+---------+----------------------------------------+
| Quarter | Q | 1 2 3 4 |
+------------------------+---------+----------------------------------------+
| Month | MMMM | January, February, March ... |
| +---------+----------------------------------------+
| | MMM | Jan, Feb, Mar ... |
| +---------+----------------------------------------+
| | MM | 01, 02, 03 ... 11, 12 |
| +---------+----------------------------------------+
| | M | 1, 2, 3 ... 11, 12 |
+------------------------+---------+----------------------------------------+
| Day of Year | DDDD | 001, 002, 003 ... 364, 365 |
| +---------+----------------------------------------+
| | DDD | 1, 2, 3 ... 364, 365 |
+------------------------+---------+----------------------------------------+
| Day of Month | DD | 01, 02, 03 ... 30, 31 |
| +---------+----------------------------------------+
| | D | 1, 2, 3 ... 30, 31 |
+------------------------+---------+----------------------------------------+
| Day of Week | dddd | Monday, Tuesday, Wednesday ... |
| +---------+----------------------------------------+
| | ddd | Mon, Tue, Wed ... |
| +---------+----------------------------------------+
| | d | 0, 1, 2 ... 6 |
+------------------------+---------+----------------------------------------+
| Days of ISO Week | E | 1, 2, 3 ... 7 |
+------------------------+---------+----------------------------------------+
| Hour | HH | 00, 01, 02 ... 23, 24 |
| +---------+----------------------------------------+
| | H | 0, 1, 2 ... 23, 24 |
| +---------+----------------------------------------+
| | hh | 01, 02, 03 ... 11, 12 |
| +---------+----------------------------------------+
| | h | 1, 2, 3 ... 11, 12 |
+------------------------+---------+----------------------------------------+
| Minute | mm | 00, 01, 02 ... 58, 59 |
| +---------+----------------------------------------+
| | m | 0, 1, 2 ... 58, 59 |
+------------------------+---------+----------------------------------------+
| Second | ss | 00, 01, 02 ... 58, 59 |
| +---------+----------------------------------------+
| | s | 0, 1, 2 ... 58, 59 |
+------------------------+---------+----------------------------------------+
| Fractional Second | S | 0 1 ... 8 9 |
| +---------+----------------------------------------+
| | SS | 00, 01, 02 ... 98, 99 |
| +---------+----------------------------------------+
| | SSS | 000 001 ... 998 999 |
| +---------+----------------------------------------+
| | SSSS... | 000[0..] 001[0..] ... 998[0..] 999[0..]|
| +---------+----------------------------------------+
| | SSSSSS | 000000 000001 ... 999998 999999 |
+------------------------+---------+----------------------------------------+
| AM / PM | A | AM, PM |
+------------------------+---------+----------------------------------------+
| Timezone | Z | -07:00, -06:00 ... +06:00, +07:00 |
| +---------+----------------------------------------+
| | ZZ | -0700, -0600 ... +0600, +0700 |
| +---------+----------------------------------------+
| | zz | EST CST ... MST PST |
+------------------------+---------+----------------------------------------+
| Seconds timestamp | X | 1381685817, 1234567890.123 |
+------------------------+---------+----------------------------------------+
| Microseconds timestamp | x | 1234567890123 |
+------------------------+---------+----------------------------------------+
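
        For example (the template below is illustrative):

        >>> logger.add(sys.stderr, format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}")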
.. _file:
.. rubric:: The file sinks
If the sink is a |str| or a |Path|, the corresponding file will be opened for writing logs.
The path can also contain a special ``"{time}"`` field that will be formatted with the
current date at file creation.
        The ``rotation`` check is made before logging each message. If there is already an existing
        file with the same name as the file to be created, then the existing file is renamed by
        appending the date to its basename to prevent file overwriting. This parameter accepts:
        - an |int| which corresponds to the maximum file size in bytes before the current
          log file is closed and a new one is started.
- a |timedelta| which indicates the frequency of each new rotation.
- a |time| which specifies the hour when the daily rotation should occur.
- a |str| for human-friendly parametrization of one of the previously enumerated types.
Examples: ``"100 MB"``, ``"0.5 GB"``, ``"1 month 2 weeks"``, ``"4 days"``, ``"10h"``,
``"monthly"``, ``"18:00"``, ``"sunday"``, ``"w0"``, ``"monday at 12:00"``, ...
- a |callable|_ which will be invoked before logging. It should accept two arguments: the
logged message and the file object, and it should return ``True`` if the rotation should
happen now, ``False`` otherwise.
The ``retention`` occurs at rotation or at sink stop if rotation is ``None``. Files are
selected if they match the pattern ``"basename(.*).ext(.*)"`` (possible time fields are
beforehand replaced with ``.*``) based on the sink file. This parameter accepts:
- an |int| which indicates the number of log files to keep, while older files are removed.
- a |timedelta| which specifies the maximum age of files to keep.
- a |str| for human-friendly parametrization of the maximum age of files to keep.
Examples: ``"1 week, 3 days"``, ``"2 months"``, ...
        - a |callable|_ which will be invoked before the retention process. It should accept the
          list of log files as argument and may do whatever processing it wants with them (moving
          files, removing them, etc.).
The ``compression`` happens at rotation or at sink stop if rotation is ``None``. This
parameter accepts:
- a |str| which corresponds to the compressed or archived file extension. This can be one
of: ``"gz"``, ``"bz2"``, ``"xz"``, ``"lzma"``, ``"tar"``, ``"tar.gz"``, ``"tar.bz2"``,
``"tar.xz"``, ``"zip"``.
        - a |callable|_ which will be invoked before file termination. It should accept the path
          of the log file as argument and may do whatever processing it wants with it (custom
          compression, network sending, removing it, etc.).
        Either way, if you use a custom function designed according to your preferences, you must
        be very careful not to use the ``logger`` within your function. Otherwise, there is a risk
        that your program hangs because of a deadlock.
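
        For example (the values below are arbitrary):

        >>> logger.add("file.log", rotation="500 MB", retention="10 days", compression="zip")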
.. _color:
.. rubric:: The color markups
To add colors to your logs, you just have to enclose your format string with the appropriate
tags (e.g. ``<red>some message</red>``). These tags are automatically removed if the sink
doesn't support ansi codes. For convenience, you can use ``</>`` to close the last opening
tag without repeating its name (e.g. ``<red>another message</>``).
The special tag ``<level>`` (abbreviated with ``<lvl>``) is transformed according to
the configured color of the logged message level.
Tags which are not recognized will raise an exception during parsing, to inform you about
possible misuse. If you wish to display a markup tag literally, you can escape it by
prepending a ``\`` like for example ``\<blue>``. If, for some reason, you need to escape a
string programmatically, note that the regex used internally to parse markup tags is
``r"\\?</?((?:[fb]g\s)?[^<>\s]*)>"``.
Note that when logging a message with ``opt(colors=True)``, color tags present in the
formatting arguments (``args`` and ``kwargs``) are completely ignored. This is important if
you need to log strings containing markups that might interfere with the color tags (in this
case, do not use f-string).
Here are the available tags (note that compatibility may vary depending on terminal):
+------------------------------------+--------------------------------------+
| Color (abbr) | Styles (abbr) |
+====================================+======================================+
| Black (k) | Bold (b) |
+------------------------------------+--------------------------------------+
| Blue (e) | Dim (d) |
+------------------------------------+--------------------------------------+
| Cyan (c) | Normal (n) |
+------------------------------------+--------------------------------------+
| Green (g) | Italic (i) |
+------------------------------------+--------------------------------------+
| Magenta (m) | Underline (u) |
+------------------------------------+--------------------------------------+
| Red (r) | Strike (s) |
+------------------------------------+--------------------------------------+
| White (w) | Reverse (v) |
+------------------------------------+--------------------------------------+
| Yellow (y) | Blink (l) |
+------------------------------------+--------------------------------------+
| | Hide (h) |
+------------------------------------+--------------------------------------+
Usage:
+-----------------+-------------------------------------------------------------------+
| Description | Examples |
| +---------------------------------+---------------------------------+
| | Foreground | Background |
+=================+=================================+=================================+
| Basic colors | ``<red>``, ``<r>`` | ``<GREEN>``, ``<G>`` |
+-----------------+---------------------------------+---------------------------------+
| Light colors | ``<light-blue>``, ``<le>`` | ``<LIGHT-CYAN>``, ``<LC>`` |
+-----------------+---------------------------------+---------------------------------+
| 8-bit colors | ``<fg 86>``, ``<fg 255>`` | ``<bg 42>``, ``<bg 9>`` |
+-----------------+---------------------------------+---------------------------------+
| Hex colors | ``<fg #00005f>``, ``<fg #EE1>`` | ``<bg #AF5FD7>``, ``<bg #fff>`` |
+-----------------+---------------------------------+---------------------------------+
| RGB colors | ``<fg 0,95,0>`` | ``<bg 72,119,65>`` |
+-----------------+---------------------------------+---------------------------------+
| Stylizing | ``<bold>``, ``<b>``, ``<underline>``, ``<u>`` |
+-----------------+-------------------------------------------------------------------+
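
        For example (the markup below is illustrative):

        >>> logger.add(sys.stderr, colorize=True, format="<green>{time}</green> <level>{message}</level>")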
.. _env:
.. rubric:: The environment variables
The default values of sink parameters can be entirely customized. This is particularly
useful if you don't like the log format of the pre-configured sink.
Each of the |add| default parameter can be modified by setting the ``LOGURU_[PARAM]``
environment variable. For example on Linux: ``export LOGURU_FORMAT="{time} - {message}"``
or ``export LOGURU_DIAGNOSE=NO``.
The default levels' attributes can also be modified by setting the ``LOGURU_[LEVEL]_[ATTR]``
environment variable. For example, on Windows: ``setx LOGURU_DEBUG_COLOR "<blue>"``
or ``setx LOGURU_TRACE_ICON "🚀"``. If you use the ``set`` command, do not include quotes
but escape special symbol as needed, e.g. ``set LOGURU_DEBUG_COLOR=^<blue^>``.
If you want to disable the pre-configured sink, you can set the ``LOGURU_AUTOINIT``
variable to ``False``.
On Linux, you will probably need to edit the ``~/.profile`` file to make this persistent. On
Windows, don't forget to restart your terminal for the change to be taken into account.
Examples
--------
>>> logger.add(sys.stdout, format="{time} - {level} - {message}", filter="sub.module")
>>> logger.add("file_{time}.log", level="TRACE", rotation="100 MB")
>>> def debug_only(record):
... return record["level"].name == "DEBUG"
...
>>> logger.add("debug.log", filter=debug_only) # Other levels are filtered out
>>> def my_sink(message):
... record = message.record
... update_db(message, time=record["time"], level=record["level"])
...
>>> logger.add(my_sink)
>>> level_per_module = {
... "": "DEBUG",
... "third.lib": "WARNING",
... "anotherlib": False
... }
>>> logger.add(lambda m: print(m, end=""), filter=level_per_module, level=0)
>>> async def publish(message):
... await api.post(message)
...
>>> logger.add(publish, serialize=True)
>>> from logging import StreamHandler
>>> logger.add(StreamHandler(sys.stderr), format="{message}")
>>> class RandomStream:
... def __init__(self, seed, threshold):
... self.threshold = threshold
... random.seed(seed)
... def write(self, message):
... if random.random() > self.threshold:
... print(message)
...
>>> stream_object = RandomStream(seed=12345, threshold=0.25)
>>> logger.add(stream_object, level="INFO")
"""
with self._core.lock:
handler_id = next(self._core.handlers_count)
error_interceptor = ErrorInterceptor(catch, handler_id)
if colorize is None and serialize:
colorize = False
if isinstance(sink, (str, PathLike)):
path = sink
name = "'%s'" % path
if colorize is None:
colorize = False
wrapped_sink = FileSink(path, **kwargs)
kwargs = {}
encoding = wrapped_sink.encoding
terminator = "\n"
exception_prefix = ""
elif hasattr(sink, "write") and callable(sink.write):
name = getattr(sink, "name", None) or repr(sink)
if colorize is None:
colorize = _colorama.should_colorize(sink)
if colorize is True and _colorama.should_wrap(sink):
stream = _colorama.wrap(sink)
else:
stream = sink
wrapped_sink = StreamSink(stream)
encoding = getattr(sink, "encoding", None)
terminator = "\n"
exception_prefix = ""
elif isinstance(sink, logging.Handler):
name = repr(sink)
if colorize is None:
colorize = False
wrapped_sink = StandardSink(sink)
encoding = getattr(sink, "encoding", None)
terminator = ""
exception_prefix = "\n"
elif iscoroutinefunction(sink) or iscoroutinefunction(getattr(sink, "__call__", None)):
name = getattr(sink, "__name__", None) or repr(sink)
if colorize is None:
colorize = False
loop = kwargs.pop("loop", None)
            # The worker thread needs an event loop. It can't create a new one internally
            # because the loop has to be accessible by the user while calling "complete()";
            # instead, we use the global one when the sink is added. If "enqueue=False" the
            # event loop is dynamically retrieved at each logging call, which is much more
            # convenient. However, coroutines can't access the running loop in Python 3.5.2
            # and earlier versions, see python/asyncio#452.
if enqueue and loop is None:
try:
loop = _asyncio_loop.get_running_loop()
except RuntimeError as e:
raise ValueError(
"An event loop is required to add a coroutine sink with `enqueue=True`, "
"but but none has been passed as argument and none is currently running."
) from e
coro = sink if iscoroutinefunction(sink) else sink.__call__
wrapped_sink = AsyncSink(coro, loop, error_interceptor)
encoding = "utf8"
terminator = "\n"
exception_prefix = ""
elif callable(sink):
name = getattr(sink, "__name__", None) or repr(sink)
if colorize is None:
colorize = False
wrapped_sink = CallableSink(sink)
encoding = "utf8"
terminator = "\n"
exception_prefix = ""
else:
raise TypeError("Cannot log to objects of type '%s'" % type(sink).__name__)
if kwargs:
raise TypeError("add() got an unexpected keyword argument '%s'" % next(iter(kwargs)))
if filter is None:
filter_func = None
elif filter == "":
filter_func = _filters.filter_none
elif isinstance(filter, str):
parent = filter + "."
length = len(parent)
filter_func = functools.partial(_filters.filter_by_name, parent=parent, length=length)
elif isinstance(filter, dict):
level_per_module = {}
for module, level_ in filter.items():
if module is not None and not isinstance(module, str):
raise TypeError(
"The filter dict contains an invalid module, "
"it should be a string (or None), not: '%s'" % type(module).__name__
)
if level_ is False:
levelno_ = False
elif level_ is True:
levelno_ = 0
elif isinstance(level_, str):
try:
levelno_ = self.level(level_).no
except ValueError:
raise ValueError(
"The filter dict contains a module '%s' associated to a level name "
"which does not exist: '%s'" % (module, level_)
)
elif isinstance(level_, int):
levelno_ = level_
else:
raise TypeError(
"The filter dict contains a module '%s' associated to an invalid level, "
"it should be an integer, a string or a boolean, not: '%s'"
% (module, type(level_).__name__)
)
if levelno_ < 0:
raise ValueError(
"The filter dict contains a module '%s' associated to an invalid level, "
"it should be a positive integer, not: '%d'" % (module, levelno_)
)
level_per_module[module] = levelno_
filter_func = functools.partial(
_filters.filter_by_level, level_per_module=level_per_module
)
elif callable(filter):
if filter == builtins.filter:
raise ValueError(
"The built-in 'filter()' function cannot be used as a 'filter' parameter, "
"this is most likely a mistake (please double-check the arguments passed "
"to 'logger.add()')."
)
filter_func = filter
else:
raise TypeError(
"Invalid filter, it should be a function, a string or a dict, not: '%s'"
% type(filter).__name__
)
if isinstance(level, str):
levelno = self.level(level).no
elif isinstance(level, int):
levelno = level
else:
raise TypeError(
"Invalid level, it should be an integer or a string, not: '%s'"
% type(level).__name__
)
if levelno < 0:
raise ValueError(
"Invalid level value, it should be a positive integer, not: %d" % levelno
)
if isinstance(format, str):
try:
formatter = Colorizer.prepare_format(format + terminator + "{exception}")
except ValueError as e:
raise ValueError(
"Invalid format, color markups could not be parsed correctly"
) from e
is_formatter_dynamic = False
elif callable(format):
if format == builtins.format:
raise ValueError(
"The built-in 'format()' function cannot be used as a 'format' parameter, "
"this is most likely a mistake (please double-check the arguments passed "
"to 'logger.add()')."
)
formatter = format
is_formatter_dynamic = True
else:
raise TypeError(
"Invalid format, it should be a string or a function, not: '%s'"
% type(format).__name__
)
if not isinstance(encoding, str):
encoding = "ascii"
with self._core.lock:
exception_formatter = ExceptionFormatter(
colorize=colorize,
encoding=encoding,
diagnose=diagnose,
backtrace=backtrace,
hidden_frames_filename=self.catch.__code__.co_filename,
prefix=exception_prefix,
)
handler = Handler(
name=name,
sink=wrapped_sink,
levelno=levelno,
formatter=formatter,
is_formatter_dynamic=is_formatter_dynamic,
filter_=filter_func,
colorize=colorize,
serialize=serialize,
enqueue=enqueue,
id_=handler_id,
error_interceptor=error_interceptor,
exception_formatter=exception_formatter,
levels_ansi_codes=self._core.levels_ansi_codes,
)
handlers = self._core.handlers.copy()
handlers[handler_id] = handler
self._core.min_level = min(self._core.min_level, levelno)
self._core.handlers = handlers
return handler_id
def remove(self, handler_id=None):
"""Remove a previously added handler and stop sending logs to its sink.
Parameters
----------
handler_id : |int| or ``None``
The id of the sink to remove, as it was returned by the |add| method. If ``None``, all
handlers are removed. The pre-configured handler is guaranteed to have the index ``0``.
Raises
------
ValueError
If ``handler_id`` is not ``None`` but there is no active handler with such id.
Examples
--------
>>> i = logger.add(sys.stderr, format="{message}")
>>> logger.info("Logging")
Logging
>>> logger.remove(i)
>>> logger.info("No longer logging")
"""
if not (handler_id is None or isinstance(handler_id, int)):
raise TypeError(
"Invalid handler id, it should be an integer as returned "
"by the 'add()' method (or None), not: '%s'" % type(handler_id).__name__
)
with self._core.lock:
handlers = self._core.handlers.copy()
if handler_id is not None and handler_id not in handlers:
raise ValueError("There is no existing handler with id %d" % handler_id) from None
if handler_id is None:
handler_ids = list(handlers.keys())
else:
handler_ids = [handler_id]
for handler_id in handler_ids:
handler = handlers.pop(handler_id)
# This needs to be done first in case "stop()" raises an exception
levelnos = (h.levelno for h in handlers.values())
self._core.min_level = min(levelnos, default=float("inf"))
self._core.handlers = handlers
handler.stop()
def complete(self):
"""Wait for the end of enqueued messages and asynchronous tasks scheduled by handlers.
This method proceeds in two steps: first it waits for all logging messages added to handlers
with ``enqueue=True`` to be processed, then it returns an object that can be awaited to
finalize all logging tasks added to the event loop by coroutine sinks.
It can be called from non-asynchronous code. This is especially recommended when the
``logger`` is utilized with ``multiprocessing`` to ensure messages put to the internal
queue have been properly transmitted before leaving a child process.
The returned object should be awaited before the end of a coroutine executed by
|asyncio.run| or |loop.run_until_complete| to ensure all asynchronous logging messages are
        processed. The function |asyncio.get_running_loop| is called beforehand; only tasks
        scheduled in the same loop as the current one will be awaited by the method.
Returns
-------
:term:`awaitable`
An awaitable object which ensures all asynchronous logging calls are completed when
awaited.
Examples
--------
>>> async def sink(message):
... await asyncio.sleep(0.1) # IO processing...
... print(message, end="")
...
>>> async def work():
... logger.info("Start")
... logger.info("End")
... await logger.complete()
...
>>> logger.add(sink)
1
>>> asyncio.run(work())
Start
End
>>> def process():
... logger.info("Message sent from the child")
... logger.complete()
...
>>> logger.add(sys.stderr, enqueue=True)
1
>>> process = multiprocessing.Process(target=process)
>>> process.start()
>>> process.join()
Message sent from the child
"""
with self._core.lock:
handlers = self._core.handlers.copy()
for handler in handlers.values():
handler.complete_queue()
class AwaitableCompleter:
def __await__(self_):
with self._core.lock:
handlers = self._core.handlers.copy()
for handler in handlers.values():
yield from handler.complete_async().__await__()
return AwaitableCompleter()
def catch(
self,
exception=Exception,
*,
level="ERROR",
reraise=False,
onerror=None,
exclude=None,
default=None,
message="An error has been caught in function '{record[function]}', "
"process '{record[process].name}' ({record[process].id}), "
"thread '{record[thread].name}' ({record[thread].id}):"
):
"""Return a decorator to automatically log possibly caught error in wrapped function.
        This is useful to ensure unexpected exceptions are logged; the entire program can be
        wrapped by this method. This is also very useful to decorate |Thread.run| methods while
using threads to propagate errors to the main logger thread.
        Note that the visibility of variable values (which uses the great |better_exceptions|_
        library from `@Qix-`_) depends on the ``diagnose`` option of each configured sink.
The returned object can also be used as a context manager.
Parameters
----------
exception : |Exception|, optional
The type of exception to intercept. If several types should be caught, a tuple of
exceptions can be used too.
level : |str| or |int|, optional
The level name or severity with which the message should be logged.
reraise : |bool|, optional
Whether the exception should be raised again and hence propagated to the caller.
onerror : |callable|_, optional
A function that will be called if an error occurs, once the message has been logged.
            It should accept the exception instance as its sole argument.
exclude : |Exception|, optional
A type of exception (or a tuple of types) that will be purposely ignored and hence
propagated to the caller without being logged.
default : optional
The value to be returned by the decorated function if an error occurred without being
re-raised.
message : |str|, optional
The message that will be automatically logged if an exception occurs. Note that it will
be formatted with the ``record`` attribute.
Returns
-------
:term:`decorator` / :term:`context manager`
An object that can be used to decorate a function or as a context manager to log
exceptions possibly caught.
Examples
--------
>>> @logger.catch
... def f(x):
... 100 / x
...
>>> def g():
... f(10)
... f(0)
...
>>> g()
ERROR - An error has been caught in function 'g', process 'Main' (367), thread 'ch1' (1398):
Traceback (most recent call last):
File "program.py", line 12, in <module>
g()
└ <function g at 0x7f225fe2bc80>
> File "program.py", line 10, in g
f(0)
└ <function f at 0x7f225fe2b9d8>
File "program.py", line 6, in f
100 / x
└ 0
ZeroDivisionError: division by zero
>>> with logger.catch(message="Because we never know..."):
... main() # No exception, no logs
>>> # Use 'onerror' to prevent the program exit code to be 0 (if 'reraise=False') while
>>> # also avoiding the stacktrace to be duplicated on stderr (if 'reraise=True').
>>> @logger.catch(onerror=lambda _: sys.exit(1))
... def main():
... 1 / 0
"""
if callable(exception) and (
not isclass(exception) or not issubclass(exception, BaseException)
):
return self.catch()(exception)
class Catcher:
def __init__(self_, from_decorator):
self_._from_decorator = from_decorator
def __enter__(self_):
return None
def __exit__(self_, type_, value, traceback_):
if type_ is None:
return
if not issubclass(type_, exception):
return False
if exclude is not None and issubclass(type_, exclude):
return False
from_decorator = self_._from_decorator
_, depth, _, *options = self._options
if from_decorator:
depth += 1
catch_options = [(type_, value, traceback_), depth, True] + options
level_id, static_level_no = self._dynamic_level(level)
self._log(level_id, static_level_no, from_decorator, catch_options, message, (), {})
if onerror is not None:
onerror(value)
return not reraise
def __call__(_, function):
catcher = Catcher(True)
if iscoroutinefunction(function):
async def catch_wrapper(*args, **kwargs):
with catcher:
return await function(*args, **kwargs)
return default
elif isgeneratorfunction(function):
def catch_wrapper(*args, **kwargs):
with catcher:
return (yield from function(*args, **kwargs))
return default
else:
def catch_wrapper(*args, **kwargs):
with catcher:
return function(*args, **kwargs)
return default
functools.update_wrapper(catch_wrapper, function)
return catch_wrapper
return Catcher(False)
def opt(
self,
*,
exception=None,
record=False,
lazy=False,
colors=False,
raw=False,
capture=True,
depth=0,
ansi=False
):
r"""Parametrize a logging call to slightly change generated log message.
Note that it's not possible to chain |opt| calls, the last one takes precedence over the
others as it will "reset" the options to their default values.
Parameters
----------
exception : |bool|, |tuple| or |Exception|, optional
If it does not evaluate as ``False``, the passed exception is formatted and added to the
log message. It could be an |Exception| object or a ``(type, value, traceback)`` tuple,
otherwise the exception information is retrieved from |sys.exc_info|.
record : |bool|, optional
If ``True``, the record dict contextualizing the logging call can be used to format the
message by using ``{record[key]}`` in the log message.
lazy : |bool|, optional
            If ``True``, the attributes used to format the logged message should be functions
            that will be called only if the level is high enough. This can be used to avoid
            calling expensive functions when not necessary.
colors : |bool|, optional
If ``True``, logged message will be colorized according to the markups it possibly
contains.
raw : |bool|, optional
If ``True``, the formatting of each sink will be bypassed and the message will be sent
as is.
capture : |bool|, optional
If ``False``, the ``**kwargs`` of logged message will not automatically populate
the ``extra`` dict (although they are still used for formatting).
depth : |int|, optional
Specify which stacktrace should be used to contextualize the logged message. This is
useful while using the logger from inside a wrapped function to retrieve worthwhile
information.
ansi : |bool|, optional
Deprecated since version 0.4.1: the ``ansi`` parameter will be removed in Loguru 1.0.0,
it is replaced by ``colors`` which is a more appropriate name.
Returns
-------
:class:`~Logger`
A logger wrapping the core logger, but transforming logged message adequately before
sending.
Examples
--------
>>> try:
... 1 / 0
... except ZeroDivisionError:
... logger.opt(exception=True).debug("Exception logged with debug level:")
...
[18:10:02] DEBUG in '<module>' - Exception logged with debug level:
Traceback (most recent call last, catch point marked):
> File "<stdin>", line 2, in <module>
ZeroDivisionError: division by zero
>>> logger.opt(record=True).info("Current line is: {record[line]}")
[18:10:33] INFO in '<module>' - Current line is: 1
>>> logger.opt(lazy=True).debug("If sink <= DEBUG: {x}", x=lambda: math.factorial(2**5))
[18:11:19] DEBUG in '<module>' - If sink <= DEBUG: 263130836933693530167218012160000000
>>> logger.opt(colors=True).warning("We got a <red>BIG</red> problem")
[18:11:30] WARNING in '<module>' - We got a BIG problem
>>> logger.opt(raw=True).debug("No formatting\n")
No formatting
>>> logger.opt(capture=False).info("Displayed but not captured: {value}", value=123)
[18:11:41] Displayed but not captured: 123
>>> def wrapped():
... logger.opt(depth=1).info("Get parent context")
...
>>> def func():
... wrapped()
...
>>> func()
[18:11:54] INFO in 'func' - Get parent context
"""
if ansi:
colors = True
warnings.warn(
"The 'ansi' parameter is deprecated, please use 'colors' instead",
DeprecationWarning,
)
args = self._options[-2:]
return Logger(self._core, exception, depth, record, lazy, colors, raw, capture, *args)
def bind(__self, **kwargs):
"""Bind attributes to the ``extra`` dict of each logged message record.
This is used to add custom context to each logging call.
Parameters
----------
**kwargs
Mapping between keys and values that will be added to the ``extra`` dict.
Returns
-------
:class:`~Logger`
A logger wrapping the core logger, but which sends records with the customized ``extra``
dict.
Examples
--------
>>> logger.add(sys.stderr, format="{extra[ip]} - {message}")
>>> class Server:
... def __init__(self, ip):
... self.ip = ip
... self.logger = logger.bind(ip=ip)
... def call(self, message):
... self.logger.info(message)
...
>>> instance_1 = Server("192.168.0.200")
>>> instance_2 = Server("127.0.0.1")
>>> instance_1.call("First instance")
192.168.0.200 - First instance
>>> instance_2.call("Second instance")
127.0.0.1 - Second instance
"""
*options, extra = __self._options
return Logger(__self._core, *options, {**extra, **kwargs})
@contextlib.contextmanager
def contextualize(__self, **kwargs):
"""Bind attributes to the context-local ``extra`` dict while inside the ``with`` block.
Contrary to |bind| there is no ``logger`` returned, the ``extra`` dict is modified in-place
and updated globally. Most importantly, it uses |contextvars| which means that
contextualized values are unique to each thread and asynchronous task.
The ``extra`` dict will be restored to its initial state once the context manager is exited.
Parameters
----------
**kwargs
Mapping between keys and values that will be added to the context-local ``extra`` dict.
Returns
-------
:term:`context manager` / :term:`decorator`
A context manager (usable as a decorator too) that will bind the attributes once entered
and restore the initial state of the ``extra`` dict while exited.
Examples
--------
>>> logger.add(sys.stderr, format="{message} | {extra}")
1
>>> def task():
... logger.info("Processing!")
...
>>> with logger.contextualize(task_id=123):
... task()
...
Processing! | {'task_id': 123}
>>> logger.info("Done.")
Done. | {}
"""
with __self._core.lock:
new_context = {**context.get(), **kwargs}
token = context.set(new_context)
try:
yield
finally:
with __self._core.lock:
context.reset(token)
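# A minimal usage sketch (illustrative only): because |contextualize| stores the
# values in a context variable, concurrent asyncio tasks each see only their own
# bindings. Assuming a handler formatted with "{message} | {extra}":
#
#     async def worker(task_id):
#         with logger.contextualize(task_id=task_id):
#             logger.info("Working")  # => "Working | {'task_id': <this task's id>}"
#
#     await asyncio.gather(worker(1), worker(2))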
def patch(self, patcher):
"""Attach a function to modify the record dict created by each logging call.
The ``patcher`` may be used to update the record on-the-fly before it's propagated to the
handlers. This allows the "extra" dict to be populated with dynamic values and also permits
advanced modifications of the record emitted while logging a message. The function is called
once before sending the log message to the different handlers.
It is recommended to apply modifications to the ``record["extra"]`` dict rather than to the
``record`` dict itself, as some values are used internally by `Loguru`, and modifying them
may produce unexpected results.
Parameters
----------
patcher : |callable|_
The function to which the record dict will be passed as the sole argument. This function
is in charge of updating the record in-place; it does not need to return any value, as the
modified record object will be re-used.
Returns
-------
:class:`~Logger`
A logger wrapping the core logger, whose records are passed through the ``patcher``
function before being sent to the added handlers.
Examples
--------
>>> logger.add(sys.stderr, format="{extra[utc]} {message}")
>>> logger = logger.patch(lambda record: record["extra"].update(utc=datetime.utcnow()))
>>> logger.info("That way, you can log messages with time displayed in UTC")
>>> def wrapper(func):
... @functools.wraps(func)
... def wrapped(*args, **kwargs):
... logger.patch(lambda r: r.update(function=func.__name__)).info("Wrapped!")
... return func(*args, **kwargs)
... return wrapped
>>> def recv_record_from_network(pipe):
... record = pickle.loads(pipe.read())
... level, message = record["level"], record["message"]
... logger.patch(lambda r: r.update(record)).log(level, message)
"""
*options, _, extra = self._options
return Logger(self._core, *options, patcher, extra)
def level(self, name, no=None, color=None, icon=None):
"""Add, update or retrieve a logging level.
Logging levels are defined by their ``name`` to which a severity ``no``, an ansi ``color``
tag and an ``icon`` are associated and possibly modified at run-time. To |log| to a custom
level, you must use its name; the severity number is not linked back to the level's
name (which implies that several levels can share the same severity).
To add a new level, its ``name`` and its ``no`` are required. A ``color`` and an ``icon``
can also be specified or will be empty by default.
To update an existing level, pass its ``name`` with the parameters to be changed. It is not
possible to modify the ``no`` of a level once it has been added.
To retrieve level information, the ``name`` alone suffices.
Parameters
----------
name : |str|
The name of the logging level.
no : |int|
The severity of the level to be added or updated.
color : |str|
The color markup of the level to be added or updated.
icon : |str|
The icon of the level to be added or updated.
Returns
-------
``Level``
A |namedtuple| containing information about the level.
Raises
------
ValueError
If there is no level registered with such ``name``.
Examples
--------
>>> level = logger.level("ERROR")
>>> print(level)
Level(name='ERROR', no=40, color='<red><bold>', icon='❌')
>>> logger.add(sys.stderr, format="{level.no} {level.icon} {message}")
1
>>> logger.level("CUSTOM", no=15, color="<blue>", icon="@")
Level(name='CUSTOM', no=15, color='<blue>', icon='@')
>>> logger.log("CUSTOM", "Logging...")
15 @ Logging...
>>> logger.level("WARNING", icon=r"/!\\")
Level(name='WARNING', no=30, color='<yellow><bold>', icon='/!\\\\')
>>> logger.warning("Updated!")
30 /!\\ Updated!
"""
if not isinstance(name, str):
raise TypeError(
"Invalid level name, it should be a string, not: '%s'" % type(name).__name__
)
if no is color is icon is None:
try:
return self._core.levels[name]
except KeyError:
raise ValueError("Level '%s' does not exist" % name) from None
if name not in self._core.levels:
if no is None:
raise ValueError(
"Level '%s' does not exist, you have to create it by specifying a level no"
% name
)
else:
old_color, old_icon = "", " "
elif no is not None:
raise TypeError("Level '%s' already exists, you can't update its severity no" % name)
else:
_, no, old_color, old_icon = self.level(name)
if color is None:
color = old_color
if icon is None:
icon = old_icon
if not isinstance(no, int):
raise TypeError(
"Invalid level no, it should be an integer, not: '%s'" % type(no).__name__
)
if no < 0:
raise ValueError("Invalid level no, it should be a positive integer, not: %d" % no)
ansi = Colorizer.ansify(color)
level = Level(name, no, color, icon)
with self._core.lock:
self._core.levels[name] = level
self._core.levels_ansi_codes[name] = ansi
for handler in self._core.handlers.values():
handler.update_format(name)
return level
def disable(self, name):
"""Disable logging of messages coming from ``name`` module and its children.
Developers of libraries using `Loguru` should absolutely disable it to avoid disrupting
users with unrelated log messages.
Note that in some rare circumstances, it is not possible for `Loguru` to
determine the module's ``__name__`` value. In such a situation, ``record["name"]`` will be
equal to ``None``, which is why ``None`` is also a valid argument.
Parameters
----------
name : |str| or ``None``
The name of the parent module to disable.
Examples
--------
>>> logger.info("Allowed message by default")
[22:21:55] Allowed message by default
>>> logger.disable("my_library")
>>> logger.info("While publishing a library, don't forget to disable logging")
"""
self._change_activation(name, False)
def enable(self, name):
"""Enable logging of messages coming from ``name`` module and its children.
Logging is generally disabled by imported libraries using `Loguru`, hence this function
allows users to receive these messages anyway.
To enable all logs regardless of the module they are coming from, an empty string ``""`` can
be passed.
Parameters
----------
name : |str| or ``None``
The name of the parent module to re-allow.
Examples
--------
>>> logger.disable("__main__")
>>> logger.info("Disabled, so nothing is logged.")
>>> logger.enable("__main__")
>>> logger.info("Re-enabled, messages are logged.")
[22:46:12] Re-enabled, messages are logged.
"""
self._change_activation(name, True)
def configure(self, *, handlers=None, levels=None, extra=None, patcher=None, activation=None):
"""Configure the core logger.
It should be noted that ``extra`` values set using this function are available across all
modules, so this is the best way to set overall default values.
Parameters
----------
handlers : |list| of |dict|, optional
A list of each handler to be added. The list should contain dicts of params passed to
the |add| function as keyword arguments. If not ``None``, all previously added
handlers are first removed.
levels : |list| of |dict|, optional
A list of each level to be added or updated. The list should contain dicts of params
passed to the |level| function as keyword arguments. This will never remove previously
created levels.
extra : |dict|, optional
A dict containing additional parameters bound to the core logger, useful to share
common properties if you call |bind| in several of your modules. If not ``None``,
this will remove previously configured ``extra`` dict.
patcher : |callable|_, optional
A function that will be applied to the record dict of each logged message across all
modules using the logger. It should modify the dict in-place without returning anything.
The function is executed prior to the one possibly added by the |patch| method. If not
``None``, this will replace previously configured ``patcher`` function.
activation : |list| of |tuple|, optional
A list of ``(name, state)`` tuples which denotes which loggers should be enabled (if
``state`` is ``True``) or disabled (if ``state`` is ``False``). The calls to |enable|
and |disable| are made according to the list order. This will not modify previously
activated loggers, so if you need a fresh start prepend your list with ``("", False)``
or ``("", True)``.
Returns
-------
:class:`list` of :class:`int`
A list containing the identifiers of added sinks (if any).
Examples
--------
>>> logger.configure(
... handlers=[
... dict(sink=sys.stderr, format="[{time}] {message}"),
... dict(sink="file.log", enqueue=True, serialize=True),
... ],
... levels=[dict(name="NEW", no=13, icon="¤", color="")],
... extra={"common_to_all": "default"},
... patcher=lambda record: record["extra"].update(some_value=42),
... activation=[("my_module.secret", False), ("another_library.module", True)],
... )
[1, 2]
>>> # Set a default "extra" dict to logger across all modules, without "bind()"
>>> extra = {"context": "foo"}
>>> logger.configure(extra=extra)
>>> logger.add(sys.stderr, format="{extra[context]} - {message}")
>>> logger.info("Context without bind")
>>> # => "foo - Context without bind"
>>> logger.bind(context="bar").info("Suppress global context")
>>> # => "bar - Suppress global context"
"""
if handlers is not None:
self.remove()
else:
handlers = []
if levels is not None:
for params in levels:
self.level(**params)
if patcher is not None:
with self._core.lock:
self._core.patcher = patcher
if extra is not None:
with self._core.lock:
self._core.extra.clear()
self._core.extra.update(extra)
if activation is not None:
for name, state in activation:
if state:
self.enable(name)
else:
self.disable(name)
return [self.add(**params) for params in handlers]
def _change_activation(self, name, status):
if not (name is None or isinstance(name, str)):
raise TypeError(
"Invalid name, it should be a string (or None), not: '%s'" % type(name).__name__
)
with self._core.lock:
enabled = self._core.enabled.copy()
if name is None:
for n in enabled:
if n is None:
enabled[n] = status
self._core.activation_none = status
self._core.enabled = enabled
return
if name != "":
name += "."
activation_list = [
(n, s) for n, s in self._core.activation_list if n[: len(name)] != name
]
parent_status = next((s for n, s in activation_list if name[: len(n)] == n), None)
if parent_status != status and not (name == "" and status is True):
activation_list.append((name, status))
def modules_depth(x):
return x[0].count(".")
activation_list.sort(key=modules_depth, reverse=True)
for n in enabled:
if n is not None and (n + ".")[: len(name)] == name:
enabled[n] = status
self._core.activation_list = activation_list
self._core.enabled = enabled
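# A minimal usage sketch (illustrative only): activation relies on dotted-prefix
# matching, so toggling a parent module also affects all of its children:
#
#     logger.disable("package")          # silences "package", "package.module", ...
#     logger.enable("package.allowed")   # re-enables that sub-tree only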
@staticmethod
def parse(file, pattern, *, cast={}, chunk=2 ** 16):
"""Parse raw logs and extract each entry as a |dict|.
The logging format has to be specified as the regex ``pattern``; it will then be
used to parse the ``file`` and retrieve each entry based on the named groups present
in the regex.
Parameters
----------
file : |str|, |Path| or |file-like object|_
The path of the log file to be parsed, or an already opened file object.
pattern : |str| or |re.Pattern|_
The regex to use for log parsing; it should contain named groups which will be included
in the returned dict.
cast : |callable|_ or |dict|, optional
A function that should convert the parsed regex groups (a dict of string
values) in-place to more appropriate types. If a dict is passed, it should be a mapping
between keys of the parsed log dict and the functions used to convert the associated
values.
chunk : |int|, optional
The number of bytes read while iterating through the logs; this avoids having to load
the whole file in memory.
Yields
------
:class:`dict`
The dict mapping regex named groups to matched values, as returned by |match.groupdict|
and optionally converted according to ``cast`` argument.
Examples
--------
>>> reg = r"(?P<lvl>[0-9]+): (?P<msg>.*)" # If log format is "{level.no} - {message}"
>>> for e in logger.parse("file.log", reg): # A file line could be "10 - A debug message"
... print(e) # => {'lvl': '10', 'msg': 'A debug message'}
>>> caster = dict(lvl=int) # Parse 'lvl' key as an integer
>>> for e in logger.parse("file.log", reg, cast=caster):
... print(e) # => {'lvl': 10, 'msg': 'A debug message'}
>>> def cast(groups):
... if "date" in groups:
... groups["date"] = datetime.strptime(groups["date"], "%Y-%m-%d %H:%M:%S")
...
>>> with open("file.log") as file:
... for log in logger.parse(file, reg, cast=cast):
... print(log["date"], log["something_else"])
"""
if isinstance(file, (str, PathLike)):
should_close = True
fileobj = open(str(file))
elif hasattr(file, "read") and callable(file.read):
should_close = False
fileobj = file
else:
raise TypeError(
"Invalid file, it should be a string path or a file object, not: '%s'"
% type(file).__name__
)
if isinstance(cast, dict):
def cast_function(groups):
for key, converter in cast.items():
if key in groups:
groups[key] = converter(groups[key])
elif callable(cast):
cast_function = cast
else:
raise TypeError(
"Invalid cast, it should be a function or a dict, not: '%s'" % type(cast).__name__
)
try:
regex = re.compile(pattern)
except TypeError:
raise TypeError(
"Invalid pattern, it should be a string or a compiled regex, not: '%s'"
% type(pattern).__name__
) from None
matches = Logger._find_iter(fileobj, regex, chunk)
for match in matches:
groups = match.groupdict()
cast_function(groups)
yield groups
if should_close:
fileobj.close()
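# Note on the helper below: it keeps the most recent match (and any unmatched
# tail) buffered instead of yielding it immediately, because the next chunk read
# may still extend that final entry; it is flushed only once the file is exhausted.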
@staticmethod
def _find_iter(fileobj, regex, chunk):
buffer = fileobj.read(0)
while True:
text = fileobj.read(chunk)
buffer += text
matches = list(regex.finditer(buffer))
if not text:
yield from matches
break
if len(matches) > 1:
end = matches[-2].end()
buffer = buffer[end:]
yield from matches[:-1]
def _log(self, level_id, static_level_no, from_decorator, options, message, args, kwargs):
"""
记录日志的统一入口方法
"""
core = self._core
if not core.handlers:
return
(exception, depth, record, lazy, colors, raw, capture, patcher, extra) = options
frame = get_frame(depth + 2)
try:
name = frame.f_globals["__name__"]
except KeyError:
name = None
try:
if not core.enabled[name]:
return
except KeyError:
enabled = core.enabled
if name is None:
status = core.activation_none
enabled[name] = status
if not status:
return
else:
dotted_name = name + "."
for dotted_module_name, status in core.activation_list:
if dotted_name[: len(dotted_module_name)] == dotted_module_name:
if status:
break
enabled[name] = False
return
enabled[name] = True
current_datetime = aware_now()
if level_id is None:
level_icon = " "
level_no = static_level_no
level_name = "Level %d" % level_no
else:
try:
level_name, level_no, _, level_icon = core.levels[level_id]
except KeyError:
raise ValueError("Level '%s' does not exist" % level_id) from None
if level_no < core.min_level:
return
code = frame.f_code
file_path = code.co_filename
file_name = basename(file_path)
thread = current_thread()
process = current_process()
elapsed = current_datetime - start_time
if exception:
if isinstance(exception, BaseException):
type_, value, traceback = (type(exception), exception, exception.__traceback__)
elif isinstance(exception, tuple):
type_, value, traceback = exception
else:
type_, value, traceback = sys.exc_info()
exception = RecordException(type_, value, traceback)
else:
exception = None
log_record = {
"elapsed": elapsed,
"exception": exception,
"extra": {**core.extra, **context.get(), **extra},
"file": RecordFile(file_name, file_path),
"function": code.co_name,
"level": RecordLevel(level_name, level_no, level_icon),
"line": frame.f_lineno,
"message": str(message),
"module": splitext(file_name)[0],
"name": name,
"process": RecordProcess(process.ident, process.name),
"thread": RecordThread(thread.ident, thread.name),
"time": current_datetime,
}
if lazy:
args = [arg() for arg in args]
kwargs = {key: value() for key, value in kwargs.items()}
if capture and kwargs:
log_record["extra"].update(kwargs)
if record:
if "record" in kwargs:
raise TypeError(
"The message can't be formatted: 'record' shall not be used as a keyword "
"argument while logger has been configured with '.opt(record=True)'"
)
kwargs.update(record=log_record)
if colors:
if args or kwargs:
colored_message = Colorizer.prepare_message(message, args, kwargs)
else:
colored_message = Colorizer.prepare_simple_message(str(message))
log_record["message"] = colored_message.stripped
elif args or kwargs:
colored_message = None
# zdppy_log extension: if the message contains no "{}" placeholder, append one
# placeholder per positional argument so that extra args are logged automatically.
if "{}" not in message and len(args) > 0:
sign = ""
for i in range(len(args)):
sign += "{} "
message += sign
# Likewise, append "key={key}" placeholders for the keyword arguments (only
# reached when the message still contains no "{}" placeholder).
if "{}" not in message and len(kwargs) > 0:
sign = ""
for k, v in kwargs.items():
sign += f"{k}=" + "{" + k + "} "
message += " " + sign
# Format the final message with the given arguments.
log_record["message"] = message.format(*args, **kwargs)
else:
colored_message = None
if core.patcher:
core.patcher(log_record)
if patcher:
patcher(log_record)
for handler in core.handlers.values():
handler.emit(log_record, level_id, from_decorator, raw, colored_message)
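# A minimal usage sketch (illustrative only) of the auto-placeholder extension in
# "_log()" above, which appends "{}" placeholders to placeholder-free messages:
#
#     logger.debug("result:", 42)         # message becomes "result:{} "  -> "result:42 "
#     logger.debug("user", name="alice")  # message becomes "user name={name} " -> "user name=alice "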
def trace(__self, __message, *args, **kwargs):
r"""Log ``message.format(*args, **kwargs)`` with severity ``'TRACE'``."""
__self._log("TRACE", None, False, __self._options, __message, args, kwargs)
def debug(__self, __message, *args, **kwargs):
"""
记录debug日志
"""
__self._log("DEBUG", None, False, __self._options, __message, args, kwargs)
def info(__self, __message, *args, **kwargs):
r"""Log ``message.format(*args, **kwargs)`` with severity ``'INFO'``."""
__self._log("INFO", None, False, __self._options, __message, args, kwargs)
def success(__self, __message, *args, **kwargs):
r"""Log ``message.format(*args, **kwargs)`` with severity ``'SUCCESS'``."""
__self._log("SUCCESS", None, False, __self._options, __message, args, kwargs)
def warning(__self, __message, *args, **kwargs):
r"""Log ``message.format(*args, **kwargs)`` with severity ``'WARNING'``."""
__self._log("WARNING", None, False, __self._options, __message, args, kwargs)
def error(__self, __message, *args, **kwargs):
r"""Log ``message.format(*args, **kwargs)`` with severity ``'ERROR'``."""
__self._log("ERROR", None, False, __self._options, __message, args, kwargs)
def critical(__self, __message, *args, **kwargs):
r"""Log ``message.format(*args, **kwargs)`` with severity ``'CRITICAL'``."""
__self._log("CRITICAL", None, False, __self._options, __message, args, kwargs)
def exception(__self, __message, *args, **kwargs):
r"""Convenience method for logging an ``'ERROR'`` with exception information."""
options = (True,) + __self._options[1:]
__self._log("ERROR", None, False, options, __message, args, kwargs)
def log(__self, __level, __message, *args, **kwargs):
r"""Log ``message.format(*args, **kwargs)`` with severity ``level``."""
level_id, static_level_no = __self._dynamic_level(__level)
__self._log(level_id, static_level_no, False, __self._options, __message, args, kwargs)
@staticmethod
@functools.lru_cache(maxsize=32)
def _dynamic_level(level):
if isinstance(level, str):
return (level, None)
if isinstance(level, int):
if level < 0:
raise ValueError(
"Invalid level value, it should be a positive integer, not: %d" % level
)
return (None, level)
raise TypeError(
"Invalid level, it should be an integer or a string, not: '%s'" % type(level).__name__
)
def start(self, *args, **kwargs):
"""Deprecated function to |add| a new handler.
Warnings
--------
.. deprecated:: 0.2.2
``start()`` will be removed in Loguru 1.0.0, it is replaced by ``add()`` which is a less
confusing name.
"""
warnings.warn(
"The 'start()' method is deprecated, please use 'add()' instead", DeprecationWarning
)
return self.add(*args, **kwargs)
def stop(self, *args, **kwargs):
"""Deprecated function to |remove| an existing handler.
Warnings
--------
.. deprecated:: 0.2.2
``stop()`` will be removed in Loguru 1.0.0, it is replaced by ``remove()`` which is a less
confusing name.
"""
warnings.warn(
"The 'stop()' method is deprecated, please use 'remove()' instead", DeprecationWarning
)
return self.remove(*args, **kwargs) | zdppy-log | /zdppy_log-0.2.0.tar.gz/zdppy_log-0.2.0/zdppy_log/_logger.py | _logger.py |
import re
from calendar import day_abbr, day_name, month_abbr, month_name
from datetime import datetime as datetime_
from datetime import timedelta, timezone
from time import localtime, strftime
tokens = r"H{1,2}|h{1,2}|m{1,2}|s{1,2}|S{1,6}|YYYY|YY|M{1,4}|D{1,4}|Z{1,2}|zz|A|X|x|E|Q|dddd|ddd|d"
pattern = re.compile(r"(?:{0})|\[(?:{0}|!UTC)\]".format(tokens))
class datetime(datetime_):
def __format__(self, spec):
if spec.endswith("!UTC"):
dt = self.astimezone(timezone.utc)
spec = spec[:-4]
else:
dt = self
if not spec:
spec = "%Y-%m-%dT%H:%M:%S.%f%z"
if "%" in spec:
return datetime_.__format__(dt, spec)
year, month, day, hour, minute, second, weekday, yearday, _ = dt.timetuple()
microsecond = dt.microsecond
timestamp = dt.timestamp()
tzinfo = dt.tzinfo or timezone(timedelta(seconds=0))
offset = tzinfo.utcoffset(dt).total_seconds()
sign = ("-", "+")[offset >= 0]
h, m = divmod(abs(offset // 60), 60)
rep = {
"YYYY": "%04d" % year,
"YY": "%02d" % (year % 100),
"Q": "%d" % ((month - 1) // 3 + 1),
"MMMM": month_name[month],
"MMM": month_abbr[month],
"MM": "%02d" % month,
"M": "%d" % month,
"DDDD": "%03d" % yearday,
"DDD": "%d" % yearday,
"DD": "%02d" % day,
"D": "%d" % day,
"dddd": day_name[weekday],
"ddd": day_abbr[weekday],
"d": "%d" % weekday,
"E": "%d" % (weekday + 1),
"HH": "%02d" % hour,
"H": "%d" % hour,
"hh": "%02d" % ((hour - 1) % 12 + 1),
"h": "%d" % ((hour - 1) % 12 + 1),
"mm": "%02d" % minute,
"m": "%d" % minute,
"ss": "%02d" % second,
"s": "%d" % second,
"S": "%d" % (microsecond // 100000),
"SS": "%02d" % (microsecond // 10000),
"SSS": "%03d" % (microsecond // 1000),
"SSSS": "%04d" % (microsecond // 100),
"SSSSS": "%05d" % (microsecond // 10),
"SSSSSS": "%06d" % microsecond,
"A": ("AM", "PM")[hour // 12],
"Z": "%s%02d:%02d" % (sign, h, m),
"ZZ": "%s%02d%02d" % (sign, h, m),
"zz": tzinfo.tzname(dt) or "",
"X": "%d" % timestamp,
"x": "%d" % (int(timestamp) * 1000000 + microsecond),
}
def get(m):
try:
return rep[m.group(0)]
except KeyError:
return m.group(0)[1:-1]
return pattern.sub(get, spec)
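# A minimal usage sketch (illustrative only) of the token table above, assuming
# dt = datetime(2024, 5, 3, 15, 7, 9):
#
#     "{:YYYY-MM-DD HH:mm:ss}".format(dt)  # => "2024-05-03 15:07:09"
#     "{:HH:mm!UTC}".format(dt)            # => the same instant, converted to UTC first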
def aware_now():
now = datetime.now()
timestamp = now.timestamp()
local = localtime(timestamp)
try:
seconds = local.tm_gmtoff
zone = local.tm_zone
except AttributeError:
offset = datetime.fromtimestamp(timestamp) - datetime.utcfromtimestamp(timestamp)
seconds = offset.total_seconds()
zone = strftime("%Z")
tzinfo = timezone(timedelta(seconds=seconds), zone)
return now.replace(tzinfo=tzinfo) | zdppy-log | /zdppy_log-0.2.0.tar.gz/zdppy_log-0.2.0/zdppy_log/_datetime.py | _datetime.py |
import datetime as datetime_
import decimal
import glob
import numbers
import os
import shutil
import string
from functools import partial
from . import _string_parsers as string_parsers
from ._ctime_functions import get_ctime, set_ctime
from ._datetime import aware_now, datetime
def generate_rename_path(root, ext, creation_time):
creation_datetime = datetime.fromtimestamp(creation_time)
date = FileDateFormatter(creation_datetime)
renamed_path = "{}.{}{}".format(root, date, ext)
counter = 1
while os.path.exists(renamed_path):
counter += 1
renamed_path = "{}.{}.{}{}".format(root, date, counter, ext)
return renamed_path
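# Illustrative example: when a rotation or compression target already exists, the
# old file is renamed using its creation date, e.g. "app.log" becomes something
# like "app.2024-05-03_15-07-09_123456.log", with a ".2", ".3", ... counter
# appended on further collisions.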
class FileDateFormatter:
def __init__(self, datetime=None):
self.datetime = datetime or aware_now()
def __format__(self, spec):
if not spec:
spec = "%Y-%m-%d_%H-%M-%S_%f"
return self.datetime.__format__(spec)
class Compression:
@staticmethod
def add_compress(path_in, path_out, opener, **kwargs):
with opener(path_out, **kwargs) as f_comp:
f_comp.add(path_in, os.path.basename(path_in))
@staticmethod
def write_compress(path_in, path_out, opener, **kwargs):
with opener(path_out, **kwargs) as f_comp:
f_comp.write(path_in, os.path.basename(path_in))
@staticmethod
def copy_compress(path_in, path_out, opener, **kwargs):
with open(path_in, "rb") as f_in:
with opener(path_out, **kwargs) as f_out:
shutil.copyfileobj(f_in, f_out)
@staticmethod
def compression(path_in, ext, compress_function):
path_out = "{}{}".format(path_in, ext)
if os.path.exists(path_out):
creation_time = get_ctime(path_out)
root, ext_before = os.path.splitext(path_in)
renamed_path = generate_rename_path(root, ext_before + ext, creation_time)
os.rename(path_out, renamed_path)
compress_function(path_in, path_out)
os.remove(path_in)
class Retention:
@staticmethod
def retention_count(logs, number):
def key_log(log):
return (-os.stat(log).st_mtime, log)
for log in sorted(logs, key=key_log)[number:]:
os.remove(log)
@staticmethod
def retention_age(logs, seconds):
t = datetime.now().timestamp()
for log in logs:
if os.stat(log).st_mtime <= t - seconds:
os.remove(log)
class Rotation:
@staticmethod
def forward_day(t):
return t + datetime_.timedelta(days=1)
@staticmethod
def forward_weekday(t, weekday):
while True:
t += datetime_.timedelta(days=1)
if t.weekday() == weekday:
return t
@staticmethod
def forward_interval(t, interval):
return t + interval
@staticmethod
def rotation_size(message, file, size_limit):
file.seek(0, 2)
return file.tell() + len(message) > size_limit
class RotationTime:
def __init__(self, step_forward, time_init=None):
self._step_forward = step_forward
self._time_init = time_init
self._limit = None
def __call__(self, message, file):
if self._limit is None:
filepath = os.path.realpath(file.name)
creation_time = get_ctime(filepath)
set_ctime(filepath, creation_time)
start_time = limit = datetime.fromtimestamp(creation_time)
if self._time_init is not None:
limit = limit.replace(
hour=self._time_init.hour,
minute=self._time_init.minute,
second=self._time_init.second,
microsecond=self._time_init.microsecond,
)
if limit <= start_time:
limit = self._step_forward(limit)
self._limit = limit
record_time = message.record["time"].replace(tzinfo=None)
if record_time >= self._limit:
while self._limit <= record_time:
self._limit = self._step_forward(self._limit)
return True
return False
class FileSink:
def __init__(
self,
path,
*,
rotation=None,
retention=None,
compression=None,
delay=False,
mode="a",
buffering=1,
encoding="utf8",
**kwargs
):
self.encoding = encoding
self._kwargs = {**kwargs, "mode": mode, "buffering": buffering, "encoding": self.encoding}
self._path = str(path)
self._glob_patterns = self._make_glob_patterns(self._path)
self._rotation_function = self._make_rotation_function(rotation)
self._retention_function = self._make_retention_function(retention)
self._compression_function = self._make_compression_function(compression)
self._file = None
self._file_path = None
if not delay:
self._initialize_file()
def write(self, message):
if self._file is None:
self._initialize_file()
if self._rotation_function is not None and self._rotation_function(message, self._file):
self._terminate_file(is_rotating=True)
self._file.write(message)
def _prepare_new_path(self):
path = self._path.format_map({"time": FileDateFormatter()})
path = os.path.abspath(path)
dirname = os.path.dirname(path)
os.makedirs(dirname, exist_ok=True)
return path
def _initialize_file(self):
path = self._prepare_new_path()
self._file = open(path, **self._kwargs)
self._file_path = path
def _terminate_file(self, *, is_rotating=False):
old_path = self._file_path
if self._file is not None:
self._file.close()
self._file = None
self._file_path = None
if is_rotating:
new_path = self._prepare_new_path()
if new_path == old_path:
creation_time = get_ctime(old_path)
root, ext = os.path.splitext(old_path)
renamed_path = generate_rename_path(root, ext, creation_time)
os.rename(old_path, renamed_path)
old_path = renamed_path
if is_rotating or self._rotation_function is None:
if self._compression_function is not None and old_path is not None:
self._compression_function(old_path)
if self._retention_function is not None:
logs = {
file
for pattern in self._glob_patterns
for file in glob.glob(pattern)
if os.path.isfile(file)
}
self._retention_function(list(logs))
if is_rotating:
file = open(new_path, **self._kwargs)
set_ctime(new_path, datetime.now().timestamp())
self._file_path = new_path
self._file = file
def stop(self):
self._terminate_file(is_rotating=False)
async def complete(self):
pass
@staticmethod
def _make_glob_patterns(path):
formatter = string.Formatter()
tokens = formatter.parse(path)
escaped = "".join(glob.escape(text) + "*" * (name is not None) for text, name, *_ in tokens)
root, ext = os.path.splitext(escaped)
if not ext:
return [escaped, escaped + ".*"]
return [escaped, escaped + ".*", root + ".*" + ext, root + ".*" + ext + ".*"]
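# Illustrative example: for a path such as "app_{time}.log", the patterns built
# above are "app_*.log", "app_*.log.*", "app_*.*.log" and "app_*.*.log.*", so
# that retention also matches rotated and compressed siblings of the sink file.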
@staticmethod
def _make_rotation_function(rotation):
if rotation is None:
return None
elif isinstance(rotation, str):
size = string_parsers.parse_size(rotation)
if size is not None:
return FileSink._make_rotation_function(size)
interval = string_parsers.parse_duration(rotation)
if interval is not None:
return FileSink._make_rotation_function(interval)
frequency = string_parsers.parse_frequency(rotation)
if frequency is not None:
return Rotation.RotationTime(frequency)
daytime = string_parsers.parse_daytime(rotation)
if daytime is not None:
day, time = daytime
if day is None:
return FileSink._make_rotation_function(time)
if time is None:
time = datetime_.time(0, 0, 0)
step_forward = partial(Rotation.forward_weekday, weekday=day)
return Rotation.RotationTime(step_forward, time)
raise ValueError("Cannot parse rotation from: '%s'" % rotation)
elif isinstance(rotation, (numbers.Real, decimal.Decimal)):
return partial(Rotation.rotation_size, size_limit=rotation)
elif isinstance(rotation, datetime_.time):
return Rotation.RotationTime(Rotation.forward_day, rotation)
elif isinstance(rotation, datetime_.timedelta):
step_forward = partial(Rotation.forward_interval, interval=rotation)
return Rotation.RotationTime(step_forward)
elif callable(rotation):
return rotation
else:
raise TypeError(
"Cannot infer rotation for objects of type: '%s'" % type(rotation).__name__
)
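# A minimal usage sketch (illustrative only) of the rotation forms accepted above,
# assuming ``from datetime import time, timedelta`` on the caller's side; string
# values such as "500 MB" or "1 week" are delegated to the "_string_parsers" helpers:
#
#     logger.add("app.log", rotation=5 * 2 ** 20)          # size limit, in bytes
#     logger.add("app.log", rotation=time(0, 0))           # rotate daily at midnight
#     logger.add("app.log", rotation=timedelta(hours=6))   # rotate every 6 hours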
@staticmethod
def _make_retention_function(retention):
if retention is None:
return None
elif isinstance(retention, str):
interval = string_parsers.parse_duration(retention)
if interval is None:
raise ValueError("Cannot parse retention from: '%s'" % retention)
return FileSink._make_retention_function(interval)
elif isinstance(retention, int):
return partial(Retention.retention_count, number=retention)
elif isinstance(retention, datetime_.timedelta):
return partial(Retention.retention_age, seconds=retention.total_seconds())
elif callable(retention):
return retention
else:
raise TypeError(
"Cannot infer retention for objects of type: '%s'" % type(retention).__name__
)
@staticmethod
def _make_compression_function(compression):
if compression is None:
return None
elif isinstance(compression, str):
ext = compression.strip().lstrip(".")
if ext == "gz":
import gzip
compress = partial(Compression.copy_compress, opener=gzip.open, mode="wb")
elif ext == "bz2":
import bz2
compress = partial(Compression.copy_compress, opener=bz2.open, mode="wb")
elif ext == "xz":
import lzma
compress = partial(
Compression.copy_compress, opener=lzma.open, mode="wb", format=lzma.FORMAT_XZ
)
elif ext == "lzma":
import lzma
compress = partial(
Compression.copy_compress, opener=lzma.open, mode="wb", format=lzma.FORMAT_ALONE
)
elif ext == "tar":
import tarfile
compress = partial(Compression.add_compress, opener=tarfile.open, mode="w:")
elif ext == "tar.gz":
import gzip
import tarfile
compress = partial(Compression.add_compress, opener=tarfile.open, mode="w:gz")
elif ext == "tar.bz2":
import bz2
import tarfile
compress = partial(Compression.add_compress, opener=tarfile.open, mode="w:bz2")
elif ext == "tar.xz":
import lzma
import tarfile
compress = partial(Compression.add_compress, opener=tarfile.open, mode="w:xz")
elif ext == "zip":
import zipfile
compress = partial(
Compression.write_compress,
opener=zipfile.ZipFile,
mode="w",
compression=zipfile.ZIP_DEFLATED,
)
else:
raise ValueError("Invalid compression format: '%s'" % ext)
return partial(Compression.compression, ext="." + ext, compress_function=compress)
elif callable(compression):
return compression
else:
raise TypeError(
"Cannot infer compression for objects of type: '%s'" % type(compression).__name__
) | zdppy-log | /zdppy_log-0.2.0.tar.gz/zdppy_log-0.2.0/zdppy_log/_file_sink.py | _file_sink.py |
import builtins
import inspect
import io
import keyword
import linecache
import os
import re
import sys
import sysconfig
import tokenize
import traceback
class SyntaxHighlighter:
_default_style = {
"comment": "\x1b[30m\x1b[1m{}\x1b[0m",
"keyword": "\x1b[35m\x1b[1m{}\x1b[0m",
"builtin": "\x1b[1m{}\x1b[0m",
"string": "\x1b[36m{}\x1b[0m",
"number": "\x1b[34m\x1b[1m{}\x1b[0m",
"operator": "\x1b[35m\x1b[1m{}\x1b[0m",
"punctuation": "\x1b[1m{}\x1b[0m",
"constant": "\x1b[36m\x1b[1m{}\x1b[0m",
"identifier": "\x1b[1m{}\x1b[0m",
"other": "{}",
}
_builtins = set(dir(builtins))
_constants = {"True", "False", "None"}
_punctuation = {"(", ")", "[", "]", "{", "}", ":", ",", ";"}
def __init__(self, style=None):
self._style = style or self._default_style
def highlight(self, source):
style = self._style
row, column = 0, 0
output = ""
for token in self.tokenize(source):
type_, string, start, end, line = token
if type_ == tokenize.NAME:
if string in self._constants:
color = style["constant"]
elif keyword.iskeyword(string):
color = style["keyword"]
elif string in self._builtins:
color = style["builtin"]
else:
color = style["identifier"]
elif type_ == tokenize.OP:
if string in self._punctuation:
color = style["punctuation"]
else:
color = style["operator"]
elif type_ == tokenize.NUMBER:
color = style["number"]
elif type_ == tokenize.STRING:
color = style["string"]
elif type_ == tokenize.COMMENT:
color = style["comment"]
else:
color = style["other"]
start_row, start_column = start
_, end_column = end
if start_row != row:
source = source[:column]
row, column = start_row, 0
if type_ != tokenize.ENCODING:
output += line[column:start_column]
output += color.format(string)
column = end_column
output += source[column:]
return output
@staticmethod
def tokenize(source):
# Worth reading: https://www.asmeurer.com/brown-water-python/
source = source.encode("utf-8")
source = io.BytesIO(source)
try:
yield from tokenize.tokenize(source.readline)
except tokenize.TokenError:
return
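# Illustrative example: with the default style, highlight("x = 1") wraps each
# token in ANSI escapes -- the identifier "x" is emphasized, the operator "="
# is colored magenta/bold and the number "1" blue/bold, while whitespace is
# copied through unchanged.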
class ExceptionFormatter:
_default_theme = {
"introduction": "\x1b[33m\x1b[1m{}\x1b[0m",
"cause": "\x1b[1m{}\x1b[0m",
"context": "\x1b[1m{}\x1b[0m",
"dirname": "\x1b[32m{}\x1b[0m",
"basename": "\x1b[32m\x1b[1m{}\x1b[0m",
"line": "\x1b[33m{}\x1b[0m",
"function": "\x1b[35m{}\x1b[0m",
"exception_type": "\x1b[31m\x1b[1m{}\x1b[0m",
"exception_value": "\x1b[1m{}\x1b[0m",
"arrows": "\x1b[36m{}\x1b[0m",
"value": "\x1b[36m\x1b[1m{}\x1b[0m",
}
def __init__(
self,
colorize=False,
backtrace=False,
diagnose=True,
theme=None,
style=None,
max_length=128,
encoding="ascii",
hidden_frames_filename=None,
prefix="",
):
self._colorize = colorize
self._diagnose = diagnose
self._theme = theme or self._default_theme
self._backtrace = backtrace
self._syntax_highlighter = SyntaxHighlighter(style)
self._max_length = max_length
self._encoding = encoding
self._hidden_frames_filename = hidden_frames_filename
self._prefix = prefix
self._lib_dirs = self._get_lib_dirs()
self._pipe_char = self._get_char("\u2502", "|")
self._cap_char = self._get_char("\u2514", "->")
self._catch_point_identifier = " <Loguru catch point here>"
@staticmethod
def _get_lib_dirs():
schemes = sysconfig.get_scheme_names()
names = ["stdlib", "platstdlib", "platlib", "purelib"]
paths = {sysconfig.get_path(name, scheme) for scheme in schemes for name in names}
return [os.path.abspath(path).lower() + os.sep for path in paths if path in sys.path]
def _get_char(self, char, default):
try:
char.encode(self._encoding)
except (UnicodeEncodeError, LookupError):
return default
else:
return char
def _is_file_mine(self, file):
filepath = os.path.abspath(file).lower()
if not filepath.endswith(".py"):
return False
return not any(filepath.startswith(d) for d in self._lib_dirs)
def _extract_frames(self, tb, is_first, *, limit=None, from_decorator=False):
frames, final_source = [], None
if tb is None or (limit is not None and limit <= 0):
return frames, final_source
def is_valid(frame):
return frame.f_code.co_filename != self._hidden_frames_filename
def get_info(frame, lineno):
filename = frame.f_code.co_filename
function = frame.f_code.co_name
source = linecache.getline(filename, lineno).strip()
return filename, lineno, function, source
infos = []
if is_valid(tb.tb_frame):
infos.append((get_info(tb.tb_frame, tb.tb_lineno), tb.tb_frame))
get_parent_only = from_decorator and not self._backtrace
if (self._backtrace and is_first) or get_parent_only:
frame = tb.tb_frame.f_back
while frame:
if is_valid(frame):
infos.insert(0, (get_info(frame, frame.f_lineno), frame))
if get_parent_only:
break
frame = frame.f_back
if infos and not get_parent_only:
(filename, lineno, function, source), frame = infos[-1]
function += self._catch_point_identifier
infos[-1] = ((filename, lineno, function, source), frame)
tb = tb.tb_next
while tb:
if is_valid(tb.tb_frame):
infos.append((get_info(tb.tb_frame, tb.tb_lineno), tb.tb_frame))
tb = tb.tb_next
if limit is not None:
infos = infos[-limit:]
for (filename, lineno, function, source), frame in infos:
final_source = source
if source:
colorize = self._colorize and self._is_file_mine(filename)
lines = []
if colorize:
lines.append(self._syntax_highlighter.highlight(source))
else:
lines.append(source)
if self._diagnose:
relevant_values = self._get_relevant_values(source, frame)
values = self._format_relevant_values(list(relevant_values), colorize)
lines += list(values)
source = "\n ".join(lines)
frames.append((filename, lineno, function, source))
return frames, final_source
def _get_relevant_values(self, source, frame):
value = None
pending = None
is_attribute = False
is_valid_value = False
is_assignment = True
for token in self._syntax_highlighter.tokenize(source):
type_, string, (_, col), *_ = token
if pending is not None:
# Keyword arguments are ignored
if type_ != tokenize.OP or string != "=" or is_assignment:
yield pending
pending = None
if type_ == tokenize.NAME and not keyword.iskeyword(string):
if not is_attribute:
for variables in (frame.f_locals, frame.f_globals):
try:
value = variables[string]
except KeyError:
continue
else:
is_valid_value = True
pending = (col, self._format_value(value))
break
elif is_valid_value:
try:
value = inspect.getattr_static(value, string)
except AttributeError:
is_valid_value = False
else:
yield (col, self._format_value(value))
elif type_ == tokenize.OP and string == ".":
is_attribute = True
is_assignment = False
elif type_ == tokenize.OP and string == ";":
is_assignment = True
is_attribute = False
is_valid_value = False
else:
is_attribute = False
is_valid_value = False
is_assignment = False
if pending is not None:
yield pending
def _format_relevant_values(self, relevant_values, colorize):
for i in reversed(range(len(relevant_values))):
col, value = relevant_values[i]
pipe_cols = [pcol for pcol, _ in relevant_values[:i]]
pre_line = ""
index = 0
for pc in pipe_cols:
pre_line += (" " * (pc - index)) + self._pipe_char
index = pc + 1
pre_line += " " * (col - index)
value_lines = value.split("\n")
for n, value_line in enumerate(value_lines):
if n == 0:
arrows = pre_line + self._cap_char + " "
else:
arrows = pre_line + " " * (len(self._cap_char) + 1)
if colorize:
arrows = self._theme["arrows"].format(arrows)
value_line = self._theme["value"].format(value_line)
yield arrows + value_line
def _format_value(self, v):
try:
v = repr(v)
except Exception:
v = "<unprintable %s object>" % type(v).__name__
max_length = self._max_length
if max_length is not None and len(v) > max_length:
v = v[: max_length - 3] + "..."
return v
def _format_locations(self, frames_lines, *, has_introduction):
prepend_with_new_line = has_introduction
regex = r'^ File "(?P<file>.*?)", line (?P<line>[^,]+)(?:, in (?P<function>.*))?\n'
for frame in frames_lines:
match = re.match(regex, frame)
if match:
file, line, function = match.group("file", "line", "function")
is_mine = self._is_file_mine(file)
if function is not None:
pattern = ' File "{}", line {}, in {}\n'
else:
pattern = ' File "{}", line {}\n'
if self._backtrace and function and function.endswith(self._catch_point_identifier):
function = function[: -len(self._catch_point_identifier)]
pattern = ">" + pattern[1:]
if self._colorize and is_mine:
dirname, basename = os.path.split(file)
if dirname:
dirname += os.sep
dirname = self._theme["dirname"].format(dirname)
basename = self._theme["basename"].format(basename)
file = dirname + basename
line = self._theme["line"].format(line)
function = self._theme["function"].format(function)
if self._diagnose and (is_mine or prepend_with_new_line):
pattern = "\n" + pattern
location = pattern.format(file, line, function)
frame = location + frame[match.end() :]
prepend_with_new_line = is_mine
yield frame
def _format_exception(self, value, tb, *, seen=None, is_first=False, from_decorator=False):
# Implemented from built-in traceback module: https://git.io/fhHKw
exc_type, exc_value, exc_traceback = type(value), value, tb
if seen is None:
seen = set()
seen.add(id(exc_value))
if exc_value:
if exc_value.__cause__ is not None and id(exc_value.__cause__) not in seen:
for text in self._format_exception(
exc_value.__cause__, exc_value.__cause__.__traceback__, seen=seen
):
yield text
cause = "The above exception was the direct cause of the following exception:"
if self._colorize:
cause = self._theme["cause"].format(cause)
if self._diagnose:
yield "\n\n" + cause + "\n\n\n"
else:
yield "\n" + cause + "\n\n"
elif (
exc_value.__context__ is not None
and id(exc_value.__context__) not in seen
and not exc_value.__suppress_context__
):
for text in self._format_exception(
exc_value.__context__, exc_value.__context__.__traceback__, seen=seen
):
yield text
context = "During handling of the above exception, another exception occurred:"
if self._colorize:
context = self._theme["context"].format(context)
if self._diagnose:
yield "\n\n" + context + "\n\n\n"
else:
yield "\n" + context + "\n\n"
try:
tracebacklimit = sys.tracebacklimit
except AttributeError:
tracebacklimit = None
frames, final_source = self._extract_frames(
exc_traceback, is_first, limit=tracebacklimit, from_decorator=from_decorator
)
exception_only = traceback.format_exception_only(exc_type, exc_value)
error_message = exception_only[-1][:-1] # Remove last new line temporarily
if self._colorize:
if ":" in error_message:
exception_type, exception_value = error_message.split(":", 1)
exception_type = self._theme["exception_type"].format(exception_type)
exception_value = self._theme["exception_value"].format(exception_value)
error_message = exception_type + ":" + exception_value
else:
error_message = self._theme["exception_type"].format(error_message)
if self._diagnose and frames:
if issubclass(exc_type, AssertionError) and not str(exc_value) and final_source:
if self._colorize:
final_source = self._syntax_highlighter.highlight(final_source)
error_message += ": " + final_source
error_message = "\n" + error_message
exception_only[-1] = error_message + "\n"
frames_lines = traceback.format_list(frames) + exception_only
has_introduction = bool(frames)
if self._colorize or self._backtrace or self._diagnose:
frames_lines = self._format_locations(frames_lines, has_introduction=has_introduction)
if is_first:
yield self._prefix
if has_introduction:
introduction = "Traceback (most recent call last):"
if self._colorize:
introduction = self._theme["introduction"].format(introduction)
yield introduction + "\n"
yield "".join(frames_lines)
def format_exception(self, type_, value, tb, *, from_decorator=False):
yield from self._format_exception(value, tb, is_first=True, from_decorator=from_decorator) | zdppy-log | /zdppy_log-0.2.0.tar.gz/zdppy_log-0.2.0/zdppy_log/_better_exceptions.py | _better_exceptions.py |
import functools
import json
import multiprocessing
import os
from threading import Thread
from ._colorizer import Colorizer
from ._locks_machinery import create_handler_lock
def prepare_colored_format(format_, ansi_level):
colored = Colorizer.prepare_format(format_)
return colored, colored.colorize(ansi_level)
def prepare_stripped_format(format_):
colored = Colorizer.prepare_format(format_)
return colored.strip()
def memoize(function):
return functools.lru_cache(maxsize=64)(function)
class Message(str):
__slots__ = ("record",)
class Handler:
def __init__(
self,
*,
sink,
name,
levelno,
formatter,
is_formatter_dynamic,
filter_,
colorize,
serialize,
enqueue,
error_interceptor,
exception_formatter,
id_,
levels_ansi_codes
):
self._name = name
self._sink = sink
self._levelno = levelno
self._formatter = formatter
self._is_formatter_dynamic = is_formatter_dynamic
self._filter = filter_
self._colorize = colorize
self._serialize = serialize
self._enqueue = enqueue
self._error_interceptor = error_interceptor
self._exception_formatter = exception_formatter
self._id = id_
self._levels_ansi_codes = levels_ansi_codes # Warning, reference shared among handlers
self._decolorized_format = None
self._precolorized_formats = {}
self._memoize_dynamic_format = None
self._stopped = False
self._lock = create_handler_lock()
self._queue = None
self._confirmation_event = None
self._confirmation_lock = None
self._owner_process_pid = None
self._thread = None
if self._is_formatter_dynamic:
if self._colorize:
self._memoize_dynamic_format = memoize(prepare_colored_format)
else:
self._memoize_dynamic_format = memoize(prepare_stripped_format)
else:
if self._colorize:
for level_name in self._levels_ansi_codes:
self.update_format(level_name)
else:
self._decolorized_format = self._formatter.strip()
if self._enqueue:
self._queue = multiprocessing.SimpleQueue()
self._confirmation_event = multiprocessing.Event()
self._confirmation_lock = multiprocessing.Lock()
self._owner_process_pid = os.getpid()
self._thread = Thread(
target=self._queued_writer, daemon=True, name="loguru-writer-%d" % self._id
)
self._thread.start()
def __repr__(self):
return "(id=%d, level=%d, sink=%s)" % (self._id, self._levelno, self._name)
def emit(self, record, level_id, from_decorator, is_raw, colored_message):
try:
if self._levelno > record["level"].no:
return
if self._filter is not None:
if not self._filter(record):
return
if self._is_formatter_dynamic:
dynamic_format = self._formatter(record)
formatter_record = record.copy()
if not record["exception"]:
formatter_record["exception"] = ""
else:
type_, value, tb = record["exception"]
formatter = self._exception_formatter
lines = formatter.format_exception(type_, value, tb, from_decorator=from_decorator)
formatter_record["exception"] = "".join(lines)
if colored_message is not None and colored_message.stripped != record["message"]:
colored_message = None
if is_raw:
if colored_message is None or not self._colorize:
formatted = record["message"]
else:
ansi_level = self._levels_ansi_codes[level_id]
formatted = colored_message.colorize(ansi_level)
elif self._is_formatter_dynamic:
if not self._colorize:
precomputed_format = self._memoize_dynamic_format(dynamic_format)
formatted = precomputed_format.format_map(formatter_record)
elif colored_message is None:
ansi_level = self._levels_ansi_codes[level_id]
_, precomputed_format = self._memoize_dynamic_format(dynamic_format, ansi_level)
formatted = precomputed_format.format_map(formatter_record)
else:
ansi_level = self._levels_ansi_codes[level_id]
formatter, precomputed_format = self._memoize_dynamic_format(
dynamic_format, ansi_level
)
coloring_message = formatter.make_coloring_message(
record["message"], ansi_level=ansi_level, colored_message=colored_message
)
formatter_record["message"] = coloring_message
formatted = precomputed_format.format_map(formatter_record)
else:
if not self._colorize:
precomputed_format = self._decolorized_format
formatted = precomputed_format.format_map(formatter_record)
elif colored_message is None:
ansi_level = self._levels_ansi_codes[level_id]
precomputed_format = self._precolorized_formats[level_id]
formatted = precomputed_format.format_map(formatter_record)
else:
ansi_level = self._levels_ansi_codes[level_id]
precomputed_format = self._precolorized_formats[level_id]
coloring_message = self._formatter.make_coloring_message(
record["message"], ansi_level=ansi_level, colored_message=colored_message
)
formatter_record["message"] = coloring_message
formatted = precomputed_format.format_map(formatter_record)
if self._serialize:
formatted = self._serialize_record(formatted, record)
str_record = Message(formatted)
str_record.record = record
with self._lock:
if self._stopped:
return
if self._enqueue:
self._queue.put(str_record)
else:
self._sink.write(str_record)
except Exception:
if not self._error_interceptor.should_catch():
raise
self._error_interceptor.print(record)
def stop(self):
with self._lock:
self._stopped = True
if self._enqueue:
if self._owner_process_pid != os.getpid():
return
self._queue.put(None)
self._thread.join()
if hasattr(self._queue, "close"):
self._queue.close()
self._sink.stop()
def complete_queue(self):
if not self._enqueue:
return
with self._confirmation_lock:
self._queue.put(True)
self._confirmation_event.wait()
self._confirmation_event.clear()
async def complete_async(self):
if self._enqueue and self._owner_process_pid != os.getpid():
return
with self._lock:
await self._sink.complete()
def update_format(self, level_id):
if not self._colorize or self._is_formatter_dynamic:
return
ansi_code = self._levels_ansi_codes[level_id]
self._precolorized_formats[level_id] = self._formatter.colorize(ansi_code)
@property
def levelno(self):
return self._levelno
@staticmethod
def _serialize_record(text, record):
exception = record["exception"]
if exception is not None:
exception = {
"type": None if exception.type is None else exception.type.__name__,
"value": exception.value,
"traceback": bool(record["exception"].traceback),
}
serializable = {
"text": text,
"record": {
"elapsed": {
"repr": record["elapsed"],
"seconds": record["elapsed"].total_seconds(),
},
"exception": exception,
"extra": record["extra"],
"file": {"name": record["file"].name, "path": record["file"].path},
"function": record["function"],
"level": {
"icon": record["level"].icon,
"name": record["level"].name,
"no": record["level"].no,
},
"line": record["line"],
"message": record["message"],
"module": record["module"],
"name": record["name"],
"process": {"id": record["process"].id, "name": record["process"].name},
"thread": {"id": record["thread"].id, "name": record["thread"].name},
"time": {"repr": record["time"], "timestamp": record["time"].timestamp()},
},
}
return json.dumps(serializable, default=str, ensure_ascii=False) + "\n"
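# Illustrative sketch of the output shape: with ``serialize=True`` every line
# written to the sink is a single JSON document along the lines of
#
#     {"text": "<formatted message>\n",
#      "record": {"elapsed": {"repr": "...", "seconds": 0.01},
#                 "exception": null,
#                 "extra": {}, "file": {"name": "...", "path": "..."},
#                 "function": "...", "level": {"icon": "...", "name": "INFO", "no": 20},
#                 "line": 1, "message": "...", "module": "...", "name": "...",
#                 "process": {"id": 1, "name": "MainProcess"},
#                 "thread": {"id": 1, "name": "MainThread"},
#                 "time": {"repr": "...", "timestamp": 1600000000.0}}}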
def _queued_writer(self):
message = None
queue = self._queue
# We need to use a lock to protect sink during fork.
# Particularly, writing to stderr may lead to deadlock in child process.
lock = create_handler_lock()
while True:
try:
message = queue.get()
except Exception:
with lock:
if not self._error_interceptor.should_catch():
raise
self._error_interceptor.print(None)
continue
if message is None:
break
if message is True:
self._confirmation_event.set()
continue
with lock:
try:
self._sink.write(message)
except Exception:
if not self._error_interceptor.should_catch():
raise
self._error_interceptor.print(message.record)
def __getstate__(self):
state = self.__dict__.copy()
state["_lock"] = None
state["_memoize_dynamic_format"] = None
if self._enqueue:
state["_sink"] = None
state["_thread"] = None
state["_owner_process"] = None
return state
def __setstate__(self, state):
self.__dict__.update(state)
self._lock = create_handler_lock()
if self._is_formatter_dynamic:
if self._colorize:
self._memoize_dynamic_format = memoize(prepare_colored_format)
else:
self._memoize_dynamic_format = memoize(prepare_stripped_format) | zdppy-log | /zdppy_log-0.2.0.tar.gz/zdppy_log-0.2.0/zdppy_log/_handler.py | _handler.py |
import asyncio
import logging
import weakref
from ._asyncio_loop import get_running_loop, get_task_loop
class StreamSink:
def __init__(self, stream):
self._stream = stream
self._flushable = callable(getattr(stream, "flush", None))
self._stoppable = callable(getattr(stream, "stop", None))
self._completable = asyncio.iscoroutinefunction(getattr(stream, "complete", None))
def write(self, message):
self._stream.write(message)
if self._flushable:
self._stream.flush()
def stop(self):
if self._stoppable:
self._stream.stop()
async def complete(self):
if self._completable:
await self._stream.complete()
class StandardSink:
def __init__(self, handler):
self._handler = handler
def write(self, message):
record = message.record
message = str(message)
exc = record["exception"]
record = logging.getLogger().makeRecord(
record["name"],
record["level"].no,
record["file"].path,
record["line"],
message,
(),
(exc.type, exc.value, exc.traceback) if exc else None,
record["function"],
{"extra": record["extra"]},
)
if exc:
record.exc_text = "\n"
self._handler.handle(record)
def stop(self):
self._handler.close()
async def complete(self):
pass
class AsyncSink:
def __init__(self, function, loop, error_interceptor):
self._function = function
self._loop = loop
self._error_interceptor = error_interceptor
self._tasks = weakref.WeakSet()
def write(self, message):
try:
loop = self._loop or get_running_loop()
except RuntimeError:
return
coroutine = self._function(message)
task = loop.create_task(coroutine)
def check_exception(future):
if future.cancelled() or future.exception() is None:
return
if not self._error_interceptor.should_catch():
raise future.exception()
self._error_interceptor.print(message.record, exception=future.exception())
task.add_done_callback(check_exception)
self._tasks.add(task)
def stop(self):
for task in self._tasks:
task.cancel()
async def complete(self):
loop = get_running_loop()
for task in self._tasks:
if get_task_loop(task) is loop:
try:
await task
except Exception:
pass # Handled in "check_exception()"
def __getstate__(self):
state = self.__dict__.copy()
state["_tasks"] = None
return state
def __setstate__(self, state):
self.__dict__.update(state)
self._tasks = weakref.WeakSet()
class CallableSink:
def __init__(self, function):
self._function = function
def write(self, message):
self._function(message)
def stop(self):
pass
async def complete(self):
pass | zdppy-log | /zdppy_log-0.2.0.tar.gz/zdppy_log-0.2.0/zdppy_log/_simple_sinks.py | _simple_sinks.py |
import re
from string import Formatter
class Style:
RESET_ALL = 0
BOLD = 1
DIM = 2
ITALIC = 3
UNDERLINE = 4
BLINK = 5
REVERSE = 7
STRIKE = 8
HIDE = 9
NORMAL = 22
class Fore:
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
WHITE = 37
RESET = 39
LIGHTBLACK_EX = 90
LIGHTRED_EX = 91
LIGHTGREEN_EX = 92
LIGHTYELLOW_EX = 93
LIGHTBLUE_EX = 94
LIGHTMAGENTA_EX = 95
LIGHTCYAN_EX = 96
LIGHTWHITE_EX = 97
class Back:
BLACK = 40
RED = 41
GREEN = 42
YELLOW = 43
BLUE = 44
MAGENTA = 45
CYAN = 46
WHITE = 47
RESET = 49
LIGHTBLACK_EX = 100
LIGHTRED_EX = 101
LIGHTGREEN_EX = 102
LIGHTYELLOW_EX = 103
LIGHTBLUE_EX = 104
LIGHTMAGENTA_EX = 105
LIGHTCYAN_EX = 106
LIGHTWHITE_EX = 107
def ansi_escape(codes):
return {name: "\033[%dm" % code for name, code in codes.items()}
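# Editor's note (illustrative): ansi_escape({"bold": Style.BOLD, "red": Fore.RED})
# returns {"bold": "\033[1m", "red": "\033[31m"} -- the raw escape sequences used
# by the lookup tables below.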
class TokenType:
TEXT = 1
ANSI = 2
LEVEL = 3
CLOSING = 4
class AnsiParser:
_style = ansi_escape(
{
"b": Style.BOLD,
"d": Style.DIM,
"n": Style.NORMAL,
"h": Style.HIDE,
"i": Style.ITALIC,
"l": Style.BLINK,
"s": Style.STRIKE,
"u": Style.UNDERLINE,
"v": Style.REVERSE,
"bold": Style.BOLD,
"dim": Style.DIM,
"normal": Style.NORMAL,
"hide": Style.HIDE,
"italic": Style.ITALIC,
"blink": Style.BLINK,
"strike": Style.STRIKE,
"underline": Style.UNDERLINE,
"reverse": Style.REVERSE,
}
)
_foreground = ansi_escape(
{
"k": Fore.BLACK,
"r": Fore.RED,
"g": Fore.GREEN,
"y": Fore.YELLOW,
"e": Fore.BLUE,
"m": Fore.MAGENTA,
"c": Fore.CYAN,
"w": Fore.WHITE,
"lk": Fore.LIGHTBLACK_EX,
"lr": Fore.LIGHTRED_EX,
"lg": Fore.LIGHTGREEN_EX,
"ly": Fore.LIGHTYELLOW_EX,
"le": Fore.LIGHTBLUE_EX,
"lm": Fore.LIGHTMAGENTA_EX,
"lc": Fore.LIGHTCYAN_EX,
"lw": Fore.LIGHTWHITE_EX,
"black": Fore.BLACK,
"red": Fore.RED,
"green": Fore.GREEN,
"yellow": Fore.YELLOW,
"blue": Fore.BLUE,
"magenta": Fore.MAGENTA,
"cyan": Fore.CYAN,
"white": Fore.WHITE,
"light-black": Fore.LIGHTBLACK_EX,
"light-red": Fore.LIGHTRED_EX,
"light-green": Fore.LIGHTGREEN_EX,
"light-yellow": Fore.LIGHTYELLOW_EX,
"light-blue": Fore.LIGHTBLUE_EX,
"light-magenta": Fore.LIGHTMAGENTA_EX,
"light-cyan": Fore.LIGHTCYAN_EX,
"light-white": Fore.LIGHTWHITE_EX,
}
)
_background = ansi_escape(
{
"K": Back.BLACK,
"R": Back.RED,
"G": Back.GREEN,
"Y": Back.YELLOW,
"E": Back.BLUE,
"M": Back.MAGENTA,
"C": Back.CYAN,
"W": Back.WHITE,
"LK": Back.LIGHTBLACK_EX,
"LR": Back.LIGHTRED_EX,
"LG": Back.LIGHTGREEN_EX,
"LY": Back.LIGHTYELLOW_EX,
"LE": Back.LIGHTBLUE_EX,
"LM": Back.LIGHTMAGENTA_EX,
"LC": Back.LIGHTCYAN_EX,
"LW": Back.LIGHTWHITE_EX,
"BLACK": Back.BLACK,
"RED": Back.RED,
"GREEN": Back.GREEN,
"YELLOW": Back.YELLOW,
"BLUE": Back.BLUE,
"MAGENTA": Back.MAGENTA,
"CYAN": Back.CYAN,
"WHITE": Back.WHITE,
"LIGHT-BLACK": Back.LIGHTBLACK_EX,
"LIGHT-RED": Back.LIGHTRED_EX,
"LIGHT-GREEN": Back.LIGHTGREEN_EX,
"LIGHT-YELLOW": Back.LIGHTYELLOW_EX,
"LIGHT-BLUE": Back.LIGHTBLUE_EX,
"LIGHT-MAGENTA": Back.LIGHTMAGENTA_EX,
"LIGHT-CYAN": Back.LIGHTCYAN_EX,
"LIGHT-WHITE": Back.LIGHTWHITE_EX,
}
)
_regex_tag = re.compile(r"\\?</?((?:[fb]g\s)?[^<>\s]*)>")
def __init__(self):
self._tokens = []
self._tags = []
self._color_tokens = []
@staticmethod
def strip(tokens):
output = ""
for type_, value in tokens:
if type_ == TokenType.TEXT:
output += value
return output
@staticmethod
def colorize(tokens, ansi_level):
output = ""
for type_, value in tokens:
if type_ == TokenType.LEVEL:
if ansi_level is None:
raise ValueError(
"The '<level>' color tag is not allowed in this context, "
"it has not yet been associated to any color value."
)
value = ansi_level
output += value
return output
@staticmethod
def wrap(tokens, *, ansi_level, color_tokens):
output = ""
for type_, value in tokens:
if type_ == TokenType.LEVEL:
value = ansi_level
output += value
if type_ == TokenType.CLOSING:
for subtype, subvalue in color_tokens:
if subtype == TokenType.LEVEL:
subvalue = ansi_level
output += subvalue
return output
def feed(self, text, *, raw=False):
if raw:
self._tokens.append((TokenType.TEXT, text))
return
position = 0
for match in self._regex_tag.finditer(text):
markup, tag = match.group(0), match.group(1)
self._tokens.append((TokenType.TEXT, text[position : match.start()]))
position = match.end()
if markup[0] == "\\":
self._tokens.append((TokenType.TEXT, markup[1:]))
continue
if markup[1] == "/":
if self._tags and (tag == "" or tag == self._tags[-1]):
self._tags.pop()
self._color_tokens.pop()
self._tokens.append((TokenType.CLOSING, "\033[0m"))
self._tokens.extend(self._color_tokens)
continue
elif tag in self._tags:
raise ValueError('Closing tag "%s" violates nesting rules' % markup)
else:
raise ValueError('Closing tag "%s" has no corresponding opening tag' % markup)
if tag in {"lvl", "level"}:
token = (TokenType.LEVEL, None)
else:
ansi = self._get_ansicode(tag)
if ansi is None:
raise ValueError(
'Tag "%s" does not correspond to any known ansi directive, '
"make sure you did not misspelled it (or prepend '\\' to escape it)"
% markup
)
token = (TokenType.ANSI, ansi)
self._tags.append(tag)
self._color_tokens.append(token)
self._tokens.append(token)
self._tokens.append((TokenType.TEXT, text[position:]))
def done(self, *, strict=True):
if strict and self._tags:
faulty_tag = self._tags.pop(0)
raise ValueError('Opening tag "<%s>" has no corresponding closing tag' % faulty_tag)
return self._tokens
def current_color_tokens(self):
return list(self._color_tokens)
def _get_ansicode(self, tag):
style = self._style
foreground = self._foreground
background = self._background
# Substitute on a direct match.
if tag in style:
return style[tag]
elif tag in foreground:
return foreground[tag]
elif tag in background:
return background[tag]
# An alternative syntax for setting the color (e.g. <fg red>, <bg red>).
elif tag.startswith("fg ") or tag.startswith("bg "):
st, color = tag[:2], tag[3:]
code = "38" if st == "fg" else "48"
if st == "fg" and color.lower() in foreground:
return foreground[color.lower()]
elif st == "bg" and color.upper() in background:
return background[color.upper()]
elif color.isdigit() and int(color) <= 255:
return "\033[%s;5;%sm" % (code, color)
elif re.match(r"#(?:[a-fA-F0-9]{3}){1,2}$", color):
hex_color = color[1:]
if len(hex_color) == 3:
hex_color *= 2
rgb = tuple(int(hex_color[i : i + 2], 16) for i in (0, 2, 4))
return "\033[%s;2;%s;%s;%sm" % ((code,) + rgb)
elif color.count(",") == 2:
colors = tuple(color.split(","))
if all(x.isdigit() and int(x) <= 255 for x in colors):
return "\033[%s;2;%s;%s;%sm" % ((code,) + colors)
return None
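# Editor's note -- a minimal usage sketch (not part of the library):
#
#   parser = AnsiParser()
#   parser.feed("<red>error</red> done")
#   tokens = parser.done()
#   AnsiParser.strip(tokens)           # 'error done'
#   AnsiParser.colorize(tokens, None)  # '\x1b[31merror\x1b[0m done'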
class ColoringMessage(str):
__fields__ = ("_messages",)
def __format__(self, spec):
return next(self._messages).__format__(spec)
class ColoredMessage:
def __init__(self, tokens):
self.tokens = tokens
self.stripped = AnsiParser.strip(tokens)
def colorize(self, ansi_level):
return AnsiParser.colorize(self.tokens, ansi_level)
class ColoredFormat:
def __init__(self, tokens, messages_color_tokens):
self._tokens = tokens
self._messages_color_tokens = messages_color_tokens
def strip(self):
return AnsiParser.strip(self._tokens)
def colorize(self, ansi_level):
return AnsiParser.colorize(self._tokens, ansi_level)
def make_coloring_message(self, message, *, ansi_level, colored_message):
messages = [
message
if color_tokens is None
else AnsiParser.wrap(
colored_message.tokens, ansi_level=ansi_level, color_tokens=color_tokens
)
for color_tokens in self._messages_color_tokens
]
coloring = ColoringMessage(message)
coloring._messages = iter(messages)
return coloring
class Colorizer:
@staticmethod
def prepare_format(string):
tokens, messages_color_tokens = Colorizer._parse_without_formatting(string)
return ColoredFormat(tokens, messages_color_tokens)
@staticmethod
def prepare_message(string, args=(), kwargs={}):
tokens = Colorizer._parse_with_formatting(string, args, kwargs)
return ColoredMessage(tokens)
@staticmethod
def prepare_simple_message(string):
parser = AnsiParser()
parser.feed(string)
tokens = parser.done()
return ColoredMessage(tokens)
@staticmethod
def ansify(text):
parser = AnsiParser()
parser.feed(text.strip())
tokens = parser.done(strict=False)
return AnsiParser.colorize(tokens, None)
@staticmethod
def _parse_with_formatting(
string, args, kwargs, *, recursion_depth=2, auto_arg_index=0, recursive=False
):
# This function re-implements Formatter._vformat()
if recursion_depth < 0:
raise ValueError("Max string recursion exceeded")
formatter = Formatter()
parser = AnsiParser()
for literal_text, field_name, format_spec, conversion in formatter.parse(string):
parser.feed(literal_text, raw=recursive)
if field_name is not None:
if field_name == "":
if auto_arg_index is False:
raise ValueError(
"cannot switch from manual field "
"specification to automatic field "
"numbering"
)
field_name = str(auto_arg_index)
auto_arg_index += 1
elif field_name.isdigit():
if auto_arg_index:
raise ValueError(
"cannot switch from manual field "
"specification to automatic field "
"numbering"
)
auto_arg_index = False
obj, _ = formatter.get_field(field_name, args, kwargs)
obj = formatter.convert_field(obj, conversion)
format_spec, auto_arg_index = Colorizer._parse_with_formatting(
format_spec,
args,
kwargs,
recursion_depth=recursion_depth - 1,
auto_arg_index=auto_arg_index,
recursive=True,
)
formatted = formatter.format_field(obj, format_spec)
parser.feed(formatted, raw=True)
tokens = parser.done()
if recursive:
return AnsiParser.strip(tokens), auto_arg_index
return tokens
@staticmethod
def _parse_without_formatting(string, *, recursion_depth=2, recursive=False):
if recursion_depth < 0:
raise ValueError("Max string recursion exceeded")
formatter = Formatter()
parser = AnsiParser()
messages_color_tokens = []
for literal_text, field_name, format_spec, conversion in formatter.parse(string):
if literal_text and literal_text[-1] in "{}":
literal_text += literal_text[-1]
parser.feed(literal_text, raw=recursive)
if field_name is not None:
if field_name == "message":
if recursive:
messages_color_tokens.append(None)
else:
color_tokens = parser.current_color_tokens()
messages_color_tokens.append(color_tokens)
field = "{%s" % field_name
if conversion:
field += "!%s" % conversion
if format_spec:
field += ":%s" % format_spec
field += "}"
parser.feed(field, raw=True)
_, color_tokens = Colorizer._parse_without_formatting(
format_spec, recursion_depth=recursion_depth - 1, recursive=True
)
messages_color_tokens.extend(color_tokens)
return parser.done(), messages_color_tokens | zdppy-log | /zdppy_log-0.2.0.tar.gz/zdppy_log-0.2.0/zdppy_log/_colorizer.py | _colorizer.py |
import datetime
import re
class Frequencies:
@staticmethod
def hourly(t):
dt = t + datetime.timedelta(hours=1)
return dt.replace(minute=0, second=0, microsecond=0)
@staticmethod
def daily(t):
dt = t + datetime.timedelta(days=1)
return dt.replace(hour=0, minute=0, second=0, microsecond=0)
@staticmethod
def weekly(t):
dt = t + datetime.timedelta(days=7 - t.weekday())
return dt.replace(hour=0, minute=0, second=0, microsecond=0)
@staticmethod
def monthly(t):
if t.month == 12:
y, m = t.year + 1, 1
else:
y, m = t.year, t.month + 1
return t.replace(year=y, month=m, day=1, hour=0, minute=0, second=0, microsecond=0)
@staticmethod
def yearly(t):
y = t.year + 1
return t.replace(year=y, month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
def parse_size(size):
size = size.strip()
reg = re.compile(r"([e\+\-\.\d]+)\s*([kmgtpezy])?(i)?(b)", flags=re.I)
match = reg.fullmatch(size)
if not match:
return None
s, u, i, b = match.groups()
try:
s = float(s)
except ValueError as e:
raise ValueError("Invalid float value while parsing size: '%s'" % s) from e
u = "kmgtpezy".index(u.lower()) + 1 if u else 0
i = 1024 if i else 1000
b = {"b": 8, "B": 1}[b] if b else 1
size = s * i ** u / b
return size
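# Editor's note -- illustrative results (sizes are returned in bytes):
#   parse_size("100 MB")  # -> 100000000.0
#   parse_size("1 KiB")   # -> 1024.0
#   parse_size("8 kb")    # -> 1000.0 (bits are divided by 8)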
def parse_duration(duration):
duration = duration.strip()
reg = r"(?:([e\+\-\.\d]+)\s*([a-z]+)[\s\,]*)"
units = [
("y|years?", 31536000),
("months?", 2628000),
("w|weeks?", 604800),
("d|days?", 86400),
("h|hours?", 3600),
("min(?:ute)?s?", 60),
("s|sec(?:ond)?s?", 1),
("ms|milliseconds?", 0.001),
("us|microseconds?", 0.000001),
]
if not re.fullmatch(reg + "+", duration, flags=re.I):
return None
seconds = 0
for value, unit in re.findall(reg, duration, flags=re.I):
try:
value = float(value)
except ValueError as e:
raise ValueError("Invalid float value while parsing duration: '%s'" % value) from e
try:
unit = next(u for r, u in units if re.fullmatch(r, unit, flags=re.I))
except StopIteration:
raise ValueError("Invalid unit value while parsing duration: '%s'" % unit) from None
seconds += value * unit
return datetime.timedelta(seconds=seconds)
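# Editor's note -- illustrative results:
#   parse_duration("1 h 30 min") == datetime.timedelta(minutes=90)  # True
#   parse_duration("2 weeks") == datetime.timedelta(days=14)        # True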
def parse_frequency(frequency):
frequencies = {
"hourly": Frequencies.hourly,
"daily": Frequencies.daily,
"weekly": Frequencies.weekly,
"monthly": Frequencies.monthly,
"yearly": Frequencies.yearly,
}
frequency = frequency.strip().lower()
return frequencies.get(frequency, None)
def parse_day(day):
days = {
"monday": 0,
"tuesday": 1,
"wednesday": 2,
"thursday": 3,
"friday": 4,
"saturday": 5,
"sunday": 6,
}
day = day.strip().lower()
if day in days:
return days[day]
elif day.startswith("w") and day[1:].isdigit():
day = int(day[1:])
if not 0 <= day < 7:
raise ValueError("Invalid weekday value while parsing day (expected [0-6]): '%d'" % day)
else:
day = None
return day
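# Editor's note -- illustrative results (0 is Monday, 6 is Sunday):
#   parse_day("friday")  # -> 4
#   parse_day("w0")      # -> 0
#   parse_day("18:00")   # -> None (does not look like a day)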
def parse_time(time):
time = time.strip()
reg = re.compile(r"^[\d\.\:]+\s*(?:[ap]m)?$", flags=re.I)
if not reg.match(time):
return None
formats = [
"%H",
"%H:%M",
"%H:%M:%S",
"%H:%M:%S.%f",
"%I %p",
"%I:%M %S",
"%I:%M:%S %p",
"%I:%M:%S.%f %p",
]
for format_ in formats:
try:
dt = datetime.datetime.strptime(time, format_)
except ValueError:
pass
else:
return dt.time()
raise ValueError("Unrecognized format while parsing time: '%s'" % time)
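# Editor's note -- illustrative results:
#   parse_time("15:30")        # -> datetime.time(15, 30)
#   parse_time("11:30:05 PM")  # -> datetime.time(23, 30, 5)
#   parse_time("sunday")       # -> None (does not look like a time)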
def parse_daytime(daytime):
daytime = daytime.strip()
reg = re.compile(r"^(.*?)\s+at\s+(.*)$", flags=re.I)
match = reg.match(daytime)
if match:
day, time = match.groups()
else:
day = time = daytime
try:
day = parse_day(day)
if match and day is None:
raise ValueError
except ValueError as e:
raise ValueError("Invalid day while parsing daytime: '%s'" % day) from e
try:
time = parse_time(time)
if match and time is None:
raise ValueError
except ValueError as e:
raise ValueError("Invalid time while parsing daytime: '%s'" % time) from e
if day is None and time is None:
return None
return day, time | zdppy-log | /zdppy_log-0.2.0.tar.gz/zdppy_log-0.2.0/zdppy_log/_string_parsers.py | _string_parsers.py |
import os
try:
from ctypes import byref, get_last_error, wintypes, FormatError, WinDLL, WinError
kernel32 = WinDLL("kernel32", use_last_error=True)
CreateFileW = kernel32.CreateFileW
SetFileTime = kernel32.SetFileTime
CloseHandle = kernel32.CloseHandle
CreateFileW.argtypes = (
wintypes.LPWSTR,
wintypes.DWORD,
wintypes.DWORD,
wintypes.LPVOID,
wintypes.DWORD,
wintypes.DWORD,
wintypes.HANDLE,
)
CreateFileW.restype = wintypes.HANDLE
SetFileTime.argtypes = (
wintypes.HANDLE,
wintypes.PFILETIME,
wintypes.PFILETIME,
wintypes.PFILETIME,
)
SetFileTime.restype = wintypes.BOOL
CloseHandle.argtypes = (wintypes.HANDLE,)
CloseHandle.restype = wintypes.BOOL
except (ImportError, AttributeError, OSError, ValueError):
SUPPORTED = False
else:
SUPPORTED = os.name == "nt"
__version__ = "1.1.0"
__all__ = ["setctime"]
def setctime(filepath, timestamp, *, follow_symlinks=True):
"""Set the "ctime" (creation time) attribute of a file given an unix timestamp (Windows only)."""
if not SUPPORTED:
raise OSError("This function is only available for the Windows platform.")
filepath = os.path.normpath(os.path.abspath(str(filepath)))
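    # Editor's note: 116444736000000000 is 11644473600 seconds -- the offset between
    # the Unix epoch (1970-01-01) and the Windows FILETIME epoch (1601-01-01) --
    # expressed in 100-nanosecond ticks, the unit FILETIME uses.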
timestamp = int(timestamp * 10000000) + 116444736000000000
if not 0 < timestamp < (1 << 64):
raise ValueError("The system value of the timestamp exceeds u64 size: %d" % timestamp)
atime = wintypes.FILETIME(0xFFFFFFFF, 0xFFFFFFFF)
mtime = wintypes.FILETIME(0xFFFFFFFF, 0xFFFFFFFF)
ctime = wintypes.FILETIME(timestamp & 0xFFFFFFFF, timestamp >> 32)
flags = 128 | 0x02000000
if not follow_symlinks:
flags |= 0x00200000
handle = wintypes.HANDLE(CreateFileW(filepath, 256, 0, None, 3, flags, None))
if handle.value == wintypes.HANDLE(-1).value:
raise WinError(get_last_error())
if not wintypes.BOOL(SetFileTime(handle, byref(ctime), byref(atime), byref(mtime))):
raise WinError(get_last_error())
if not wintypes.BOOL(CloseHandle(handle)):
raise WinError(get_last_error()) | zdppy-log | /zdppy_log-0.2.0.tar.gz/zdppy_log-0.2.0/zdppy_log/win32_setctime.py | win32_setctime.py |
from os import environ
def env(key, type_, default=None):
if key not in environ:
return default
val = environ[key]
if type_ == str:
return val
elif type_ == bool:
if val.lower() in ["1", "true", "yes", "y", "ok", "on"]:
return True
if val.lower() in ["0", "false", "no", "n", "nok", "off"]:
return False
raise ValueError(
"Invalid environment variable '%s' (expected a boolean): '%s'" % (key, val)
)
elif type_ == int:
try:
return int(val)
except ValueError:
raise ValueError(
"Invalid environment variable '%s' (expected an integer): '%s'" % (key, val)
) from None
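# Editor's note (illustrative): env() reads an environment variable and coerces it
# to the requested type; e.g. with LOGURU_COLORIZE=yes set, env("LOGURU_COLORIZE",
# bool, None) returns True, and the default is returned as-is when the variable is unset.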
LOGURU_AUTOINIT = env("LOGURU_AUTOINIT", bool, True)
LOGURU_FORMAT = env(
"LOGURU_FORMAT",
str,
"<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | "
"<level>{level: <8}</level> | "
"<cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>",
)
LOGURU_FILTER = env("LOGURU_FILTER", str, None)
LOGURU_LEVEL = env("LOGURU_LEVEL", str, "DEBUG")
LOGURU_COLORIZE = env("LOGURU_COLORIZE", bool, None)
LOGURU_SERIALIZE = env("LOGURU_SERIALIZE", bool, False)
LOGURU_BACKTRACE = env("LOGURU_BACKTRACE", bool, True)
LOGURU_DIAGNOSE = env("LOGURU_DIAGNOSE", bool, True)
LOGURU_ENQUEUE = env("LOGURU_ENQUEUE", bool, False)
LOGURU_CATCH = env("LOGURU_CATCH", bool, True)
LOGURU_TRACE_NO = env("LOGURU_TRACE_NO", int, 5)
LOGURU_TRACE_COLOR = env("LOGURU_TRACE_COLOR", str, "<cyan><bold>")
LOGURU_TRACE_ICON = env("LOGURU_TRACE_ICON", str, "✏️") # Pencil
LOGURU_DEBUG_NO = env("LOGURU_DEBUG_NO", int, 10)
LOGURU_DEBUG_COLOR = env("LOGURU_DEBUG_COLOR", str, "<blue><bold>")
LOGURU_DEBUG_ICON = env("LOGURU_DEBUG_ICON", str, "🐞") # Lady Beetle
LOGURU_INFO_NO = env("LOGURU_INFO_NO", int, 20)
LOGURU_INFO_COLOR = env("LOGURU_INFO_COLOR", str, "<bold>")
LOGURU_INFO_ICON = env("LOGURU_INFO_ICON", str, "ℹ️") # Information
LOGURU_SUCCESS_NO = env("LOGURU_SUCCESS_NO", int, 25)
LOGURU_SUCCESS_COLOR = env("LOGURU_SUCCESS_COLOR", str, "<green><bold>")
LOGURU_SUCCESS_ICON = env("LOGURU_SUCCESS_ICON", str, "✔️") # Heavy Check Mark
LOGURU_WARNING_NO = env("LOGURU_WARNING_NO", int, 30)
LOGURU_WARNING_COLOR = env("LOGURU_WARNING_COLOR", str, "<yellow><bold>")
LOGURU_WARNING_ICON = env("LOGURU_WARNING_ICON", str, "⚠️") # Warning
LOGURU_ERROR_NO = env("LOGURU_ERROR_NO", int, 40)
LOGURU_ERROR_COLOR = env("LOGURU_ERROR_COLOR", str, "<red><bold>")
LOGURU_ERROR_ICON = env("LOGURU_ERROR_ICON", str, "❌") # Cross Mark
LOGURU_CRITICAL_NO = env("LOGURU_CRITICAL_NO", int, 50)
LOGURU_CRITICAL_COLOR = env("LOGURU_CRITICAL_COLOR", str, "<RED><bold>")
LOGURU_CRITICAL_ICON = env("LOGURU_CRITICAL_ICON", str, "☠️") # Skull and Crossbones | zdppy-log | /zdppy_log-0.2.0.tar.gz/zdppy_log-0.2.0/zdppy_log/_defaults.py | _defaults.py |
import atexit as _atexit
import sys
import sys as _sys
from . import _defaults
from ._logger import Core as _Core
from ._logger import Logger as _Logger
__version__ = "0.1.9"
__all__ = ["logger", "Log"]
logger = _Logger(
core=_Core(),
exception=None,
depth=0,
record=False,
lazy=False,
colors=False,
raw=False,
capture=True,
patcher=None,
extra={},
)
if _defaults.LOGURU_AUTOINIT and _sys.stderr:
logger.add(_sys.stderr)
_atexit.register(logger.remove)
config = {
"format": "{time:YYYY-MM-DD HH:mm:ss} | {level} | {file}:{line} | {message}",
"level": "INFO",
"rotation": "100 MB",
"compression": "zip",
"enqueue": True,
"encoding": "utf-8",
"serialize": True,
"retention": 10,
}
class Log:
"""
    Logging object wrapping the module-level logger.
"""
def __init__(self, log_file_path: str = "log.log",
level: str = "INFO",
rotation: str = "100 MB",
serialize: bool = False,
full_path: bool = False,
retention: int = 10,
debug: bool = True,
is_only_console: bool = False,
):
"""
创建日志对象
:param level 日志等级
:param rotation 单个日志文件大小
:param serialize 是否开启格式化日志
:param full_path 是否启用全路径。默认关闭,开启后日志路径的模块路径显示为完整绝对路径。
:param retention 日志文件备份个数
:param debug 是否为开发环境
:param is_only_console 是否只输出到控制台
"""
        # Reset any previously configured handlers
logger.remove()
        # Log level
self.level = level.upper()
config["level"] = level.upper()
        # Maximum size of a single log file
self.rotation = rotation
config["rotation"] = rotation
        # Whether to serialize records as JSON
self.serialize = serialize
config["serialize"] = serialize
        # Number of rotated log files to keep
self.retention = retention
config["retention"] = retention
        # Log format (note: no "format" parameter is defined, so this stores the built-in "format")
        self.format = format
        # Whether to use full module paths
self.full_path = full_path
        # Whether debug mode is enabled
self.__debug = debug
        # Whether to log to the console only
self.__is_only_console = is_only_console
if is_only_console:
            # Add a console handler
if self.__debug:
# logger.add(sys.stderr, level="DEBUG", format=format)
logger.add(sys.stderr, level="DEBUG")
else:
logger.add(sys.stderr, level=level.upper())
self.__set_logger_method(logger)
else:
            # Add a file handler
            # Color legend: green; level-dependent color; cyan
            # config["format"] = format
logger.add(log_file_path, **config)
            # Add a console handler
if self.__debug:
# logger.add(sys.stderr, level="DEBUG", format=format)
logger.add(sys.stderr, level="DEBUG")
self.__set_logger_method(logger)
        # Expose the exception-catching decorator
self.catch = logger.catch
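        # Editor's note -- a minimal usage sketch (not part of the library):
        #   log = Log("logs/app.log", level="INFO", debug=False)
        #   log.info("service started")
        #   @log.catch
        #   def risky():
        #       1 / 0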
def __set_logger_method(self, logger):
"""
设置日志方法
"""
# 日志方法
self.debug = logger.debug
self.info = logger.info
self.success = logger.success
self.warning = logger.warning
self.error = logger.error
self.critical = logger.critical | zdppy-log | /zdppy_log-0.2.0.tar.gz/zdppy_log-0.2.0/zdppy_log/__init__.py | __init__.py |
import re
import sys
import os
from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style, BEL
from .winterm import WinTerm, WinColor, WinStyle
from .win32 import windll, winapi_test
winterm = None
if windll is not None:
winterm = WinTerm()
class StreamWrapper(object):
'''
Wraps a stream (such as stdout), acting as a transparent proxy for all
attribute access apart from method 'write()', which is delegated to our
Converter instance.
'''
def __init__(self, wrapped, converter):
# double-underscore everything to prevent clashes with names of
# attributes on the wrapped stream object.
self.__wrapped = wrapped
self.__convertor = converter
def __getattr__(self, name):
return getattr(self.__wrapped, name)
def __enter__(self, *args, **kwargs):
# special method lookup bypasses __getattr__/__getattribute__, see
# https://stackoverflow.com/questions/12632894/why-doesnt-getattr-work-with-exit
# thus, contextlib magic methods are not proxied via __getattr__
return self.__wrapped.__enter__(*args, **kwargs)
def __exit__(self, *args, **kwargs):
return self.__wrapped.__exit__(*args, **kwargs)
def write(self, text):
self.__convertor.write(text)
def isatty(self):
stream = self.__wrapped
if 'PYCHARM_HOSTED' in os.environ:
if stream is not None and (stream is sys.__stdout__ or stream is sys.__stderr__):
return True
try:
stream_isatty = stream.isatty
except AttributeError:
return False
else:
return stream_isatty()
@property
def closed(self):
stream = self.__wrapped
try:
return stream.closed
except AttributeError:
return True
class AnsiToWin32(object):
'''
Implements a 'write()' method which, on Windows, will strip ANSI character
sequences from the text, and if outputting to a tty, will convert them into
win32 function calls.
'''
ANSI_CSI_RE = re.compile('\001?\033\\[((?:\\d|;)*)([a-zA-Z])\002?') # Control Sequence Introducer
ANSI_OSC_RE = re.compile('\001?\033\\]([^\a]*)(\a)\002?') # Operating System Command
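    # Editor's note (illustrative): '\033[31;1m' matches ANSI_CSI_RE with params
    # '31;1' and command 'm'; '\033]2;title\a' matches ANSI_OSC_RE, capturing
    # '2;title' and the terminating BEL.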
def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
# The wrapped stream (normally sys.stdout or sys.stderr)
self.wrapped = wrapped
# should we reset colors to defaults after every .write()
self.autoreset = autoreset
# create the proxy wrapping our output stream
self.stream = StreamWrapper(wrapped, self)
on_windows = os.name == 'nt'
# We test if the WinAPI works, because even if we are on Windows
# we may be using a terminal that doesn't support the WinAPI
# (e.g. Cygwin Terminal). In this case it's up to the terminal
# to support the ANSI codes.
conversion_supported = on_windows and winapi_test()
# should we strip ANSI sequences from our output?
if strip is None:
strip = conversion_supported or (not self.stream.closed and not self.stream.isatty())
self.strip = strip
# should we should convert ANSI sequences into win32 calls?
if convert is None:
convert = conversion_supported and not self.stream.closed and self.stream.isatty()
self.convert = convert
# dict of ansi codes to win32 functions and parameters
self.win32_calls = self.get_win32_calls()
# are we wrapping stderr?
self.on_stderr = self.wrapped is sys.stderr
def should_wrap(self):
'''
True if this class is actually needed. If false, then the output
stream will not be affected, nor will win32 calls be issued, so
wrapping stdout is not actually required. This will generally be
False on non-Windows platforms, unless optional functionality like
autoreset has been requested using kwargs to init()
'''
return self.convert or self.strip or self.autoreset
def get_win32_calls(self):
if self.convert and winterm:
return {
AnsiStyle.RESET_ALL: (winterm.reset_all, ),
AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),
AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),
AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),
AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),
AnsiFore.RED: (winterm.fore, WinColor.RED),
AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),
AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),
AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),
AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),
AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),
AnsiFore.WHITE: (winterm.fore, WinColor.GREY),
AnsiFore.RESET: (winterm.fore, ),
AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True),
AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True),
AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True),
AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True),
AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True),
AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True),
AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True),
AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True),
AnsiBack.BLACK: (winterm.back, WinColor.BLACK),
AnsiBack.RED: (winterm.back, WinColor.RED),
AnsiBack.GREEN: (winterm.back, WinColor.GREEN),
AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),
AnsiBack.BLUE: (winterm.back, WinColor.BLUE),
AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),
AnsiBack.CYAN: (winterm.back, WinColor.CYAN),
AnsiBack.WHITE: (winterm.back, WinColor.GREY),
AnsiBack.RESET: (winterm.back, ),
AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True),
AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True),
AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True),
AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True),
AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True),
AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True),
AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True),
AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True),
}
return dict()
def write(self, text):
if self.strip or self.convert:
self.write_and_convert(text)
else:
self.wrapped.write(text)
self.wrapped.flush()
if self.autoreset:
self.reset_all()
def reset_all(self):
if self.convert:
self.call_win32('m', (0,))
elif not self.strip and not self.stream.closed:
self.wrapped.write(Style.RESET_ALL)
def write_and_convert(self, text):
'''
Write the given text to our wrapped stream, stripping any ANSI
sequences from the text, and optionally converting them into win32
calls.
'''
cursor = 0
text = self.convert_osc(text)
for match in self.ANSI_CSI_RE.finditer(text):
start, end = match.span()
self.write_plain_text(text, cursor, start)
self.convert_ansi(*match.groups())
cursor = end
self.write_plain_text(text, cursor, len(text))
def write_plain_text(self, text, start, end):
if start < end:
self.wrapped.write(text[start:end])
self.wrapped.flush()
def convert_ansi(self, paramstring, command):
if self.convert:
params = self.extract_params(command, paramstring)
self.call_win32(command, params)
def extract_params(self, command, paramstring):
if command in 'Hf':
params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';'))
while len(params) < 2:
# defaults:
params = params + (1,)
else:
params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0)
if len(params) == 0:
# defaults:
if command in 'JKm':
params = (0,)
elif command in 'ABCD':
params = (1,)
return params
def call_win32(self, command, params):
if command == 'm':
for param in params:
if param in self.win32_calls:
func_args = self.win32_calls[param]
func = func_args[0]
args = func_args[1:]
kwargs = dict(on_stderr=self.on_stderr)
func(*args, **kwargs)
elif command in 'J':
winterm.erase_screen(params[0], on_stderr=self.on_stderr)
elif command in 'K':
winterm.erase_line(params[0], on_stderr=self.on_stderr)
elif command in 'Hf': # cursor position - absolute
winterm.set_cursor_position(params, on_stderr=self.on_stderr)
elif command in 'ABCD': # cursor position - relative
n = params[0]
# A - up, B - down, C - forward, D - back
x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command]
winterm.cursor_adjust(x, y, on_stderr=self.on_stderr)
def convert_osc(self, text):
for match in self.ANSI_OSC_RE.finditer(text):
start, end = match.span()
text = text[:start] + text[end:]
paramstring, command = match.groups()
if command == BEL:
if paramstring.count(";") == 1:
params = paramstring.split(";")
# 0 - change title and icon (we will only change title)
# 1 - change icon (we don't support this)
# 2 - change title
if params[0] in '02':
winterm.set_title(params[1])
return text | zdppy-log | /zdppy_log-0.2.0.tar.gz/zdppy_log-0.2.0/zdppy_log/colorama/ansitowin32.py | ansitowin32.py |
# from winbase.h
STDOUT = -11
STDERR = -12
try:
import ctypes
from ctypes import LibraryLoader
windll = LibraryLoader(ctypes.WinDLL)
from ctypes import wintypes
except (AttributeError, ImportError):
windll = None
SetConsoleTextAttribute = lambda *_: None
winapi_test = lambda *_: None
else:
from ctypes import byref, Structure, c_char, POINTER
COORD = wintypes._COORD
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
"""struct in wincon.h."""
_fields_ = [
("dwSize", COORD),
("dwCursorPosition", COORD),
("wAttributes", wintypes.WORD),
("srWindow", wintypes.SMALL_RECT),
("dwMaximumWindowSize", COORD),
]
def __str__(self):
return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
self.dwSize.Y, self.dwSize.X
, self.dwCursorPosition.Y, self.dwCursorPosition.X
, self.wAttributes
, self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
, self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
)
_GetStdHandle = windll.kernel32.GetStdHandle
_GetStdHandle.argtypes = [
wintypes.DWORD,
]
_GetStdHandle.restype = wintypes.HANDLE
_GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
_GetConsoleScreenBufferInfo.argtypes = [
wintypes.HANDLE,
POINTER(CONSOLE_SCREEN_BUFFER_INFO),
]
_GetConsoleScreenBufferInfo.restype = wintypes.BOOL
_SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
_SetConsoleTextAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
]
_SetConsoleTextAttribute.restype = wintypes.BOOL
_SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
_SetConsoleCursorPosition.argtypes = [
wintypes.HANDLE,
COORD,
]
_SetConsoleCursorPosition.restype = wintypes.BOOL
_FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
_FillConsoleOutputCharacterA.argtypes = [
wintypes.HANDLE,
c_char,
wintypes.DWORD,
COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputCharacterA.restype = wintypes.BOOL
_FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
_FillConsoleOutputAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
wintypes.DWORD,
COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputAttribute.restype = wintypes.BOOL
_SetConsoleTitleW = windll.kernel32.SetConsoleTitleW
_SetConsoleTitleW.argtypes = [
wintypes.LPCWSTR
]
_SetConsoleTitleW.restype = wintypes.BOOL
def _winapi_test(handle):
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = _GetConsoleScreenBufferInfo(
handle, byref(csbi))
return bool(success)
def winapi_test():
return any(_winapi_test(h) for h in
(_GetStdHandle(STDOUT), _GetStdHandle(STDERR)))
def GetConsoleScreenBufferInfo(stream_id=STDOUT):
handle = _GetStdHandle(stream_id)
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = _GetConsoleScreenBufferInfo(
handle, byref(csbi))
return csbi
def SetConsoleTextAttribute(stream_id, attrs):
handle = _GetStdHandle(stream_id)
return _SetConsoleTextAttribute(handle, attrs)
def SetConsoleCursorPosition(stream_id, position, adjust=True):
position = COORD(*position)
# If the position is out of range, do nothing.
if position.Y <= 0 or position.X <= 0:
return
# Adjust for Windows' SetConsoleCursorPosition:
# 1. being 0-based, while ANSI is 1-based.
# 2. expecting (x,y), while ANSI uses (y,x).
adjusted_position = COORD(position.Y - 1, position.X - 1)
if adjust:
# Adjust for viewport's scroll position
sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
adjusted_position.Y += sr.Top
adjusted_position.X += sr.Left
# Resume normal processing
handle = _GetStdHandle(stream_id)
return _SetConsoleCursorPosition(handle, adjusted_position)
def FillConsoleOutputCharacter(stream_id, char, length, start):
handle = _GetStdHandle(stream_id)
char = c_char(char.encode())
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
success = _FillConsoleOutputCharacterA(
handle, char, length, start, byref(num_written))
return num_written.value
def FillConsoleOutputAttribute(stream_id, attr, length, start):
''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
handle = _GetStdHandle(stream_id)
attribute = wintypes.WORD(attr)
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
return _FillConsoleOutputAttribute(
handle, attribute, length, start, byref(num_written))
def SetConsoleTitle(title):
return _SetConsoleTitleW(title) | zdppy-log | /zdppy_log-0.2.0.tar.gz/zdppy_log-0.2.0/zdppy_log/colorama/win32.py | win32.py |
from . import win32
# from wincon.h
class WinColor(object):
BLACK = 0
BLUE = 1
GREEN = 2
CYAN = 3
RED = 4
MAGENTA = 5
YELLOW = 6
GREY = 7
# from wincon.h
class WinStyle(object):
NORMAL = 0x00 # dim text, dim background
BRIGHT = 0x08 # bright text, dim background
BRIGHT_BACKGROUND = 0x80 # dim text, bright background
class WinTerm(object):
def __init__(self):
self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes
self.set_attrs(self._default)
self._default_fore = self._fore
self._default_back = self._back
self._default_style = self._style
# In order to emulate LIGHT_EX in windows, we borrow the BRIGHT style.
# So that LIGHT_EX colors and BRIGHT style do not clobber each other,
# we track them separately, since LIGHT_EX is overwritten by Fore/Back
# and BRIGHT is overwritten by Style codes.
self._light = 0
def get_attrs(self):
return self._fore + self._back * 16 + (self._style | self._light)
def set_attrs(self, value):
self._fore = value & 7
self._back = (value >> 4) & 7
self._style = value & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND)
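    # Editor's note (illustrative): red text (4) on a green background (2) packs to
    # 4 + 2 * 16 = 36 (0x24); BRIGHT adds 0x08 and BRIGHT_BACKGROUND adds 0x80.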
def reset_all(self, on_stderr=None):
self.set_attrs(self._default)
self.set_console(attrs=self._default)
self._light = 0
def fore(self, fore=None, light=False, on_stderr=False):
if fore is None:
fore = self._default_fore
self._fore = fore
# Emulate LIGHT_EX with BRIGHT Style
if light:
self._light |= WinStyle.BRIGHT
else:
self._light &= ~WinStyle.BRIGHT
self.set_console(on_stderr=on_stderr)
def back(self, back=None, light=False, on_stderr=False):
if back is None:
back = self._default_back
self._back = back
# Emulate LIGHT_EX with BRIGHT_BACKGROUND Style
if light:
self._light |= WinStyle.BRIGHT_BACKGROUND
else:
self._light &= ~WinStyle.BRIGHT_BACKGROUND
self.set_console(on_stderr=on_stderr)
def style(self, style=None, on_stderr=False):
if style is None:
style = self._default_style
self._style = style
self.set_console(on_stderr=on_stderr)
def set_console(self, attrs=None, on_stderr=False):
if attrs is None:
attrs = self.get_attrs()
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
win32.SetConsoleTextAttribute(handle, attrs)
def get_position(self, handle):
position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition
# Because Windows coordinates are 0-based,
# and win32.SetConsoleCursorPosition expects 1-based.
position.X += 1
position.Y += 1
return position
def set_cursor_position(self, position=None, on_stderr=False):
if position is None:
# I'm not currently tracking the position, so there is no default.
# position = self.get_position()
return
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
win32.SetConsoleCursorPosition(handle, position)
def cursor_adjust(self, x, y, on_stderr=False):
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
position = self.get_position(handle)
adjusted_position = (position.Y + y, position.X + x)
win32.SetConsoleCursorPosition(handle, adjusted_position, adjust=False)
def erase_screen(self, mode=0, on_stderr=False):
# 0 should clear from the cursor to the end of the screen.
# 1 should clear from the cursor to the beginning of the screen.
# 2 should clear the entire screen, and move cursor to (1,1)
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
csbi = win32.GetConsoleScreenBufferInfo(handle)
# get the number of character cells in the current buffer
cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y
# get number of character cells before current cursor position
cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X
if mode == 0:
from_coord = csbi.dwCursorPosition
cells_to_erase = cells_in_screen - cells_before_cursor
elif mode == 1:
from_coord = win32.COORD(0, 0)
cells_to_erase = cells_before_cursor
elif mode == 2:
from_coord = win32.COORD(0, 0)
cells_to_erase = cells_in_screen
else:
# invalid mode
return
# fill the entire screen with blanks
win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
# now set the buffer's attributes accordingly
win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
if mode == 2:
# put the cursor where needed
win32.SetConsoleCursorPosition(handle, (1, 1))
def erase_line(self, mode=0, on_stderr=False):
# 0 should clear from the cursor to the end of the line.
# 1 should clear from the cursor to the beginning of the line.
# 2 should clear the entire line.
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
csbi = win32.GetConsoleScreenBufferInfo(handle)
if mode == 0:
from_coord = csbi.dwCursorPosition
cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X
elif mode == 1:
from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
cells_to_erase = csbi.dwCursorPosition.X
elif mode == 2:
from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
cells_to_erase = csbi.dwSize.X
else:
# invalid mode
return
# fill the entire screen with blanks
win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
# now set the buffer's attributes accordingly
win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
def set_title(self, title):
win32.SetConsoleTitle(title) | zdppy-log | /zdppy_log-0.2.0.tar.gz/zdppy_log-0.2.0/zdppy_log/colorama/winterm.py | winterm.py |
CSI = '\033['
OSC = '\033]'
BEL = '\a'
def code_to_chars(code):
return CSI + str(code) + 'm'
def set_title(title):
return OSC + '2;' + title + BEL
def clear_screen(mode=2):
return CSI + str(mode) + 'J'
def clear_line(mode=2):
return CSI + str(mode) + 'K'
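# Editor's note (illustrative): code_to_chars(31) returns '\033[31m' and
# set_title('build') returns '\033]2;build\a'; the instances defined at the bottom
# of this module expose the wrapped codes, e.g. Fore.RED == '\033[31m'.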
class AnsiCodes(object):
def __init__(self):
# the subclasses declare class attributes which are numbers.
# Upon instantiation we define instance attributes, which are the same
# as the class attributes but wrapped with the ANSI escape sequence
for name in dir(self):
if not name.startswith('_'):
value = getattr(self, name)
setattr(self, name, code_to_chars(value))
class AnsiCursor(object):
def UP(self, n=1):
return CSI + str(n) + 'A'
def DOWN(self, n=1):
return CSI + str(n) + 'B'
def FORWARD(self, n=1):
return CSI + str(n) + 'C'
def BACK(self, n=1):
return CSI + str(n) + 'D'
def POS(self, x=1, y=1):
return CSI + str(y) + ';' + str(x) + 'H'
class AnsiFore(AnsiCodes):
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
WHITE = 37
RESET = 39
# These are fairly well supported, but not part of the standard.
LIGHTBLACK_EX = 90
LIGHTRED_EX = 91
LIGHTGREEN_EX = 92
LIGHTYELLOW_EX = 93
LIGHTBLUE_EX = 94
LIGHTMAGENTA_EX = 95
LIGHTCYAN_EX = 96
LIGHTWHITE_EX = 97
class AnsiBack(AnsiCodes):
BLACK = 40
RED = 41
GREEN = 42
YELLOW = 43
BLUE = 44
MAGENTA = 45
CYAN = 46
WHITE = 47
RESET = 49
# These are fairly well supported, but not part of the standard.
LIGHTBLACK_EX = 100
LIGHTRED_EX = 101
LIGHTGREEN_EX = 102
LIGHTYELLOW_EX = 103
LIGHTBLUE_EX = 104
LIGHTMAGENTA_EX = 105
LIGHTCYAN_EX = 106
LIGHTWHITE_EX = 107
class AnsiStyle(AnsiCodes):
BRIGHT = 1
DIM = 2
NORMAL = 22
RESET_ALL = 0
Fore = AnsiFore()
Back = AnsiBack()
Style = AnsiStyle()
Cursor = AnsiCursor() | zdppy-log | /zdppy_log-0.2.0.tar.gz/zdppy_log-0.2.0/zdppy_log/colorama/ansi.py | ansi.py |
from ._compat import PY2, text_type, long_type, JYTHON, IRONPYTHON, unichr
import datetime
from decimal import Decimal
import re
import time
from zdppy_mysql.constants import FIELD_TYPE
def escape_item(val, charset, mapping=None):
if mapping is None:
mapping = encoders
encoder = mapping.get(type(val))
# Fallback to default when no encoder found
if not encoder:
try:
encoder = mapping[text_type]
except KeyError:
raise TypeError("no default type converter defined")
if encoder in (escape_dict, escape_sequence):
val = encoder(val, charset, mapping)
else:
val = encoder(val, mapping)
return val
def escape_dict(val, charset, mapping=None):
n = {}
for k, v in val.items():
quoted = escape_item(v, charset, mapping)
n[k] = quoted
return n
def escape_sequence(val, charset, mapping=None):
n = []
for item in val:
quoted = escape_item(item, charset, mapping)
n.append(quoted)
return "(" + ",".join(n) + ")"
def escape_set(val, charset, mapping=None):
return ','.join([escape_item(x, charset, mapping) for x in val])
def escape_bool(value, mapping=None):
return str(int(value))
def escape_object(value, mapping=None):
return str(value)
def escape_int(value, mapping=None):
return str(value)
def escape_float(value, mapping=None):
return ('%.15g' % value)
_escape_table = [unichr(x) for x in range(128)]
_escape_table[0] = u'\\0'
_escape_table[ord('\\')] = u'\\\\'
_escape_table[ord('\n')] = u'\\n'
_escape_table[ord('\r')] = u'\\r'
_escape_table[ord('\032')] = u'\\Z'
_escape_table[ord('"')] = u'\\"'
_escape_table[ord("'")] = u"\\'"
def _escape_unicode(value, mapping=None):
"""escapes *value* without adding quote.
Value should be unicode
"""
return value.translate(_escape_table)
if PY2:
def escape_string(value, mapping=None):
"""escape_string escapes *value* but not surround it with quotes.
Value should be bytes or unicode.
"""
if isinstance(value, unicode):
return _escape_unicode(value)
assert isinstance(value, (bytes, bytearray))
value = value.replace('\\', '\\\\')
value = value.replace('\0', '\\0')
value = value.replace('\n', '\\n')
value = value.replace('\r', '\\r')
value = value.replace('\032', '\\Z')
value = value.replace("'", "\\'")
value = value.replace('"', '\\"')
return value
def escape_bytes_prefixed(value, mapping=None):
assert isinstance(value, (bytes, bytearray))
return b"_binary'%s'" % escape_string(value)
def escape_bytes(value, mapping=None):
assert isinstance(value, (bytes, bytearray))
return b"'%s'" % escape_string(value)
else:
escape_string = _escape_unicode
# On Python ~3.5, str.decode('ascii', 'surrogateescape') is slow.
# (fixed in Python 3.6, http://bugs.python.org/issue24870)
# Workaround is str.decode('latin1') then translate 0x80-0xff into 0udc80-0udcff.
# We can escape special chars and surrogateescape at once.
_escape_bytes_table = _escape_table + [chr(i) for i in range(0xdc80, 0xdd00)]
def escape_bytes_prefixed(value, mapping=None):
return "_binary'%s'" % value.decode('latin1').translate(_escape_bytes_table)
def escape_bytes(value, mapping=None):
return "'%s'" % value.decode('latin1').translate(_escape_bytes_table)
def escape_unicode(value, mapping=None):
return u"'%s'" % _escape_unicode(value)
def escape_str(value, mapping=None):
return "'%s'" % escape_string(str(value), mapping)
def escape_None(value, mapping=None):
return 'NULL'
def escape_timedelta(obj, mapping=None):
seconds = int(obj.seconds) % 60
minutes = int(obj.seconds // 60) % 60
hours = int(obj.seconds // 3600) % 24 + int(obj.days) * 24
if obj.microseconds:
fmt = "'{0:02d}:{1:02d}:{2:02d}.{3:06d}'"
else:
fmt = "'{0:02d}:{1:02d}:{2:02d}'"
return fmt.format(hours, minutes, seconds, obj.microseconds)
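# Editor's note (illustrative): days are folded into hours, so
# escape_timedelta(datetime.timedelta(days=1, hours=2, minutes=30)) returns "'26:30:00'".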
def escape_time(obj, mapping=None):
if obj.microsecond:
fmt = "'{0.hour:02}:{0.minute:02}:{0.second:02}.{0.microsecond:06}'"
else:
fmt = "'{0.hour:02}:{0.minute:02}:{0.second:02}'"
return fmt.format(obj)
def escape_datetime(obj, mapping=None):
if obj.microsecond:
fmt = "'{0.year:04}-{0.month:02}-{0.day:02} {0.hour:02}:{0.minute:02}:{0.second:02}.{0.microsecond:06}'"
else:
fmt = "'{0.year:04}-{0.month:02}-{0.day:02} {0.hour:02}:{0.minute:02}:{0.second:02}'"
return fmt.format(obj)
def escape_date(obj, mapping=None):
fmt = "'{0.year:04}-{0.month:02}-{0.day:02}'"
return fmt.format(obj)
def escape_struct_time(obj, mapping=None):
return escape_datetime(datetime.datetime(*obj[:6]))
def _convert_second_fraction(s):
if not s:
return 0
# Pad zeros to ensure the fraction length in microseconds
s = s.ljust(6, '0')
return int(s[:6])
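# Editor's note (illustrative): _convert_second_fraction("25") returns 250000,
# i.e. the fraction ".25" expressed in microseconds.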
DATETIME_RE = re.compile(r"(\d{1,4})-(\d{1,2})-(\d{1,2})[T ](\d{1,2}):(\d{1,2}):(\d{1,2})(?:.(\d{1,6}))?")
def convert_datetime(obj):
"""Returns a DATETIME or TIMESTAMP column value as a datetime object:
>>> datetime_or_None('2007-02-25 23:06:20')
datetime.datetime(2007, 2, 25, 23, 6, 20)
>>> datetime_or_None('2007-02-25T23:06:20')
datetime.datetime(2007, 2, 25, 23, 6, 20)
Illegal values are returned as None:
>>> datetime_or_None('2007-02-31T23:06:20') is None
True
>>> datetime_or_None('0000-00-00 00:00:00') is None
True
"""
if not PY2 and isinstance(obj, (bytes, bytearray)):
obj = obj.decode('ascii')
m = DATETIME_RE.match(obj)
if not m:
return convert_date(obj)
try:
groups = list(m.groups())
groups[-1] = _convert_second_fraction(groups[-1])
return datetime.datetime(*[ int(x) for x in groups ])
except ValueError:
return convert_date(obj)
TIMEDELTA_RE = re.compile(r"(-)?(\d{1,3}):(\d{1,2}):(\d{1,2})(?:.(\d{1,6}))?")
def convert_timedelta(obj):
"""Returns a TIME column as a timedelta object:
>>> timedelta_or_None('25:06:17')
datetime.timedelta(1, 3977)
>>> timedelta_or_None('-25:06:17')
datetime.timedelta(-2, 83177)
Illegal values are returned as None:
>>> timedelta_or_None('random crap') is None
True
Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but
can accept values as (+|-)DD HH:MM:SS. The latter format will not
be parsed correctly by this function.
"""
if not PY2 and isinstance(obj, (bytes, bytearray)):
obj = obj.decode('ascii')
m = TIMEDELTA_RE.match(obj)
if not m:
return obj
try:
groups = list(m.groups())
groups[-1] = _convert_second_fraction(groups[-1])
negate = -1 if groups[0] else 1
hours, minutes, seconds, microseconds = groups[1:]
tdelta = datetime.timedelta(
hours = int(hours),
minutes = int(minutes),
seconds = int(seconds),
microseconds = int(microseconds)
) * negate
return tdelta
except ValueError:
return obj
TIME_RE = re.compile(r"(\d{1,2}):(\d{1,2}):(\d{1,2})(?:.(\d{1,6}))?")
def convert_time(obj):
"""Returns a TIME column as a time object:
>>> time_or_None('15:06:17')
datetime.time(15, 6, 17)
Illegal values are returned as None:
>>> time_or_None('-25:06:17') is None
True
>>> time_or_None('random crap') is None
True
Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but
can accept values as (+|-)DD HH:MM:SS. The latter format will not
be parsed correctly by this function.
Also note that MySQL's TIME column corresponds more closely to
Python's timedelta and not time. However if you want TIME columns
to be treated as time-of-day and not a time offset, then you can
use set this function as the converter for FIELD_TYPE.TIME.
"""
if not PY2 and isinstance(obj, (bytes, bytearray)):
obj = obj.decode('ascii')
m = TIME_RE.match(obj)
if not m:
return obj
try:
groups = list(m.groups())
groups[-1] = _convert_second_fraction(groups[-1])
hours, minutes, seconds, microseconds = groups
return datetime.time(hour=int(hours), minute=int(minutes),
second=int(seconds), microsecond=int(microseconds))
except ValueError:
return obj
def convert_date(obj):
"""Returns a DATE column as a date object:
>>> date_or_None('2007-02-26')
datetime.date(2007, 2, 26)
Illegal values are returned as None:
>>> date_or_None('2007-02-31') is None
True
>>> date_or_None('0000-00-00') is None
True
"""
if not PY2 and isinstance(obj, (bytes, bytearray)):
obj = obj.decode('ascii')
try:
return datetime.date(*[ int(x) for x in obj.split('-', 2) ])
except ValueError:
return obj
def convert_mysql_timestamp(timestamp):
"""Convert a MySQL TIMESTAMP to a Timestamp object.
MySQL >= 4.1 returns TIMESTAMP in the same format as DATETIME:
>>> mysql_timestamp_converter('2007-02-25 22:32:17')
datetime.datetime(2007, 2, 25, 22, 32, 17)
MySQL < 4.1 uses a big string of numbers:
>>> mysql_timestamp_converter('20070225223217')
datetime.datetime(2007, 2, 25, 22, 32, 17)
Illegal values are returned as None:
>>> mysql_timestamp_converter('2007-02-31 22:32:17') is None
True
>>> mysql_timestamp_converter('00000000000000') is None
True
"""
if not PY2 and isinstance(timestamp, (bytes, bytearray)):
timestamp = timestamp.decode('ascii')
if timestamp[4] == '-':
return convert_datetime(timestamp)
timestamp += "0"*(14-len(timestamp)) # padding
year, month, day, hour, minute, second = \
int(timestamp[:4]), int(timestamp[4:6]), int(timestamp[6:8]), \
int(timestamp[8:10]), int(timestamp[10:12]), int(timestamp[12:14])
try:
return datetime.datetime(year, month, day, hour, minute, second)
except ValueError:
return timestamp
def convert_set(s):
if isinstance(s, (bytes, bytearray)):
return set(s.split(b","))
return set(s.split(","))
def through(x):
return x
#def convert_bit(b):
# b = "\x00" * (8 - len(b)) + b # pad w/ zeroes
# return struct.unpack(">Q", b)[0]
#
# the snippet above is right, but MySQLdb doesn't process bits,
# so we shouldn't either
convert_bit = through
encoders = {
bool: escape_bool,
int: escape_int,
long_type: escape_int,
float: escape_float,
str: escape_str,
text_type: escape_unicode,
tuple: escape_sequence,
list: escape_sequence,
set: escape_sequence,
frozenset: escape_sequence,
dict: escape_dict,
type(None): escape_None,
datetime.date: escape_date,
datetime.datetime: escape_datetime,
datetime.timedelta: escape_timedelta,
datetime.time: escape_time,
time.struct_time: escape_struct_time,
Decimal: escape_object,
}
if not PY2 or JYTHON or IRONPYTHON:
encoders[bytes] = escape_bytes
decoders = {
FIELD_TYPE.BIT: convert_bit,
FIELD_TYPE.TINY: int,
FIELD_TYPE.SHORT: int,
FIELD_TYPE.LONG: int,
FIELD_TYPE.FLOAT: float,
FIELD_TYPE.DOUBLE: float,
FIELD_TYPE.LONGLONG: int,
FIELD_TYPE.INT24: int,
FIELD_TYPE.YEAR: int,
FIELD_TYPE.TIMESTAMP: convert_mysql_timestamp,
FIELD_TYPE.DATETIME: convert_datetime,
FIELD_TYPE.TIME: convert_timedelta,
FIELD_TYPE.DATE: convert_date,
FIELD_TYPE.SET: convert_set,
FIELD_TYPE.BLOB: through,
FIELD_TYPE.TINY_BLOB: through,
FIELD_TYPE.MEDIUM_BLOB: through,
FIELD_TYPE.LONG_BLOB: through,
FIELD_TYPE.STRING: through,
FIELD_TYPE.VAR_STRING: through,
FIELD_TYPE.VARCHAR: through,
FIELD_TYPE.DECIMAL: Decimal,
FIELD_TYPE.NEWDECIMAL: Decimal,
}
# for MySQLdb compatibility
conversions = encoders.copy()
conversions.update(decoders)
Thing2Literal = escape_str | zdppy-mysql | /zdppy_mysql-0.2.5.tar.gz/zdppy_mysql-0.2.5/zdppy_mysql/converters.py | converters.py |
from __future__ import print_function, absolute_import
from functools import partial
import re
import warnings
from ._compat import range_type, text_type, PY2
from zdppy_mysql import err
#: Regular expression for :meth:`Cursor.executemany`.
#: executemany only supports simple bulk insert.
#: You can use it to load a large dataset.
RE_INSERT_VALUES = re.compile(
r"\s*((?:INSERT|REPLACE)\b.+\bVALUES?\s*)" +
r"(\(\s*(?:%s|%\(.+\)s)\s*(?:,\s*(?:%s|%\(.+\)s)\s*)*\))" +
r"(\s*(?:ON DUPLICATE.*)?);?\s*\Z",
re.IGNORECASE | re.DOTALL)
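# Editor's note (illustrative): "INSERT INTO t (a, b) VALUES (%s, %s)" splits into
# prefix "INSERT INTO t (a, b) VALUES ", values "(%s, %s)" and an empty postfix,
# which lets executemany() join many escaped value tuples into one statement.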
class Cursor(object):
"""
This is the object you use to interact with the database.
Do not create an instance of a Cursor yourself. Call
connections.Connection.cursor().
See `Cursor <https://www.python.org/dev/peps/pep-0249/#cursor-objects>`_ in
the specification.
"""
#: Max statement size which :meth:`executemany` generates.
#:
#: Max size of allowed statement is max_allowed_packet - packet_header_size.
#: Default value of max_allowed_packet is 1048576.
max_stmt_length = 1024000
_defer_warnings = False
def __init__(self, connection):
self.connection = connection
self.description = None
self.rownumber = 0
self.rowcount = -1
self.arraysize = 1
self._executed = None
self._result = None
self._rows = None
self._warnings_handled = False
def close(self):
"""
Closing a cursor just exhausts all remaining data.
"""
conn = self.connection
if conn is None:
return
try:
while self.nextset():
pass
finally:
self.connection = None
def __enter__(self):
return self
def __exit__(self, *exc_info):
del exc_info
self.close()
def _get_db(self):
if not self.connection:
raise err.ProgrammingError("Cursor closed")
return self.connection
def _check_executed(self):
if not self._executed:
raise err.ProgrammingError("execute() first")
def _conv_row(self, row):
return row
def setinputsizes(self, *args):
"""Does nothing, required by DB API."""
def setoutputsizes(self, *args):
"""Does nothing, required by DB API."""
def _nextset(self, unbuffered=False):
"""Get the next query set"""
conn = self._get_db()
current_result = self._result
# for unbuffered queries warnings are only available once whole result has been read
if unbuffered:
self._show_warnings()
if current_result is None or current_result is not conn._result:
return None
if not current_result.has_next:
return None
self._result = None
self._clear_result()
conn.next_result(unbuffered=unbuffered)
self._do_get_result()
return True
def nextset(self):
return self._nextset(False)
def _ensure_bytes(self, x, encoding=None):
if isinstance(x, text_type):
x = x.encode(encoding)
elif isinstance(x, (tuple, list)):
x = type(x)(self._ensure_bytes(v, encoding=encoding) for v in x)
return x
def _escape_args(self, args, conn):
ensure_bytes = partial(self._ensure_bytes, encoding=conn.encoding)
if isinstance(args, (tuple, list)):
if PY2:
args = tuple(map(ensure_bytes, args))
return tuple(conn.literal(arg) for arg in args)
elif isinstance(args, dict):
if PY2:
args = {ensure_bytes(key): ensure_bytes(val) for
(key, val) in args.items()}
return {key: conn.literal(val) for (key, val) in args.items()}
else:
# If it's not a dictionary let's try escaping it anyways.
# Worst case it will throw a Value error
if PY2:
args = ensure_bytes(args)
return conn.escape(args)
def mogrify(self, query, args=None):
"""
Returns the exact string that is sent to the database by calling the
execute() method.
This method follows the extension to the DB API 2.0 followed by Psycopg.
"""
conn = self._get_db()
if PY2: # Use bytes on Python 2 always
query = self._ensure_bytes(query, encoding=conn.encoding)
if args is not None:
query = query % self._escape_args(args, conn)
return query
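# Usage sketch (illustrative; the table and parameter are assumptions):
#
#     cursor.mogrify("SELECT * FROM users WHERE id = %s", (10,))
#     # -> "SELECT * FROM users WHERE id = 10"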
def execute(self, query, args=None):
"""Execute a query
:param str query: Query to execute.
:param args: parameters used with query. (optional)
:type args: tuple, list or dict
:return: Number of affected rows
:rtype: int
If args is a list or tuple, %s can be used as a placeholder in the query.
If args is a dict, %(name)s can be used as a placeholder in the query.
"""
while self.nextset():
pass
query = self.mogrify(query, args)
result = self._query(query)
self._executed = query
return result
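# Usage sketch (illustrative; table and column names are assumptions):
#
#     cursor.execute("SELECT * FROM users WHERE id = %s", (10,))
#     cursor.execute("SELECT * FROM users WHERE name = %(name)s",
#                    {"name": "alice"})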
def executemany(self, query, args):
# type: (str, list) -> int
"""Run several data against one query
:param query: query to execute on server
:param args: Sequence of sequences or mappings. It is used as parameter.
:return: Number of rows affected, if any.
This method improves performance on multiple-row INSERT and
REPLACE. Otherwise it is equivalent to looping over args with
execute().
"""
if not args:
return
m = RE_INSERT_VALUES.match(query)
if m:
q_prefix = m.group(1) % ()
q_values = m.group(2).rstrip()
q_postfix = m.group(3) or ''
assert q_values[0] == '(' and q_values[-1] == ')'
return self._do_execute_many(q_prefix, q_values, q_postfix, args,
self.max_stmt_length,
self._get_db().encoding)
self.rowcount = sum(self.execute(query, arg) for arg in args)
return self.rowcount
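# Usage sketch (illustrative; table and values are assumptions):
#
#     cursor.executemany(
#         "INSERT INTO users (name, age) VALUES (%s, %s)",
#         [("alice", 30), ("bob", 25)],
#     )
#
# The INSERT matches RE_INSERT_VALUES above, so the rows are packed into
# as few multi-row statements as max_stmt_length allows.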
def _do_execute_many(self, prefix, values, postfix, args, max_stmt_length, encoding):
conn = self._get_db()
escape = self._escape_args
if isinstance(prefix, text_type):
prefix = prefix.encode(encoding)
if PY2 and isinstance(values, text_type):
values = values.encode(encoding)
if isinstance(postfix, text_type):
postfix = postfix.encode(encoding)
sql = bytearray(prefix)
args = iter(args)
v = values % escape(next(args), conn)
if isinstance(v, text_type):
if PY2:
v = v.encode(encoding)
else:
v = v.encode(encoding, 'surrogateescape')
sql += v
rows = 0
for arg in args:
v = values % escape(arg, conn)
if isinstance(v, text_type):
if PY2:
v = v.encode(encoding)
else:
v = v.encode(encoding, 'surrogateescape')
if len(sql) + len(v) + len(postfix) + 1 > max_stmt_length:
rows += self.execute(sql + postfix)
sql = bytearray(prefix)
else:
sql += b','
sql += v
rows += self.execute(sql + postfix)
self.rowcount = rows
return rows
def callproc(self, procname, args=()):
"""Execute stored procedure procname with args
procname -- string, name of procedure to execute on server
args -- Sequence of parameters to use with procedure
Returns the original args.
Compatibility warning: PEP-249 specifies that any modified
parameters must be returned. This is currently impossible
as they are only available by storing them in a server
variable and then retrieved by a query. Since stored
procedures return zero or more result sets, there is no
reliable way to get at OUT or INOUT parameters via callproc.
The server variables are named @_procname_n, where procname
is the parameter above and n is the position of the parameter
(from zero). Once all result sets generated by the procedure
have been fetched, you can issue a SELECT @_procname_0, ...
query using .execute() to get any OUT or INOUT values.
Compatibility warning: The act of calling a stored procedure
itself creates an empty result set. This appears after any
result sets generated by the procedure. This is non-standard
behavior with respect to the DB-API. Be sure to use nextset()
to advance through all result sets; otherwise you may get
disconnected.
"""
conn = self._get_db()
if args:
fmt = '@_{0}_%d=%s'.format(procname)
self._query('SET %s' % ','.join(fmt % (index, conn.escape(arg))
for index, arg in enumerate(args)))
self.nextset()
q = "CALL %s(%s)" % (procname,
','.join(['@_%s_%d' % (procname, i)
for i in range_type(len(args))]))
self._query(q)
self._executed = q
return args
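# Usage sketch (illustrative; the procedure name is an assumption):
#
#     cursor.callproc('multiply', (5, 6, 0))   # third argument is an OUT param
#     while cursor.nextset():                  # drain the procedure's result sets
#         pass
#     cursor.execute('SELECT @_multiply_2')    # read the OUT value back
#     cursor.fetchone()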
def fetchone(self):
"""Fetch the next row"""
self._check_executed()
if self._rows is None or self.rownumber >= len(self._rows):
return None
result = self._rows[self.rownumber]
self.rownumber += 1
return result
def fetchmany(self, size=None):
"""Fetch several rows"""
self._check_executed()
if self._rows is None:
return ()
end = self.rownumber + (size or self.arraysize)
result = self._rows[self.rownumber:end]
self.rownumber = min(end, len(self._rows))
return result
def fetchall(self):
"""Fetch all the rows"""
self._check_executed()
if self._rows is None:
return ()
if self.rownumber:
result = self._rows[self.rownumber:]
else:
result = self._rows
self.rownumber = len(self._rows)
return result
def scroll(self, value, mode='relative'):
self._check_executed()
if mode == 'relative':
r = self.rownumber + value
elif mode == 'absolute':
r = value
else:
raise err.ProgrammingError("unknown scroll mode %s" % mode)
if not (0 <= r < len(self._rows)):
raise IndexError("out of range")
self.rownumber = r
def _query(self, q):
conn = self._get_db()
self._last_executed = q
self._clear_result()
conn.query(q)
self._do_get_result()
return self.rowcount
def _clear_result(self):
self.rownumber = 0
self._result = None
self.rowcount = 0
self.description = None
self.lastrowid = None
self._rows = None
def _do_get_result(self):
conn = self._get_db()
self._result = result = conn._result
self.rowcount = result.affected_rows
self.description = result.description
self.lastrowid = result.insert_id
self._rows = result.rows
self._warnings_handled = False
if not self._defer_warnings:
self._show_warnings()
def _show_warnings(self):
if self._warnings_handled:
return
self._warnings_handled = True
if self._result and (self._result.has_next or not self._result.warning_count):
return
ws = self._get_db().show_warnings()
if ws is None:
return
for w in ws:
msg = w[-1]
if PY2:
if isinstance(msg, unicode):
msg = msg.encode('utf-8', 'replace')
warnings.warn(err.Warning(*w[1:3]), stacklevel=4)
def __iter__(self):
return iter(self.fetchone, None)
Warning = err.Warning
Error = err.Error
InterfaceError = err.InterfaceError
DatabaseError = err.DatabaseError
DataError = err.DataError
OperationalError = err.OperationalError
IntegrityError = err.IntegrityError
InternalError = err.InternalError
ProgrammingError = err.ProgrammingError
NotSupportedError = err.NotSupportedError
class DictCursorMixin(object):
# You can override this to use OrderedDict or other dict-like types.
dict_type = dict
def _do_get_result(self):
super(DictCursorMixin, self)._do_get_result()
fields = []
if self.description:
for f in self._result.fields:
name = f.name
if name in fields:
name = f.table_name + '.' + name
fields.append(name)
self._fields = fields
if fields and self._rows:
self._rows = [self._conv_row(r) for r in self._rows]
def _conv_row(self, row):
if row is None:
return None
return self.dict_type(zip(self._fields, row))
class DictCursor(DictCursorMixin, Cursor):
"""A cursor which returns results as a dictionary"""
class SSCursor(Cursor):
"""
Unbuffered Cursor, mainly useful for queries that return a lot of data,
or for connections to remote servers over a slow network.
Instead of copying every row of data into a buffer, this will fetch
rows as needed. The upside of this is the client uses much less memory,
and rows are returned much faster when traveling over a slow network
or if the result set is very big.
There are limitations, though. The MySQL protocol doesn't support
returning the total number of rows, so the only way to tell how many rows
there are is to iterate over every row returned. Also, it currently isn't
possible to scroll backwards, as only the current row is held in memory.
"""
_defer_warnings = True
def _conv_row(self, row):
return row
def close(self):
conn = self.connection
if conn is None:
return
if self._result is not None and self._result is conn._result:
self._result._finish_unbuffered_query()
try:
while self.nextset():
pass
finally:
self.connection = None
__del__ = close
def _query(self, q):
conn = self._get_db()
self._last_executed = q
self._clear_result()
conn.query(q, unbuffered=True)
self._do_get_result()
return self.rowcount
def nextset(self):
return self._nextset(unbuffered=True)
def read_next(self):
"""Read next row"""
return self._conv_row(self._result._read_rowdata_packet_unbuffered())
def fetchone(self):
"""Fetch next row"""
self._check_executed()
row = self.read_next()
if row is None:
self._show_warnings()
return None
self.rownumber += 1
return row
def fetchall(self):
"""
Fetch all rows, as per MySQLdb. Pretty useless for large queries, as
it buffers the whole result. See fetchall_unbuffered() if you want an
unbuffered generator version of this method.
"""
return list(self.fetchall_unbuffered())
def fetchall_unbuffered(self):
"""
Fetch all rows, implemented as a generator. This is not standard DB API
behavior, but returning everything in a list would use an unreasonable
amount of memory for large result sets.
"""
return iter(self.fetchone, None)
def __iter__(self):
return self.fetchall_unbuffered()
def fetchmany(self, size=None):
"""Fetch many"""
self._check_executed()
if size is None:
size = self.arraysize
rows = []
for i in range_type(size):
row = self.read_next()
if row is None:
self._show_warnings()
break
rows.append(row)
self.rownumber += 1
return rows
def scroll(self, value, mode='relative'):
self._check_executed()
if mode == 'relative':
if value < 0:
raise err.NotSupportedError(
"Backwards scrolling not supported by this cursor")
for _ in range_type(value):
self.read_next()
self.rownumber += value
elif mode == 'absolute':
if value < self.rownumber:
raise err.NotSupportedError(
"Backwards scrolling not supported by this cursor")
end = value - self.rownumber
for _ in range_type(end):
self.read_next()
self.rownumber = value
else:
raise err.ProgrammingError("unknown scroll mode %s" % mode)
class SSDictCursor(DictCursorMixin, SSCursor):
"""An unbuffered cursor, which returns results as a dictionary""" | zdppy-mysql | /zdppy_mysql-0.2.5.tar.gz/zdppy_mysql-0.2.5/zdppy_mysql/cursors.py | cursors.py |
MBLENGTH = {
8: 1,
33: 3,
88: 2,
91: 2,
}
class Charset(object):
def __init__(self, id, name, collation, is_default):
self.id, self.name, self.collation = id, name, collation
self.is_default = is_default == 'Yes'
def __repr__(self):
return "Charset(id=%s, name=%r, collation=%r)" % (
self.id, self.name, self.collation)
@property
def encoding(self):
name = self.name
if name in ('utf8mb4', 'utf8mb3'):
return 'utf8'
return name
@property
def is_binary(self):
return self.id == 63
class Charsets:
def __init__(self):
self._by_id = {}
self._by_name = {}
def add(self, c):
self._by_id[c.id] = c
if c.is_default:
self._by_name[c.name] = c
def by_id(self, id):
return self._by_id[id]
def by_name(self, name):
return self._by_name.get(name.lower())
_charsets = Charsets()
"""
Generated with:
mysql -N -s -e "select id, character_set_name, collation_name, is_default
from information_schema.collations order by id;" | python -c "import sys
for l in sys.stdin.readlines():
id, name, collation, is_default = l.split(chr(9))
print '_charsets.add(Charset(%s, \'%s\', \'%s\', \'%s\'))' \
% (id, name, collation, is_default.strip())
"
"""
_charsets.add(Charset(1, 'big5', 'big5_chinese_ci', 'Yes'))
_charsets.add(Charset(2, 'latin2', 'latin2_czech_cs', ''))
_charsets.add(Charset(3, 'dec8', 'dec8_swedish_ci', 'Yes'))
_charsets.add(Charset(4, 'cp850', 'cp850_general_ci', 'Yes'))
_charsets.add(Charset(5, 'latin1', 'latin1_german1_ci', ''))
_charsets.add(Charset(6, 'hp8', 'hp8_english_ci', 'Yes'))
_charsets.add(Charset(7, 'koi8r', 'koi8r_general_ci', 'Yes'))
_charsets.add(Charset(8, 'latin1', 'latin1_swedish_ci', 'Yes'))
_charsets.add(Charset(9, 'latin2', 'latin2_general_ci', 'Yes'))
_charsets.add(Charset(10, 'swe7', 'swe7_swedish_ci', 'Yes'))
_charsets.add(Charset(11, 'ascii', 'ascii_general_ci', 'Yes'))
_charsets.add(Charset(12, 'ujis', 'ujis_japanese_ci', 'Yes'))
_charsets.add(Charset(13, 'sjis', 'sjis_japanese_ci', 'Yes'))
_charsets.add(Charset(14, 'cp1251', 'cp1251_bulgarian_ci', ''))
_charsets.add(Charset(15, 'latin1', 'latin1_danish_ci', ''))
_charsets.add(Charset(16, 'hebrew', 'hebrew_general_ci', 'Yes'))
_charsets.add(Charset(18, 'tis620', 'tis620_thai_ci', 'Yes'))
_charsets.add(Charset(19, 'euckr', 'euckr_korean_ci', 'Yes'))
_charsets.add(Charset(20, 'latin7', 'latin7_estonian_cs', ''))
_charsets.add(Charset(21, 'latin2', 'latin2_hungarian_ci', ''))
_charsets.add(Charset(22, 'koi8u', 'koi8u_general_ci', 'Yes'))
_charsets.add(Charset(23, 'cp1251', 'cp1251_ukrainian_ci', ''))
_charsets.add(Charset(24, 'gb2312', 'gb2312_chinese_ci', 'Yes'))
_charsets.add(Charset(25, 'greek', 'greek_general_ci', 'Yes'))
_charsets.add(Charset(26, 'cp1250', 'cp1250_general_ci', 'Yes'))
_charsets.add(Charset(27, 'latin2', 'latin2_croatian_ci', ''))
_charsets.add(Charset(28, 'gbk', 'gbk_chinese_ci', 'Yes'))
_charsets.add(Charset(29, 'cp1257', 'cp1257_lithuanian_ci', ''))
_charsets.add(Charset(30, 'latin5', 'latin5_turkish_ci', 'Yes'))
_charsets.add(Charset(31, 'latin1', 'latin1_german2_ci', ''))
_charsets.add(Charset(32, 'armscii8', 'armscii8_general_ci', 'Yes'))
_charsets.add(Charset(33, 'utf8', 'utf8_general_ci', 'Yes'))
_charsets.add(Charset(34, 'cp1250', 'cp1250_czech_cs', ''))
_charsets.add(Charset(36, 'cp866', 'cp866_general_ci', 'Yes'))
_charsets.add(Charset(37, 'keybcs2', 'keybcs2_general_ci', 'Yes'))
_charsets.add(Charset(38, 'macce', 'macce_general_ci', 'Yes'))
_charsets.add(Charset(39, 'macroman', 'macroman_general_ci', 'Yes'))
_charsets.add(Charset(40, 'cp852', 'cp852_general_ci', 'Yes'))
_charsets.add(Charset(41, 'latin7', 'latin7_general_ci', 'Yes'))
_charsets.add(Charset(42, 'latin7', 'latin7_general_cs', ''))
_charsets.add(Charset(43, 'macce', 'macce_bin', ''))
_charsets.add(Charset(44, 'cp1250', 'cp1250_croatian_ci', ''))
_charsets.add(Charset(45, 'utf8mb4', 'utf8mb4_general_ci', 'Yes'))
_charsets.add(Charset(46, 'utf8mb4', 'utf8mb4_bin', ''))
_charsets.add(Charset(47, 'latin1', 'latin1_bin', ''))
_charsets.add(Charset(48, 'latin1', 'latin1_general_ci', ''))
_charsets.add(Charset(49, 'latin1', 'latin1_general_cs', ''))
_charsets.add(Charset(50, 'cp1251', 'cp1251_bin', ''))
_charsets.add(Charset(51, 'cp1251', 'cp1251_general_ci', 'Yes'))
_charsets.add(Charset(52, 'cp1251', 'cp1251_general_cs', ''))
_charsets.add(Charset(53, 'macroman', 'macroman_bin', ''))
_charsets.add(Charset(57, 'cp1256', 'cp1256_general_ci', 'Yes'))
_charsets.add(Charset(58, 'cp1257', 'cp1257_bin', ''))
_charsets.add(Charset(59, 'cp1257', 'cp1257_general_ci', 'Yes'))
_charsets.add(Charset(63, 'binary', 'binary', 'Yes'))
_charsets.add(Charset(64, 'armscii8', 'armscii8_bin', ''))
_charsets.add(Charset(65, 'ascii', 'ascii_bin', ''))
_charsets.add(Charset(66, 'cp1250', 'cp1250_bin', ''))
_charsets.add(Charset(67, 'cp1256', 'cp1256_bin', ''))
_charsets.add(Charset(68, 'cp866', 'cp866_bin', ''))
_charsets.add(Charset(69, 'dec8', 'dec8_bin', ''))
_charsets.add(Charset(70, 'greek', 'greek_bin', ''))
_charsets.add(Charset(71, 'hebrew', 'hebrew_bin', ''))
_charsets.add(Charset(72, 'hp8', 'hp8_bin', ''))
_charsets.add(Charset(73, 'keybcs2', 'keybcs2_bin', ''))
_charsets.add(Charset(74, 'koi8r', 'koi8r_bin', ''))
_charsets.add(Charset(75, 'koi8u', 'koi8u_bin', ''))
_charsets.add(Charset(76, 'utf8', 'utf8_tolower_ci', ''))
_charsets.add(Charset(77, 'latin2', 'latin2_bin', ''))
_charsets.add(Charset(78, 'latin5', 'latin5_bin', ''))
_charsets.add(Charset(79, 'latin7', 'latin7_bin', ''))
_charsets.add(Charset(80, 'cp850', 'cp850_bin', ''))
_charsets.add(Charset(81, 'cp852', 'cp852_bin', ''))
_charsets.add(Charset(82, 'swe7', 'swe7_bin', ''))
_charsets.add(Charset(83, 'utf8', 'utf8_bin', ''))
_charsets.add(Charset(84, 'big5', 'big5_bin', ''))
_charsets.add(Charset(85, 'euckr', 'euckr_bin', ''))
_charsets.add(Charset(86, 'gb2312', 'gb2312_bin', ''))
_charsets.add(Charset(87, 'gbk', 'gbk_bin', ''))
_charsets.add(Charset(88, 'sjis', 'sjis_bin', ''))
_charsets.add(Charset(89, 'tis620', 'tis620_bin', ''))
_charsets.add(Charset(91, 'ujis', 'ujis_bin', ''))
_charsets.add(Charset(92, 'geostd8', 'geostd8_general_ci', 'Yes'))
_charsets.add(Charset(93, 'geostd8', 'geostd8_bin', ''))
_charsets.add(Charset(94, 'latin1', 'latin1_spanish_ci', ''))
_charsets.add(Charset(95, 'cp932', 'cp932_japanese_ci', 'Yes'))
_charsets.add(Charset(96, 'cp932', 'cp932_bin', ''))
_charsets.add(Charset(97, 'eucjpms', 'eucjpms_japanese_ci', 'Yes'))
_charsets.add(Charset(98, 'eucjpms', 'eucjpms_bin', ''))
_charsets.add(Charset(99, 'cp1250', 'cp1250_polish_ci', ''))
_charsets.add(Charset(192, 'utf8', 'utf8_unicode_ci', ''))
_charsets.add(Charset(193, 'utf8', 'utf8_icelandic_ci', ''))
_charsets.add(Charset(194, 'utf8', 'utf8_latvian_ci', ''))
_charsets.add(Charset(195, 'utf8', 'utf8_romanian_ci', ''))
_charsets.add(Charset(196, 'utf8', 'utf8_slovenian_ci', ''))
_charsets.add(Charset(197, 'utf8', 'utf8_polish_ci', ''))
_charsets.add(Charset(198, 'utf8', 'utf8_estonian_ci', ''))
_charsets.add(Charset(199, 'utf8', 'utf8_spanish_ci', ''))
_charsets.add(Charset(200, 'utf8', 'utf8_swedish_ci', ''))
_charsets.add(Charset(201, 'utf8', 'utf8_turkish_ci', ''))
_charsets.add(Charset(202, 'utf8', 'utf8_czech_ci', ''))
_charsets.add(Charset(203, 'utf8', 'utf8_danish_ci', ''))
_charsets.add(Charset(204, 'utf8', 'utf8_lithuanian_ci', ''))
_charsets.add(Charset(205, 'utf8', 'utf8_slovak_ci', ''))
_charsets.add(Charset(206, 'utf8', 'utf8_spanish2_ci', ''))
_charsets.add(Charset(207, 'utf8', 'utf8_roman_ci', ''))
_charsets.add(Charset(208, 'utf8', 'utf8_persian_ci', ''))
_charsets.add(Charset(209, 'utf8', 'utf8_esperanto_ci', ''))
_charsets.add(Charset(210, 'utf8', 'utf8_hungarian_ci', ''))
_charsets.add(Charset(211, 'utf8', 'utf8_sinhala_ci', ''))
_charsets.add(Charset(212, 'utf8', 'utf8_german2_ci', ''))
_charsets.add(Charset(213, 'utf8', 'utf8_croatian_ci', ''))
_charsets.add(Charset(214, 'utf8', 'utf8_unicode_520_ci', ''))
_charsets.add(Charset(215, 'utf8', 'utf8_vietnamese_ci', ''))
_charsets.add(Charset(223, 'utf8', 'utf8_general_mysql500_ci', ''))
_charsets.add(Charset(224, 'utf8mb4', 'utf8mb4_unicode_ci', ''))
_charsets.add(Charset(225, 'utf8mb4', 'utf8mb4_icelandic_ci', ''))
_charsets.add(Charset(226, 'utf8mb4', 'utf8mb4_latvian_ci', ''))
_charsets.add(Charset(227, 'utf8mb4', 'utf8mb4_romanian_ci', ''))
_charsets.add(Charset(228, 'utf8mb4', 'utf8mb4_slovenian_ci', ''))
_charsets.add(Charset(229, 'utf8mb4', 'utf8mb4_polish_ci', ''))
_charsets.add(Charset(230, 'utf8mb4', 'utf8mb4_estonian_ci', ''))
_charsets.add(Charset(231, 'utf8mb4', 'utf8mb4_spanish_ci', ''))
_charsets.add(Charset(232, 'utf8mb4', 'utf8mb4_swedish_ci', ''))
_charsets.add(Charset(233, 'utf8mb4', 'utf8mb4_turkish_ci', ''))
_charsets.add(Charset(234, 'utf8mb4', 'utf8mb4_czech_ci', ''))
_charsets.add(Charset(235, 'utf8mb4', 'utf8mb4_danish_ci', ''))
_charsets.add(Charset(236, 'utf8mb4', 'utf8mb4_lithuanian_ci', ''))
_charsets.add(Charset(237, 'utf8mb4', 'utf8mb4_slovak_ci', ''))
_charsets.add(Charset(238, 'utf8mb4', 'utf8mb4_spanish2_ci', ''))
_charsets.add(Charset(239, 'utf8mb4', 'utf8mb4_roman_ci', ''))
_charsets.add(Charset(240, 'utf8mb4', 'utf8mb4_persian_ci', ''))
_charsets.add(Charset(241, 'utf8mb4', 'utf8mb4_esperanto_ci', ''))
_charsets.add(Charset(242, 'utf8mb4', 'utf8mb4_hungarian_ci', ''))
_charsets.add(Charset(243, 'utf8mb4', 'utf8mb4_sinhala_ci', ''))
_charsets.add(Charset(244, 'utf8mb4', 'utf8mb4_german2_ci', ''))
_charsets.add(Charset(245, 'utf8mb4', 'utf8mb4_croatian_ci', ''))
_charsets.add(Charset(246, 'utf8mb4', 'utf8mb4_unicode_520_ci', ''))
_charsets.add(Charset(247, 'utf8mb4', 'utf8mb4_vietnamese_ci', ''))
_charsets.add(Charset(248, 'gb18030', 'gb18030_chinese_ci', 'Yes'))
_charsets.add(Charset(249, 'gb18030', 'gb18030_bin', ''))
_charsets.add(Charset(250, 'gb18030', 'gb18030_unicode_520_ci', ''))
_charsets.add(Charset(255, 'utf8mb4', 'utf8mb4_0900_ai_ci', ''))
charset_by_name = _charsets.by_name
charset_by_id = _charsets.by_id
#TODO: remove this
def charset_to_encoding(name):
"""Convert MySQL's charset name to Python's codec name"""
if name in ('utf8mb4', 'utf8mb3'):
return 'utf8'
return name | zdppy-mysql | /zdppy_mysql-0.2.5.tar.gz/zdppy_mysql-0.2.5/zdppy_mysql/charset.py | charset.py |
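# Usage sketch (illustrative, not part of the API): look up charsets by
# name and by collation id.
def _example_charset_lookup():  # pragma: no cover
    utf8mb4 = charset_by_name('utf8mb4')
    assert utf8mb4.encoding == 'utf8'    # maps to Python's utf8 codec
    assert charset_by_id(63).is_binary   # id 63 is the binary pseudo-charset
    return utf8mb4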
import struct
from zdppy_mysql.constants import ER
class MySQLError(Exception):
"""Exception related to operation with MySQL."""
class Warning(Warning, MySQLError):
"""Exception raised for important warnings like data truncations
while inserting, etc."""
class Error(MySQLError):
"""Exception that is the base class of all other error exceptions
(not Warning)."""
class InterfaceError(Error):
"""Exception raised for errors that are related to the database
interface rather than the database itself."""
class DatabaseError(Error):
"""Exception raised for errors that are related to the
database."""
class DataError(DatabaseError):
"""Exception raised for errors that are due to problems with the
processed data like division by zero, numeric value out of range,
etc."""
class OperationalError(DatabaseError):
"""Exception raised for errors that are related to the database's
operation and not necessarily under the control of the programmer,
e.g. an unexpected disconnect occurs, the data source name is not
found, a transaction could not be processed, a memory allocation
error occurred during processing, etc."""
class IntegrityError(DatabaseError):
"""Exception raised when the relational integrity of the database
is affected, e.g. a foreign key check fails, duplicate key,
etc."""
class InternalError(DatabaseError):
"""Exception raised when the database encounters an internal
error, e.g. the cursor is not valid anymore, the transaction is
out of sync, etc."""
class ProgrammingError(DatabaseError):
"""Exception raised for programming errors, e.g. table not found
or already exists, syntax error in the SQL statement, wrong number
of parameters specified, etc."""
class NotSupportedError(DatabaseError):
"""Exception raised in case a method or database API was used
which is not supported by the database, e.g. requesting a
.rollback() on a connection that does not support transaction or
has transactions turned off."""
error_map = {}
def _map_error(exc, *errors):
for error in errors:
error_map[error] = exc
_map_error(ProgrammingError, ER.DB_CREATE_EXISTS, ER.SYNTAX_ERROR,
ER.PARSE_ERROR, ER.NO_SUCH_TABLE, ER.WRONG_DB_NAME,
ER.WRONG_TABLE_NAME, ER.FIELD_SPECIFIED_TWICE,
ER.INVALID_GROUP_FUNC_USE, ER.UNSUPPORTED_EXTENSION,
ER.TABLE_MUST_HAVE_COLUMNS, ER.CANT_DO_THIS_DURING_AN_TRANSACTION,
ER.WRONG_DB_NAME, ER.WRONG_COLUMN_NAME,
)
_map_error(DataError, ER.WARN_DATA_TRUNCATED, ER.WARN_NULL_TO_NOTNULL,
ER.WARN_DATA_OUT_OF_RANGE, ER.NO_DEFAULT, ER.PRIMARY_CANT_HAVE_NULL,
ER.DATA_TOO_LONG, ER.DATETIME_FUNCTION_OVERFLOW)
_map_error(IntegrityError, ER.DUP_ENTRY, ER.NO_REFERENCED_ROW,
ER.NO_REFERENCED_ROW_2, ER.ROW_IS_REFERENCED, ER.ROW_IS_REFERENCED_2,
ER.CANNOT_ADD_FOREIGN, ER.BAD_NULL_ERROR)
_map_error(NotSupportedError, ER.WARNING_NOT_COMPLETE_ROLLBACK,
ER.NOT_SUPPORTED_YET, ER.FEATURE_DISABLED, ER.UNKNOWN_STORAGE_ENGINE)
_map_error(OperationalError, ER.DBACCESS_DENIED_ERROR, ER.ACCESS_DENIED_ERROR,
ER.CON_COUNT_ERROR, ER.TABLEACCESS_DENIED_ERROR,
ER.COLUMNACCESS_DENIED_ERROR, ER.CONSTRAINT_FAILED, ER.LOCK_DEADLOCK)
del _map_error, ER
def raise_mysql_exception(data):
"""
抛出mysql异常
:param data: 异常信息
:return:
"""
# 异常编号
errno = struct.unpack('<h', data[1:3])[0]
# error message
is_41 = data[3:4] == b"#"
if is_41:
# client protocol 4.1
errval = data[9:].decode('utf-8', 'replace')
else:
errval = data[3:].decode('utf-8', 'replace')
# exception class
errorclass = error_map.get(errno, InternalError)
# raise it with the error number and message
raise errorclass(errno, errval) | zdppy-mysql | /zdppy_mysql-0.2.5.tar.gz/zdppy_mysql-0.2.5/zdppy_mysql/err.py | err.py |
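# Usage sketch (illustrative, not part of the API): decode a hand-built
# error packet body; errno 1146 (ER.NO_SUCH_TABLE) is mapped to
# ProgrammingError by error_map.
def _example_raise_mysql_exception():  # pragma: no cover
    # 0xff marker, little-endian errno, "#" + 5-byte SQLSTATE, then message
    data = b'\xff' + struct.pack('<h', 1146) + b"#42S02Table 'test.t1' doesn't exist"
    try:
        raise_mysql_exception(data)
    except ProgrammingError as exc:
        return exc.args  # (1146, "Table 'test.t1' doesn't exist")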
from ._compat import text_type, PY2
from zdppy_mysql.constants import CLIENT
from .err import OperationalError
from .util import byte2int, int2byte
try:
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization, hashes
from cryptography.hazmat.primitives.asymmetric import padding
_have_cryptography = True
except ImportError:
_have_cryptography = False
from functools import partial
import hashlib
import io
import struct
import warnings
DEBUG = False
SCRAMBLE_LENGTH = 20
sha1_new = partial(hashlib.new, 'sha1')
# mysql_native_password
# https://dev.mysql.com/doc/internals/en/secure-password-authentication.html#packet-Authentication::Native41
def scramble_native_password(password, message):
"""Scramble used for mysql_native_password"""
if not password:
return b''
stage1 = sha1_new(password).digest()
stage2 = sha1_new(stage1).digest()
s = sha1_new()
s.update(message[:SCRAMBLE_LENGTH])
s.update(stage2)
result = s.digest()
return _my_crypt(result, stage1)
def _my_crypt(message1, message2):
result = bytearray(message1)
if PY2:
message2 = bytearray(message2)
for i in range(len(result)):
result[i] ^= message2[i]
return bytes(result)
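# Sketch (illustrative, not part of the API): the native scramble always
# yields a SHA-1 sized token; the all-zero salt stands in for the 20-byte
# nonce the server sends.
def _example_native_scramble():  # pragma: no cover
    salt = b'\x00' * SCRAMBLE_LENGTH
    token = scramble_native_password(b'secret', salt)
    assert len(token) == sha1_new().digest_size  # 20 bytes
    return token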
# old_passwords support ported from libmysql/password.c
# https://dev.mysql.com/doc/internals/en/old-password-authentication.html
SCRAMBLE_LENGTH_323 = 8
class RandStruct_323(object):
def __init__(self, seed1, seed2):
self.max_value = 0x3FFFFFFF
self.seed1 = seed1 % self.max_value
self.seed2 = seed2 % self.max_value
def my_rnd(self):
self.seed1 = (self.seed1 * 3 + self.seed2) % self.max_value
self.seed2 = (self.seed1 + self.seed2 + 33) % self.max_value
return float(self.seed1) / float(self.max_value)
def scramble_old_password(password, message):
"""Scramble for old_password"""
warnings.warn("old password (for MySQL <4.1) is used. Upgrade your password with newer auth method.\n"
"old password support will be removed in future PyMySQL version")
hash_pass = _hash_password_323(password)
hash_message = _hash_password_323(message[:SCRAMBLE_LENGTH_323])
hash_pass_n = struct.unpack(">LL", hash_pass)
hash_message_n = struct.unpack(">LL", hash_message)
rand_st = RandStruct_323(
hash_pass_n[0] ^ hash_message_n[0], hash_pass_n[1] ^ hash_message_n[1]
)
outbuf = io.BytesIO()
for _ in range(min(SCRAMBLE_LENGTH_323, len(message))):
outbuf.write(int2byte(int(rand_st.my_rnd() * 31) + 64))
extra = int2byte(int(rand_st.my_rnd() * 31))
out = outbuf.getvalue()
outbuf = io.BytesIO()
for c in out:
outbuf.write(int2byte(byte2int(c) ^ byte2int(extra)))
return outbuf.getvalue()
def _hash_password_323(password):
nr = 1345345333
add = 7
nr2 = 0x12345671
# iterating the password yields ints on Python 3 and 1-char strings on Python 2
for c in [byte2int(x) for x in password if x not in (' ', '\t', 32, 9)]:
nr ^= (((nr & 63) + add) * c) + (nr << 8) & 0xFFFFFFFF
nr2 = (nr2 + ((nr2 << 8) ^ nr)) & 0xFFFFFFFF
add = (add + c) & 0xFFFFFFFF
r1 = nr & ((1 << 31) - 1) # kill sign bits
r2 = nr2 & ((1 << 31) - 1)
return struct.pack(">LL", r1, r2)
# sha256_password
def _roundtrip(conn, send_data):
conn.write_packet(send_data)
pkt = conn._read_packet()
pkt.check_error()
return pkt
def _xor_password(password, salt):
password_bytes = bytearray(password)
salt = bytearray(salt) # for PY2 compat.
salt_len = len(salt)
for i in range(len(password_bytes)):
password_bytes[i] ^= salt[i % salt_len]
return bytes(password_bytes)
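# Sketch (illustrative, not part of the API): XOR-masking with the same
# salt twice restores the original bytes, which is how the server recovers
# the password after RSA decryption.
def _example_xor_password_roundtrip():  # pragma: no cover
    salt = b'12345678901234567890'
    masked = _xor_password(b'secret\0', salt)
    assert _xor_password(masked, salt) == b'secret\0'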
def sha2_rsa_encrypt(password, salt, public_key):
"""Encrypt password with salt and public_key.
Used for sha256_password and caching_sha2_password.
"""
if not _have_cryptography:
raise RuntimeError("cryptography is required for sha256_password or caching_sha2_password")
message = _xor_password(password + b'\0', salt)
rsa_key = serialization.load_pem_public_key(public_key, default_backend())
return rsa_key.encrypt(
message,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None,
),
)
def sha256_password_auth(conn, pkt):
if conn._secure:
if DEBUG:
print("sha256: Sending plain password")
data = conn.password + b'\0'
return _roundtrip(conn, data)
if pkt.is_auth_switch_request():
conn.salt = pkt.read_all()
if not conn.server_public_key and conn.password:
# Request server public key
if DEBUG:
print("sha256: Requesting server public key")
pkt = _roundtrip(conn, b'\1')
if pkt.is_extra_auth_data():
conn.server_public_key = pkt._data[1:]
if DEBUG:
print("Received public key:\n", conn.server_public_key.decode('ascii'))
if conn.password:
if not conn.server_public_key:
raise OperationalError("Couldn't receive server's public key")
data = sha2_rsa_encrypt(conn.password, conn.salt, conn.server_public_key)
else:
data = b''
return _roundtrip(conn, data)
def scramble_caching_sha2(password, nonce):
# (bytes, bytes) -> bytes
"""Scramble algorithm used in cached_sha2_password fast path.
XOR(SHA256(password), SHA256(SHA256(SHA256(password)), nonce))
"""
if not password:
return b''
p1 = hashlib.sha256(password).digest()
p2 = hashlib.sha256(p1).digest()
p3 = hashlib.sha256(p2 + nonce).digest()
res = bytearray(p1)
if PY2:
p3 = bytearray(p3)
for i in range(len(p3)):
res[i] ^= p3[i]
return bytes(res)
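# Sketch (illustrative, not part of the API): the fast-path scramble is a
# SHA-256 sized token, and an empty password short-circuits to b''.
def _example_caching_sha2_scramble():  # pragma: no cover
    nonce = b'\x00' * 20  # stand-in for the 20-byte server nonce
    token = scramble_caching_sha2(b'secret', nonce)
    assert len(token) == 32
    assert scramble_caching_sha2(b'', nonce) == b''
    return token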
def caching_sha2_password_auth(conn, pkt):
# No password fast path
if not conn.password:
return _roundtrip(conn, b'')
if pkt.is_auth_switch_request():
# Try from fast auth
if DEBUG:
print("caching sha2: Trying fast path")
conn.salt = pkt.read_all()
scrambled = scramble_caching_sha2(conn.password, conn.salt)
pkt = _roundtrip(conn, scrambled)
# else: fast auth is tried in initial handshake
if not pkt.is_extra_auth_data():
raise OperationalError(
"caching sha2: Unknown packet for fast auth: %s" % pkt._data[:1]
)
# magic numbers:
# 2 - request public key
# 3 - fast auth succeeded
# 4 - need full auth
pkt.advance(1)
n = pkt.read_uint8()
if n == 3:
if DEBUG:
print("caching sha2: succeeded by fast path.")
pkt = conn._read_packet()
pkt.check_error() # pkt must be OK packet
return pkt
if n != 4:
raise OperationalError("caching sha2: Unknwon result for fast auth: %s" % n)
if DEBUG:
print("caching sha2: Trying full auth...")
if conn._secure:
if DEBUG:
print("caching sha2: Sending plain password via secure connection")
return _roundtrip(conn, conn.password + b'\0')
if not conn.server_public_key:
pkt = _roundtrip(conn, b'\x02') # Request public key
if not pkt.is_extra_auth_data():
raise OperationalError(
"caching sha2: Unknown packet for public key: %s" % pkt._data[:1]
)
conn.server_public_key = pkt._data[1:]
if DEBUG:
print(conn.server_public_key.decode('ascii'))
data = sha2_rsa_encrypt(conn.password, conn.salt, conn.server_public_key)
pkt = _roundtrip(conn, data) | zdppy-mysql | /zdppy_mysql-0.2.5.tar.gz/zdppy_mysql-0.2.5/zdppy_mysql/_auth.py | _auth.py |
from __future__ import print_function
from ._compat import PY2, range_type, text_type, str_type, JYTHON, IRONPYTHON
import errno
import io
import os
import socket
import struct
import sys
import traceback
import warnings
from zdppy_mysql import _auth
from .charset import charset_by_name, charset_by_id
from zdppy_mysql.constants import CLIENT, COMMAND, CR, FIELD_TYPE, SERVER_STATUS
from zdppy_mysql import converters
from .cursors import Cursor
from .optionfile import Parser
from .protocol import (
dump_packet, MysqlPacket, FieldDescriptorPacket, OKPacketWrapper,
EOFPacketWrapper, LoadLocalPacketWrapper
)
from .util import byte2int, int2byte
from zdppy_mysql import err, VERSION_STRING
try:
import ssl
SSL_ENABLED = True
except ImportError:
ssl = None
SSL_ENABLED = False
try:
import getpass
DEFAULT_USER = getpass.getuser()
del getpass
except (ImportError, KeyError):
# KeyError occurs when there's no entry in OS database for a current user.
DEFAULT_USER = None
DEBUG = False
_py_version = sys.version_info[:2]
if PY2:
pass
elif _py_version < (3, 6):
# See http://bugs.python.org/issue24870
_surrogateescape_table = [chr(i) if i < 0x80 else chr(i + 0xdc00) for i in range(256)]
def _fast_surrogateescape(s):
return s.decode('latin1').translate(_surrogateescape_table)
else:
def _fast_surrogateescape(s):
return s.decode('ascii', 'surrogateescape')
# socket.makefile() in Python 2 is not usable because it is very
# inefficient and behaves badly with timeouts.
# XXX: ._socketio doesn't work under IronPython.
if PY2 and not IRONPYTHON:
# read method of file-like returned by sock.makefile() is very slow.
# So we copy io-based one from Python 3.
from ._socketio import SocketIO
def _makefile(sock, mode):
return io.BufferedReader(SocketIO(sock, mode))
else:
# socket.makefile in Python 3 is nice.
def _makefile(sock, mode):
return sock.makefile(mode)
TEXT_TYPES = {
FIELD_TYPE.BIT,
FIELD_TYPE.BLOB,
FIELD_TYPE.LONG_BLOB,
FIELD_TYPE.MEDIUM_BLOB,
FIELD_TYPE.STRING,
FIELD_TYPE.TINY_BLOB,
FIELD_TYPE.VAR_STRING,
FIELD_TYPE.VARCHAR,
FIELD_TYPE.GEOMETRY,
}
DEFAULT_CHARSET = 'utf8mb4'
MAX_PACKET_LEN = 2**24-1
def pack_int24(n):
return struct.pack('<I', n)[:3]
# https://dev.mysql.com/doc/internals/en/integer.html#packet-Protocol::LengthEncodedInteger
def lenenc_int(i):
if (i < 0):
raise ValueError("Encoding %d is less than 0 - no representation in LengthEncodedInteger" % i)
elif (i < 0xfb):
return int2byte(i)
elif (i < (1 << 16)):
return b'\xfc' + struct.pack('<H', i)
elif (i < (1 << 24)):
return b'\xfd' + struct.pack('<I', i)[:3]
elif (i < (1 << 64)):
return b'\xfe' + struct.pack('<Q', i)
else:
raise ValueError("Encoding %x is larger than %x - no representation in LengthEncodedInteger" % (i, (1 << 64)))
class Connection(object):
"""
Representation of a socket with a mysql server.
The proper way to get an instance of this class is to call
connect().
Establish a connection to the MySQL database. Accepts several
arguments:
:param host: Host where the database server is located
:param user: Username to log in as
:param password: Password to use.
:param database: Database to use, None to not use a particular one.
:param port: MySQL port to use, default is usually OK. (default: 3306)
:param bind_address: When the client has multiple network interfaces, specify
the interface from which to connect to the host. Argument can be
a hostname or an IP address.
:param unix_socket: Optionally, you can use a unix socket rather than TCP/IP.
:param read_timeout: The timeout for reading from the connection in seconds (default: None - no timeout)
:param write_timeout: The timeout for writing to the connection in seconds (default: None - no timeout)
:param charset: Charset you want to use.
:param sql_mode: Default SQL_MODE to use.
:param read_default_file:
Specifies my.cnf file to read these parameters from under the [client] section.
:param conv:
Conversion dictionary to use instead of the default one.
This is used to provide custom marshalling and unmarshaling of types.
See converters.
:param use_unicode:
Whether or not to default to unicode strings.
This option defaults to true for Py3k.
:param client_flag: Custom flags to send to MySQL. Find potential values in constants.CLIENT.
:param cursorclass: Custom cursor class to use.
:param init_command: Initial SQL statement to run when connection is established.
:param connect_timeout: Timeout before throwing an exception when connecting.
(default: 10, min: 1, max: 31536000)
:param ssl:
A dict of arguments similar to mysql_ssl_set()'s parameters.
:param read_default_group: Group to read from in the configuration file.
:param compress: Not supported
:param named_pipe: Not supported
:param autocommit: Autocommit mode. None means use server default. (default: False)
:param local_infile: Boolean to enable the use of LOAD DATA LOCAL command. (default: False)
:param max_allowed_packet: Max size of packet sent to server in bytes. (default: 16MB)
Only used to limit the size of "LOAD LOCAL INFILE" data packets to less than the default chunk size (16KB).
:param defer_connect: Don't explicitly connect on construction - wait for a connect call.
(default: False)
:param auth_plugin_map: A dict of plugin names to a class that processes that plugin.
The class will take the Connection object as the argument to the constructor.
The class needs an authenticate method taking an authentication packet as
an argument. For the dialog plugin, a prompt(echo, prompt) method can be used
(if no authenticate method) for returning a string from the user. (experimental)
:param server_public_key: SHA256 authentication plugin public key value. (default: None)
:param db: Alias for database. (for compatibility to MySQLdb)
:param passwd: Alias for password. (for compatibility to MySQLdb)
:param binary_prefix: Add _binary prefix on bytes and bytearray. (default: False)
See `Connection <https://www.python.org/dev/peps/pep-0249/#connection-objects>`_ in the
specification.
"""
_sock = None
_auth_plugin_name = ''
_closed = False
_secure = False
def __init__(self, host=None, user=None, password="",
database=None, port=0, unix_socket=None,
charset='', sql_mode=None,
read_default_file=None, conv=None, use_unicode=None,
client_flag=0, cursorclass=Cursor, init_command=None,
connect_timeout=10, ssl=None, read_default_group=None,
compress=None, named_pipe=None,
autocommit=False, db=None, passwd=None, local_infile=False,
max_allowed_packet=16*1024*1024, defer_connect=False,
auth_plugin_map=None, read_timeout=None, write_timeout=None,
bind_address=None, binary_prefix=False, program_name=None,
server_public_key=None):
if use_unicode is None and sys.version_info[0] > 2:
use_unicode = True
if db is not None and database is None:
database = db
if passwd is not None and not password:
password = passwd
if compress or named_pipe:
raise NotImplementedError("compress and named_pipe arguments are not supported")
self._local_infile = bool(local_infile)
if self._local_infile:
client_flag |= CLIENT.LOCAL_FILES
if read_default_group and not read_default_file:
if sys.platform.startswith("win"):
read_default_file = "c:\\my.ini"
else:
read_default_file = "/etc/my.cnf"
if read_default_file:
if not read_default_group:
read_default_group = "client"
cfg = Parser()
cfg.read(os.path.expanduser(read_default_file))
def _config(key, arg):
if arg:
return arg
try:
return cfg.get(read_default_group, key)
except Exception:
return arg
user = _config("user", user)
password = _config("password", password)
host = _config("host", host)
database = _config("database", database)
unix_socket = _config("socket", unix_socket)
port = int(_config("port", port))
bind_address = _config("bind-address", bind_address)
charset = _config("default-character-set", charset)
if not ssl:
ssl = {}
if isinstance(ssl, dict):
for key in ["ca", "capath", "cert", "key", "cipher"]:
value = _config("ssl-" + key, ssl.get(key))
if value:
ssl[key] = value
self.ssl = False
if ssl:
if not SSL_ENABLED:
raise NotImplementedError("ssl module not found")
self.ssl = True
client_flag |= CLIENT.SSL
self.ctx = self._create_ssl_ctx(ssl)
self.host = host or "localhost"
self.port = port or 3306
self.user = user or DEFAULT_USER
self.password = password or b""
if isinstance(self.password, text_type):
self.password = self.password.encode('latin1')
self.db = database
self.unix_socket = unix_socket
self.bind_address = bind_address
if not (0 < connect_timeout <= 31536000):
raise ValueError("connect_timeout should be >0 and <=31536000")
self.connect_timeout = connect_timeout or None
if read_timeout is not None and read_timeout <= 0:
raise ValueError("read_timeout should be >= 0")
self._read_timeout = read_timeout
if write_timeout is not None and write_timeout <= 0:
raise ValueError("write_timeout should be >= 0")
self._write_timeout = write_timeout
if charset:
self.charset = charset
self.use_unicode = True
else:
self.charset = DEFAULT_CHARSET
self.use_unicode = False
if use_unicode is not None:
self.use_unicode = use_unicode
self.encoding = charset_by_name(self.charset).encoding
client_flag |= CLIENT.CAPABILITIES
if self.db:
client_flag |= CLIENT.CONNECT_WITH_DB
self.client_flag = client_flag
self.cursorclass = cursorclass
self._result = None
self._affected_rows = 0
self.host_info = "Not connected"
# specified autocommit mode. None means use server default.
self.autocommit_mode = autocommit
if conv is None:
conv = converters.conversions
# Need for MySQLdb compatibility.
self.encoders = {k: v for (k, v) in conv.items() if type(k) is not int}
self.decoders = {k: v for (k, v) in conv.items() if type(k) is int}
self.sql_mode = sql_mode
self.init_command = init_command
self.max_allowed_packet = max_allowed_packet
self._auth_plugin_map = auth_plugin_map or {}
self._binary_prefix = binary_prefix
self.server_public_key = server_public_key
self._connect_attrs = {
'_client_name': 'pymysql',
'_pid': str(os.getpid()),
'_client_version': VERSION_STRING,
}
if program_name:
self._connect_attrs["program_name"] = program_name
if defer_connect:
self._sock = None
else:
self.connect()
def _create_ssl_ctx(self, sslp):
if isinstance(sslp, ssl.SSLContext):
return sslp
ca = sslp.get('ca')
capath = sslp.get('capath')
hasnoca = ca is None and capath is None
ctx = ssl.create_default_context(cafile=ca, capath=capath)
ctx.check_hostname = not hasnoca and sslp.get('check_hostname', True)
ctx.verify_mode = ssl.CERT_NONE if hasnoca else ssl.CERT_REQUIRED
if 'cert' in sslp:
ctx.load_cert_chain(sslp['cert'], keyfile=sslp.get('key'))
if 'cipher' in sslp:
ctx.set_ciphers(sslp['cipher'])
ctx.options |= ssl.OP_NO_SSLv2
ctx.options |= ssl.OP_NO_SSLv3
return ctx
def close(self):
"""
Send the quit message and close the socket.
See `Connection.close() <https://www.python.org/dev/peps/pep-0249/#Connection.close>`_
in the specification.
:raise Error: If the connection is already closed.
"""
if self._closed:
raise err.Error("Already closed")
self._closed = True
if self._sock is None:
return
send_data = struct.pack('<iB', 1, COMMAND.COM_QUIT)
try:
self._write_bytes(send_data)
except Exception:
pass
finally:
self._force_close()
@property
def open(self):
"""Return True if the connection is open"""
return self._sock is not None
def _force_close(self):
"""Close connection without QUIT message"""
if self._sock:
try:
self._sock.close()
except: # noqa
pass
self._sock = None
self._rfile = None
__del__ = _force_close
def autocommit(self, value):
self.autocommit_mode = bool(value)
current = self.get_autocommit()
if value != current:
self._send_autocommit_mode()
def get_autocommit(self):
return bool(self.server_status &
SERVER_STATUS.SERVER_STATUS_AUTOCOMMIT)
def _read_ok_packet(self):
pkt = self._read_packet()
if not pkt.is_ok_packet():
raise err.OperationalError(2014, "Command Out of Sync")
ok = OKPacketWrapper(pkt)
self.server_status = ok.server_status
return ok
def _send_autocommit_mode(self):
"""Set whether or not to commit after every execute()"""
self._execute_command(COMMAND.COM_QUERY, "SET AUTOCOMMIT = %s" %
self.escape(self.autocommit_mode))
self._read_ok_packet()
def begin(self):
"""Begin transaction."""
self._execute_command(COMMAND.COM_QUERY, "BEGIN")
self._read_ok_packet()
def commit(self):
"""
Commit changes to stable storage.
See `Connection.commit() <https://www.python.org/dev/peps/pep-0249/#commit>`_
in the specification.
"""
self._execute_command(COMMAND.COM_QUERY, "COMMIT")
self._read_ok_packet()
def rollback(self):
"""
Roll back the current transaction.
See `Connection.rollback() <https://www.python.org/dev/peps/pep-0249/#rollback>`_
in the specification.
"""
self._execute_command(COMMAND.COM_QUERY, "ROLLBACK")
self._read_ok_packet()
def show_warnings(self):
"""Send the "SHOW WARNINGS" SQL command."""
self._execute_command(COMMAND.COM_QUERY, "SHOW WARNINGS")
result = MySQLResult(self)
result.read()
return result.rows
def select_db(self, db):
"""
Set current db.
:param db: The name of the db.
"""
self._execute_command(COMMAND.COM_INIT_DB, db)
self._read_ok_packet()
def escape(self, obj, mapping=None):
"""Escape whatever value you pass to it.
Non-standard, for internal use; do not use this in your applications.
"""
if isinstance(obj, str_type):
return "'" + self.escape_string(obj) + "'"
if isinstance(obj, (bytes, bytearray)):
ret = self._quote_bytes(obj)
if self._binary_prefix:
ret = "_binary" + ret
return ret
return converters.escape_item(obj, self.charset, mapping=mapping)
def literal(self, obj):
"""Alias for escape()
Non-standard, for internal use; do not use this in your applications.
"""
return self.escape(obj, self.encoders)
def escape_string(self, s):
if (self.server_status &
SERVER_STATUS.SERVER_STATUS_NO_BACKSLASH_ESCAPES):
return s.replace("'", "''")
return converters.escape_string(s)
def _quote_bytes(self, s):
if (self.server_status &
SERVER_STATUS.SERVER_STATUS_NO_BACKSLASH_ESCAPES):
return "'%s'" % (_fast_surrogateescape(s.replace(b"'", b"''")),)
return converters.escape_bytes(s)
def cursor(self, cursor=None):
"""
Create a new cursor to execute queries with.
:param cursor: The type of cursor to create; one of :py:class:`Cursor`,
:py:class:`SSCursor`, :py:class:`DictCursor`, or :py:class:`SSDictCursor`.
None means use Cursor.
"""
if cursor:
return cursor(self)
return self.cursorclass(self)
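# Usage sketch (illustrative; credentials are assumptions, and it assumes
# the package exposes a PyMySQL-style connect() helper):
#
#     conn = zdppy_mysql.connect(host='localhost', user='root',
#                                password='secret', database='test')
#     with conn.cursor() as cur:
#         cur.execute("SELECT VERSION()")
#         print(cur.fetchone())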
def __enter__(self):
"""Context manager that returns a Cursor"""
warnings.warn(
"Context manager API of Connection object is deprecated; Use conn.begin()",
DeprecationWarning)
return self.cursor()
def __exit__(self, exc, value, traceback):
"""On successful exit, commit. On exception, rollback"""
if exc:
self.rollback()
else:
self.commit()
# The following methods are INTERNAL USE ONLY (called from Cursor)
def query(self, sql, unbuffered=False):
# if DEBUG:
# print("DEBUG: sending query:", sql)
if isinstance(sql, text_type) and not (JYTHON or IRONPYTHON):
if PY2:
sql = sql.encode(self.encoding)
else:
sql = sql.encode(self.encoding, 'surrogateescape')
self._execute_command(COMMAND.COM_QUERY, sql)
self._affected_rows = self._read_query_result(unbuffered=unbuffered)
return self._affected_rows
def next_result(self, unbuffered=False):
self._affected_rows = self._read_query_result(unbuffered=unbuffered)
return self._affected_rows
def affected_rows(self):
return self._affected_rows
def kill(self, thread_id):
arg = struct.pack('<I', thread_id)
self._execute_command(COMMAND.COM_PROCESS_KILL, arg)
return self._read_ok_packet()
def ping(self, reconnect=True):
"""
Check if the server is alive.
:param reconnect: If the connection is closed, reconnect.
:raise Error: If the connection is closed and reconnect=False.
"""
if self._sock is None:
if reconnect:
self.connect()
reconnect = False
else:
raise err.Error("Already closed")
try:
self._execute_command(COMMAND.COM_PING, "")
self._read_ok_packet()
except Exception:
if reconnect:
self.connect()
self.ping(False)
else:
raise
def set_charset(self, charset):
# Make sure charset is supported.
encoding = charset_by_name(charset).encoding
self._execute_command(COMMAND.COM_QUERY, "SET NAMES %s" % self.escape(charset))
self._read_packet()
self.charset = charset
self.encoding = encoding
def connect(self, sock=None):
self._closed = False
try:
if sock is None:
if self.unix_socket:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(self.connect_timeout)
sock.connect(self.unix_socket)
self.host_info = "Localhost via UNIX socket"
self._secure = True
if DEBUG: print('connected using unix_socket')
else:
kwargs = {}
if self.bind_address is not None:
kwargs['source_address'] = (self.bind_address, 0)
while True:
try:
sock = socket.create_connection(
(self.host, self.port), self.connect_timeout,
**kwargs)
break
except (OSError, IOError) as e:
if e.errno == errno.EINTR:
continue
raise
self.host_info = "socket %s:%d" % (self.host, self.port)
if DEBUG: print('connected using socket')
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
sock.settimeout(None)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
self._sock = sock
self._rfile = _makefile(sock, 'rb')
self._next_seq_id = 0
self._get_server_information()
self._request_authentication()
if self.sql_mode is not None:
c = self.cursor()
c.execute("SET sql_mode=%s", (self.sql_mode,))
if self.init_command is not None:
c = self.cursor()
c.execute(self.init_command)
c.close()
self.commit()
if self.autocommit_mode is not None:
self.autocommit(self.autocommit_mode)
except BaseException as e:
self._rfile = None
if sock is not None:
try:
sock.close()
except: # noqa
pass
if isinstance(e, (OSError, IOError, socket.error)):
exc = err.OperationalError(
2003,
"Can't connect to MySQL server on %r (%s)" % (
self.host, e))
# Keep original exception and traceback to investigate error.
exc.original_exception = e
exc.traceback = traceback.format_exc()
if DEBUG: print(exc.traceback)
raise exc
# If e is neither DatabaseError nor IOError, it's a bug.
# But raising AssertionError would hide the original error,
# so just reraise it.
raise
def write_packet(self, payload):
"""Writes an entire "mysql packet" in its entirety to the network
addings its length and sequence number.
"""
# Internal note: when you build packet manualy and calls _write_bytes()
# directly, you should set self._next_seq_id properly.
data = pack_int24(len(payload)) + int2byte(self._next_seq_id) + payload
if DEBUG: dump_packet(data)
self._write_bytes(data)
self._next_seq_id = (self._next_seq_id + 1) % 256
def _read_packet(self, packet_type=MysqlPacket):
"""Read an entire "mysql packet" in its entirety from the network
and return a MysqlPacket type that represents the results.
:raise OperationalError: If the connection to the MySQL server is lost.
:raise InternalError: If the packet sequence number is wrong.
"""
buff = b''
while True:
packet_header = self._read_bytes(4)
#if DEBUG: dump_packet(packet_header)
btrl, btrh, packet_number = struct.unpack('<HBB', packet_header)
bytes_to_read = btrl + (btrh << 16)
if packet_number != self._next_seq_id:
self._force_close()
if packet_number == 0:
# MariaDB sends error packet with seqno==0 when shutdown
raise err.OperationalError(
CR.CR_SERVER_LOST,
"Lost connection to MySQL server during query")
raise err.InternalError(
"Packet sequence number wrong - got %d expected %d"
% (packet_number, self._next_seq_id))
self._next_seq_id = (self._next_seq_id + 1) % 256
recv_data = self._read_bytes(bytes_to_read)
if DEBUG: dump_packet(recv_data)
buff += recv_data
# https://dev.mysql.com/doc/internals/en/sending-more-than-16mbyte.html
if bytes_to_read == 0xffffff:
continue
if bytes_to_read < MAX_PACKET_LEN:
break
packet = packet_type(buff, self.encoding)
packet.check_error()
return packet
def _read_bytes(self, num_bytes):
self._sock.settimeout(self._read_timeout)
while True:
try:
data = self._rfile.read(num_bytes)
break
except (IOError, OSError) as e:
if e.errno == errno.EINTR:
continue
self._force_close()
raise err.OperationalError(
CR.CR_SERVER_LOST,
"Lost connection to MySQL server during query (%s)" % (e,))
except BaseException:
# Don't convert unknown exception to MySQLError.
self._force_close()
raise
if len(data) < num_bytes:
self._force_close()
raise err.OperationalError(
CR.CR_SERVER_LOST, "Lost connection to MySQL server during query")
return data
def _write_bytes(self, data):
self._sock.settimeout(self._write_timeout)
try:
self._sock.sendall(data)
except IOError as e:
self._force_close()
raise err.OperationalError(
CR.CR_SERVER_GONE_ERROR,
"MySQL server has gone away (%r)" % (e,))
def _read_query_result(self, unbuffered=False):
self._result = None
if unbuffered:
# Construct the result before the try block so the cleanup in the
# except clause cannot fail with NameError if the constructor raises.
result = MySQLResult(self)
try:
result.init_unbuffered_query()
except BaseException:
result.unbuffered_active = False
result.connection = None
raise
else:
result = MySQLResult(self)
result.read()
self._result = result
if result.server_status is not None:
self.server_status = result.server_status
return result.affected_rows
def insert_id(self):
if self._result:
return self._result.insert_id
else:
return 0
def _execute_command(self, command, sql):
"""
:raise InterfaceError: If the connection is closed.
:raise ValueError: If no username was specified.
"""
if not self._sock:
raise err.InterfaceError("(0, '')")
# If the last query was unbuffered, make sure it finishes before
# sending new commands
if self._result is not None:
if self._result.unbuffered_active:
warnings.warn("Previous unbuffered result was left incomplete")
self._result._finish_unbuffered_query()
while self._result.has_next:
self.next_result()
self._result = None
if isinstance(sql, text_type):
sql = sql.encode(self.encoding)
packet_size = min(MAX_PACKET_LEN, len(sql) + 1) # +1 is for command
# tiny optimization: build the first packet manually instead of
# calling self.write_packet()
prelude = struct.pack('<iB', packet_size, command)
packet = prelude + sql[:packet_size-1]
self._write_bytes(packet)
if DEBUG: dump_packet(packet)
self._next_seq_id = 1
if packet_size < MAX_PACKET_LEN:
return
sql = sql[packet_size-1:]
while True:
packet_size = min(MAX_PACKET_LEN, len(sql))
self.write_packet(sql[:packet_size])
sql = sql[packet_size:]
if not sql and packet_size < MAX_PACKET_LEN:
break
def _request_authentication(self):
# https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse
if int(self.server_version.split('.', 1)[0]) >= 5:
self.client_flag |= CLIENT.MULTI_RESULTS
if self.user is None:
raise ValueError("Did not specify a username")
charset_id = charset_by_name(self.charset).id
if isinstance(self.user, text_type):
self.user = self.user.encode(self.encoding)
data_init = struct.pack('<iIB23s', self.client_flag, MAX_PACKET_LEN, charset_id, b'')
if self.ssl and self.server_capabilities & CLIENT.SSL:
self.write_packet(data_init)
self._sock = self.ctx.wrap_socket(self._sock, server_hostname=self.host)
self._rfile = _makefile(self._sock, 'rb')
self._secure = True
data = data_init + self.user + b'\0'
authresp = b''
plugin_name = None
if self._auth_plugin_name == '':
plugin_name = b''
authresp = _auth.scramble_native_password(self.password, self.salt)
elif self._auth_plugin_name == 'mysql_native_password':
plugin_name = b'mysql_native_password'
authresp = _auth.scramble_native_password(self.password, self.salt)
elif self._auth_plugin_name == 'caching_sha2_password':
plugin_name = b'caching_sha2_password'
if self.password:
if DEBUG:
print("caching_sha2: trying fast path")
authresp = _auth.scramble_caching_sha2(self.password, self.salt)
else:
if DEBUG:
print("caching_sha2: empty password")
elif self._auth_plugin_name == 'sha256_password':
plugin_name = b'sha256_password'
if self.ssl and self.server_capabilities & CLIENT.SSL:
authresp = self.password + b'\0'
elif self.password:
authresp = b'\1' # request public key
else:
authresp = b'\0' # empty password
if self.server_capabilities & CLIENT.PLUGIN_AUTH_LENENC_CLIENT_DATA:
data += lenenc_int(len(authresp)) + authresp
elif self.server_capabilities & CLIENT.SECURE_CONNECTION:
data += struct.pack('B', len(authresp)) + authresp
else: # pragma: no cover - not testing against servers without secure auth (>=5.0)
data += authresp + b'\0'
if self.db and self.server_capabilities & CLIENT.CONNECT_WITH_DB:
if isinstance(self.db, text_type):
self.db = self.db.encode(self.encoding)
data += self.db + b'\0'
if self.server_capabilities & CLIENT.PLUGIN_AUTH:
data += (plugin_name or b'') + b'\0'
if self.server_capabilities & CLIENT.CONNECT_ATTRS:
connect_attrs = b''
for k, v in self._connect_attrs.items():
k = k.encode('utf-8')
connect_attrs += struct.pack('B', len(k)) + k
v = v.encode('utf-8')
connect_attrs += struct.pack('B', len(v)) + v
data += struct.pack('B', len(connect_attrs)) + connect_attrs
self.write_packet(data)
auth_packet = self._read_packet()
# if the authentication method isn't accepted, the first byte
# will be 0xfe (an auth switch request)
if auth_packet.is_auth_switch_request():
if DEBUG: print("received auth switch")
# https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchRequest
auth_packet.read_uint8() # 0xfe packet identifier
plugin_name = auth_packet.read_string()
if self.server_capabilities & CLIENT.PLUGIN_AUTH and plugin_name is not None:
auth_packet = self._process_auth(plugin_name, auth_packet)
else:
# send legacy handshake
data = _auth.scramble_old_password(self.password, self.salt) + b'\0'
self.write_packet(data)
auth_packet = self._read_packet()
elif auth_packet.is_extra_auth_data():
if DEBUG:
print("received extra data")
# https://dev.mysql.com/doc/internals/en/successful-authentication.html
if self._auth_plugin_name == "caching_sha2_password":
auth_packet = _auth.caching_sha2_password_auth(self, auth_packet)
elif self._auth_plugin_name == "sha256_password":
auth_packet = _auth.sha256_password_auth(self, auth_packet)
else:
raise err.OperationalError("Received extra packet for auth method %r", self._auth_plugin_name)
if DEBUG: print("Succeed to auth")
def _process_auth(self, plugin_name, auth_packet):
handler = self._get_auth_plugin_handler(plugin_name)
if handler:
try:
return handler.authenticate(auth_packet)
except AttributeError:
if plugin_name != b'dialog':
raise err.OperationalError(2059, "Authentication plugin '%s'"
" not loaded: - %r missing authenticate method" % (plugin_name, type(handler)))
if plugin_name == b"caching_sha2_password":
return _auth.caching_sha2_password_auth(self, auth_packet)
elif plugin_name == b"sha256_password":
return _auth.sha256_password_auth(self, auth_packet)
elif plugin_name == b"mysql_native_password":
data = _auth.scramble_native_password(self.password, auth_packet.read_all())
elif plugin_name == b"mysql_old_password":
data = _auth.scramble_old_password(self.password, auth_packet.read_all()) + b'\0'
elif plugin_name == b"mysql_clear_password":
# https://dev.mysql.com/doc/internals/en/clear-text-authentication.html
data = self.password + b'\0'
elif plugin_name == b"dialog":
pkt = auth_packet
while True:
flag = pkt.read_uint8()
echo = (flag & 0x06) == 0x02
last = (flag & 0x01) == 0x01
prompt = pkt.read_all()
if prompt == b"Password: ":
self.write_packet(self.password + b'\0')
elif handler:
resp = 'no response - TypeError within plugin.prompt method'
try:
resp = handler.prompt(echo, prompt)
self.write_packet(resp + b'\0')
except AttributeError:
                        raise err.OperationalError(2059, "Authentication plugin '%s'"
                                                   " not loaded: %r is missing the prompt() method" % (plugin_name, handler))
                    except TypeError:
                        raise err.OperationalError(2061, "Authentication plugin '%s'"
                                                   " %r didn't respond with a string; returned %r to prompt %r" % (plugin_name, handler, resp, prompt))
else:
raise err.OperationalError(2059, "Authentication plugin '%s' (%r) not configured" % (plugin_name, handler))
pkt = self._read_packet()
pkt.check_error()
if pkt.is_ok_packet() or last:
break
return pkt
else:
raise err.OperationalError(2059, "Authentication plugin '%s' not configured" % plugin_name)
self.write_packet(data)
pkt = self._read_packet()
pkt.check_error()
return pkt
def _get_auth_plugin_handler(self, plugin_name):
plugin_class = self._auth_plugin_map.get(plugin_name)
if not plugin_class and isinstance(plugin_name, bytes):
plugin_class = self._auth_plugin_map.get(plugin_name.decode('ascii'))
if plugin_class:
try:
handler = plugin_class(self)
except TypeError:
                raise err.OperationalError(2059, "Authentication plugin '%s'"
                                           " not loaded: %r cannot be constructed with a connection object" % (plugin_name, plugin_class))
else:
handler = None
return handler
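    # Illustrative sketch (an assumption for the example, not part of the
    # upstream API surface): a custom auth plugin supplied via the
    # ``auth_plugin_map`` connection argument is resolved here and is expected
    # to look roughly like this:
    #
    #     class DialogHandler(object):
    #         def __init__(self, con):
    #             self.con = con            # the Connection object
    #
    #         def prompt(self, echo, prompt):
    #             return b"secret"          # bytes answered to the server prompt
    #
    #     conn = Connection(..., auth_plugin_map={b"dialog": DialogHandler})
    #
    # The class name and the returned bytes above are hypothetical.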
# _mysql support
def thread_id(self):
return self.server_thread_id[0]
def character_set_name(self):
return self.charset
def get_host_info(self):
return self.host_info
def get_proto_info(self):
return self.protocol_version
def _get_server_information(self):
i = 0
packet = self._read_packet()
data = packet.get_all_data()
self.protocol_version = byte2int(data[i:i+1])
i += 1
server_end = data.find(b'\0', i)
self.server_version = data[i:server_end].decode('latin1')
i = server_end + 1
self.server_thread_id = struct.unpack('<I', data[i:i+4])
i += 4
self.salt = data[i:i+8]
i += 9 # 8 + 1(filler)
self.server_capabilities = struct.unpack('<H', data[i:i+2])[0]
i += 2
if len(data) >= i + 6:
lang, stat, cap_h, salt_len = struct.unpack('<BHHB', data[i:i+6])
i += 6
# TODO: deprecate server_language and server_charset.
# mysqlclient-python doesn't provide it.
self.server_language = lang
try:
self.server_charset = charset_by_id(lang).name
except KeyError:
# unknown collation
self.server_charset = None
self.server_status = stat
if DEBUG: print("server_status: %x" % stat)
self.server_capabilities |= cap_h << 16
if DEBUG: print("salt_len:", salt_len)
salt_len = max(12, salt_len - 9)
# reserved
i += 10
if len(data) >= i + salt_len:
# salt_len includes auth_plugin_data_part_1 and filler
self.salt += data[i:i+salt_len]
i += salt_len
        i += 1
        # AUTH PLUGIN NAME may appear here.
        if self.server_capabilities & CLIENT.PLUGIN_AUTH and len(data) >= i:
            # Due to Bug#59453 the auth-plugin-name is missing the terminating
            # NUL-char in versions prior to 5.5.10 and 5.6.2.
            # ref: https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake
            # We don't rely on server version checks here because MariaDB has
            # fixed the bug but reports versions earlier than those two.
server_end = data.find(b'\0', i)
if server_end < 0: # pragma: no cover - very specific upstream bug
# not found \0 and last field so take it all
self._auth_plugin_name = data[i:].decode('utf-8')
else:
self._auth_plugin_name = data[i:server_end].decode('utf-8')
def get_server_info(self):
return self.server_version
Warning = err.Warning
Error = err.Error
InterfaceError = err.InterfaceError
DatabaseError = err.DatabaseError
DataError = err.DataError
OperationalError = err.OperationalError
IntegrityError = err.IntegrityError
InternalError = err.InternalError
ProgrammingError = err.ProgrammingError
NotSupportedError = err.NotSupportedError
class MySQLResult(object):
def __init__(self, connection):
"""
:type connection: Connection
"""
self.connection = connection
self.affected_rows = None
self.insert_id = None
self.server_status = None
self.warning_count = 0
self.message = None
self.field_count = 0
self.description = None
self.rows = None
self.has_next = None
self.unbuffered_active = False
def __del__(self):
if self.unbuffered_active:
self._finish_unbuffered_query()
def read(self):
try:
first_packet = self.connection._read_packet()
if first_packet.is_ok_packet():
self._read_ok_packet(first_packet)
elif first_packet.is_load_local_packet():
self._read_load_local_packet(first_packet)
else:
self._read_result_packet(first_packet)
finally:
self.connection = None
def init_unbuffered_query(self):
"""
:raise OperationalError: If the connection to the MySQL server is lost.
:raise InternalError:
"""
self.unbuffered_active = True
first_packet = self.connection._read_packet()
if first_packet.is_ok_packet():
self._read_ok_packet(first_packet)
self.unbuffered_active = False
self.connection = None
elif first_packet.is_load_local_packet():
self._read_load_local_packet(first_packet)
self.unbuffered_active = False
self.connection = None
else:
self.field_count = first_packet.read_length_encoded_integer()
self._get_descriptions()
# Apparently, MySQLdb picks this number because it's the maximum
# value of a 64bit unsigned integer. Since we're emulating MySQLdb,
# we set it to this instead of None, which would be preferred.
self.affected_rows = 18446744073709551615
def _read_ok_packet(self, first_packet):
ok_packet = OKPacketWrapper(first_packet)
self.affected_rows = ok_packet.affected_rows
self.insert_id = ok_packet.insert_id
self.server_status = ok_packet.server_status
self.warning_count = ok_packet.warning_count
self.message = ok_packet.message
self.has_next = ok_packet.has_next
def _read_load_local_packet(self, first_packet):
if not self.connection._local_infile:
raise RuntimeError(
"**WARN**: Received LOAD_LOCAL packet but local_infile option is false.")
load_packet = LoadLocalPacketWrapper(first_packet)
sender = LoadLocalFile(load_packet.filename, self.connection)
try:
sender.send_data()
except:
self.connection._read_packet() # skip ok packet
raise
ok_packet = self.connection._read_packet()
if not ok_packet.is_ok_packet(): # pragma: no cover - upstream induced protocol error
raise err.OperationalError(2014, "Commands Out of Sync")
self._read_ok_packet(ok_packet)
def _check_packet_is_eof(self, packet):
if not packet.is_eof_packet():
return False
#TODO: Support CLIENT.DEPRECATE_EOF
# 1) Add DEPRECATE_EOF to CAPABILITIES
# 2) Mask CAPABILITIES with server_capabilities
# 3) if server_capabilities & CLIENT.DEPRECATE_EOF: use OKPacketWrapper instead of EOFPacketWrapper
wp = EOFPacketWrapper(packet)
self.warning_count = wp.warning_count
self.has_next = wp.has_next
return True
def _read_result_packet(self, first_packet):
self.field_count = first_packet.read_length_encoded_integer()
self._get_descriptions()
self._read_rowdata_packet()
def _read_rowdata_packet_unbuffered(self):
# Check if in an active query
if not self.unbuffered_active:
return
# EOF
packet = self.connection._read_packet()
if self._check_packet_is_eof(packet):
self.unbuffered_active = False
self.connection = None
self.rows = None
return
row = self._read_row_from_packet(packet)
self.affected_rows = 1
        self.rows = (row,)  # rows should be a tuple of rows for MySQL-python compatibility.
return row
def _finish_unbuffered_query(self):
# After much reading on the MySQL protocol, it appears that there is,
# in fact, no way to stop MySQL from sending all the data after
# executing a query, so we just spin, and wait for an EOF packet.
while self.unbuffered_active:
packet = self.connection._read_packet()
if self._check_packet_is_eof(packet):
self.unbuffered_active = False
self.connection = None # release reference to kill cyclic reference.
def _read_rowdata_packet(self):
"""Read a rowdata packet for each data row in the result set."""
rows = []
while True:
packet = self.connection._read_packet()
if self._check_packet_is_eof(packet):
self.connection = None # release reference to kill cyclic reference.
break
rows.append(self._read_row_from_packet(packet))
self.affected_rows = len(rows)
self.rows = tuple(rows)
def _read_row_from_packet(self, packet):
row = []
for encoding, converter in self.converters:
try:
data = packet.read_length_coded_string()
except IndexError:
# No more columns in this row
# See https://github.com/PyMySQL/PyMySQL/pull/434
break
if data is not None:
if encoding is not None:
data = data.decode(encoding)
if DEBUG: print("DEBUG: DATA = ", data)
if converter is not None:
data = converter(data)
row.append(data)
return tuple(row)
def _get_descriptions(self):
"""Read a column descriptor packet for each column in the result."""
self.fields = []
self.converters = []
use_unicode = self.connection.use_unicode
conn_encoding = self.connection.encoding
description = []
for i in range_type(self.field_count):
field = self.connection._read_packet(FieldDescriptorPacket)
self.fields.append(field)
description.append(field.description())
field_type = field.type_code
if use_unicode:
if field_type == FIELD_TYPE.JSON:
# When SELECT from JSON column: charset = binary
# When SELECT CAST(... AS JSON): charset = connection encoding
# This behavior is different from TEXT / BLOB.
                    # We should decode the result using the connection encoding regardless of charsetnr.
# See https://github.com/PyMySQL/PyMySQL/issues/488
encoding = conn_encoding # SELECT CAST(... AS JSON)
elif field_type in TEXT_TYPES:
if field.charsetnr == 63: # binary
# TEXTs with charset=binary means BINARY types.
encoding = None
else:
encoding = conn_encoding
else:
# Integers, Dates and Times, and other basic data is encoded in ascii
encoding = 'ascii'
else:
encoding = None
converter = self.connection.decoders.get(field_type)
if converter is converters.through:
converter = None
if DEBUG: print("DEBUG: field={}, converter={}".format(field, converter))
self.converters.append((encoding, converter))
eof_packet = self.connection._read_packet()
assert eof_packet.is_eof_packet(), 'Protocol error, expecting EOF'
self.description = tuple(description)
class LoadLocalFile(object):
def __init__(self, filename, connection):
self.filename = filename
self.connection = connection
def send_data(self):
"""Send data packets from the local file to the server"""
if not self.connection._sock:
raise err.InterfaceError("(0, '')")
conn = self.connection
try:
with open(self.filename, 'rb') as open_file:
packet_size = min(conn.max_allowed_packet, 16*1024) # 16KB is efficient enough
while True:
chunk = open_file.read(packet_size)
if not chunk:
break
conn.write_packet(chunk)
except IOError:
raise err.OperationalError(1017, "Can't find file '{0}'".format(self.filename))
finally:
# send the empty packet to signify we are done sending data
conn.write_packet(b'') | zdppy-mysql | /zdppy_mysql-0.2.5.tar.gz/zdppy_mysql-0.2.5/zdppy_mysql/connections.py | connections.py |
from typing import List
from .exceptions import ParamError
def get_add_sql(table: str, columns: List[str]):
    """
    Build the SQL string for inserting a row.
    :param table: table name
    :param columns: column names
    :return: the INSERT statement
    """
    # Validate arguments
    if not table:
        raise ParamError(f"invalid table argument: table={table}")
    if not columns or not isinstance(columns, list):
        raise ParamError(f"invalid columns argument: columns={columns}")
    # Prepare the placeholders
    column_str = ", ".join(columns)
    values = ["%s" for _ in columns]
    values_str = ", ".join(values)
    # Build the SQL
    s = f"insert into {table} ({column_str}) values ({values_str});"
    return s
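# Illustrative output (table and columns are assumptions for the example):
#   get_add_sql("student", ["name", "age"])
#   -> "insert into student (name, age) values (%s, %s);"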
def get_add_many_sql(table: str, columns: List[str], values_length: int):
    """
    Build the SQL string for inserting multiple rows at once.
    :param table: table name
    :param columns: column names
    :param values_length: number of rows to insert
    :return: the INSERT statement
    """
    # Validate arguments
    if not table:
        raise ParamError(f"invalid table argument: table={table}")
    if not columns or not isinstance(columns, list):
        raise ParamError(f"invalid columns argument: columns={columns}")
    if not values_length or not isinstance(values_length, int):
        raise ParamError(f"invalid values_length argument: values_length={values_length}")
    # Prepare the placeholders
    column_str = ",".join(columns)
    values = ["%s" for _ in columns]
    values_str = ",".join(values)
    values_str = f"({values_str})"
    values_strs = [values_str for _ in range(values_length)]
    values_strs = ", ".join(values_strs)
    # Build the SQL
    s = f"insert into {table} ({column_str}) values {values_strs};"
    return s
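# Illustrative output (arguments are assumptions for the example):
#   get_add_many_sql("student", ["name", "age"], 2)
#   -> "insert into student (name,age) values (%s,%s), (%s,%s);"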
def get_sql_delete_by_id(table: str):
    """
    Build the SQL string for deleting a row by id.
    :param table: table name
    :return: the DELETE statement
    """
    # Validate arguments
    if not table:
        raise ParamError(f"invalid table argument: table={table}")
    # Build the SQL
    s = f"delete from {table} where id = %s;"
    return s

def get_sql_delete_by_ids(table: str, ids_length: int):
    """
    Build the SQL string for deleting rows by a list of ids.
    :param table: table name
    :param ids_length: number of ids in the IN clause
    :return: the DELETE statement
    """
    # Validate arguments
    if not table:
        raise ParamError(f"invalid table argument: table={table}")
    if not ids_length or not isinstance(ids_length, int):
        raise ParamError(f"invalid ids_length argument: ids_length={ids_length}")
    # Prepare the placeholders
    ids = ["%s" for _ in range(ids_length)]
    ids_str = ", ".join(ids)
    # Build the SQL
    s = f"delete from {table} where id in ({ids_str});"
    return s
def get_sql_update_by_id(table: str, columns: List[str]):
    """
    Build the SQL string for updating a row by id.
    :param table: table name
    :param columns: columns to update
    :return: the UPDATE statement
    """
    # Validate arguments
    if not table:
        raise ParamError(f"invalid table argument: table={table}")
    if not columns or not isinstance(columns, list):
        raise ParamError(f"invalid columns argument: columns={columns}")
    # Prepare the SET clause
    kvs = [f"{columns[i]}=%s" for i in range(len(columns))]
    kvs_str = ", ".join(kvs)
    # Build the SQL
    s = f"update {table} set {kvs_str} where id = %s;"
    return s
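# Illustrative output (arguments are assumptions for the example):
#   get_sql_update_by_id("student", ["name", "age"])
#   -> "update student set name=%s, age=%s where id = %s;"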
def get_sql_update_by_ids(table: str, columns: List[str], ids_length: int):
    """
    Build the SQL string for updating rows by a list of ids.
    :param table: table name
    :param columns: columns to update
    :param ids_length: number of ids in the IN clause
    :return: the UPDATE statement
    """
    # Validate arguments
    if not table:
        raise ParamError(f"invalid table argument: table={table}")
    if not columns or not isinstance(columns, list):
        raise ParamError(f"invalid columns argument: columns={columns}")
    if not ids_length or not isinstance(ids_length, int):
        raise ParamError(f"invalid ids_length argument: ids_length={ids_length}")
    # Prepare the SET clause and the placeholders
    kvs = [f"{columns[i]}=%s" for i in range(len(columns))]
    kvs_str = ", ".join(kvs)
    ids = ["%s" for _ in range(ids_length)]
    ids_str = ", ".join(ids)
    # Build the SQL
    s = f"update {table} set {kvs_str} where id in ({ids_str});"
    return s
def get_sql_find_by_id(table: str, columns: List[str]):
    """
    Build the SQL string for querying a row by id.
    :param table: table name
    :param columns: columns to select; None selects all columns
    :return: the SELECT statement
    """
    # Validate arguments
    if not table:
        raise ParamError(f"invalid table argument: table={table}")
    if columns and not isinstance(columns, list):
        raise ParamError(f"invalid columns argument: columns={columns}")
    # Prepare the column list
    columns_str = "*"
    if columns is not None:
        columns_str = ", ".join(columns)
    # Build the SQL
    s = f"select {columns_str} from {table} where id = %s;"
    return s

def get_sql_find_by_ids(table: str, columns: List[str], ids_length: int):
    """
    Build the SQL string for querying rows by a list of ids.
    :param table: table name
    :param columns: columns to select; None selects all columns
    :param ids_length: number of ids in the IN clause
    :return: the SELECT statement
    """
    # Validate arguments
    if not table:
        raise ParamError(f"invalid table argument: table={table}")
    if columns and not isinstance(columns, list):
        raise ParamError(f"invalid columns argument: columns={columns}")
    if ids_length and not isinstance(ids_length, int):
        raise ParamError(f"invalid ids_length argument: ids_length={ids_length}")
    # Prepare the placeholders
    columns_str = "*"
    if columns is not None:
        columns_str = ", ".join(columns)
    ids = ["%s" for _ in range(ids_length)]
    ids_str = ", ".join(ids)
    # Build the SQL
    s = f"select {columns_str} from {table} where id in ({ids_str});"
    return s
def get_sql_find_all(
        table: str,
        columns: List[str]):
    """
    Build the SQL string for querying all rows.
    :param table: table name
    :param columns: columns to select; None selects all columns
    :return: the SELECT statement
    """
    # Validate arguments
    if not table:
        raise ParamError(f"invalid table argument: table={table}")
    if columns and not isinstance(columns, list):
        raise ParamError(f"invalid columns argument: columns={columns}")
    # Prepare the column list
    columns_str = "*"
    if columns is not None:
        columns_str = ", ".join(columns)
    # Build the SQL
    s = f"select {columns_str} from {table};"
    return s

def get_sql_find_column_in(table: str, columns: List[str], column: str, values_length: int):
    """
    Build the SQL string for querying rows where a column is IN a value list.
    :param table: table name
    :param columns: columns to select; None selects all columns
    :param column: column used in the IN clause
    :param values_length: number of values in the IN clause
    :return: the SELECT statement
    """
    # Validate arguments
    if not table:
        raise ParamError(f"invalid table argument: table={table}")
    if columns and not isinstance(columns, list):
        raise ParamError(f"invalid columns argument: columns={columns}")
    if values_length and not isinstance(values_length, int):
        raise ParamError(f"invalid values_length argument: values_length={values_length}")
    # Prepare the placeholders
    columns_str = "*"
    if columns is not None:
        columns_str = ", ".join(columns)
    values = ["%s" for _ in range(values_length)]
    values_str = ", ".join(values)
    # Build the SQL
    s = f"select {columns_str} from {table} where {column} in ({values_str});"
    return s
def get_sql_find_by_page(table: str, columns: List[str],
                         page: int = 1,
                         size: int = 20,
                         asc_columns: List[str] = None,
                         desc_columns: List[str] = None):
    """
    Build the SQL string for a paged query.
    :param table: table name
    :param columns: columns to select; None selects all columns
    :param page: 1-based page number
    :param size: page size (1-100; falls back to 20 when out of range)
    :param asc_columns: columns to sort ascending
    :param desc_columns: columns to sort descending
    :return: the SELECT statement
    """
    # Validate arguments
    if not table:
        raise ParamError(f"invalid table argument: table={table}")
    if columns and not isinstance(columns, list):
        raise ParamError(f"invalid columns argument: columns={columns}")
    if page <= 0:
        page = 1
    if size <= 0 or size > 100:
        size = 20
    # Prepare the column list
    columns_str = "*"
    if columns is not None:
        columns_str = ", ".join(columns)
    # Build the SQL
    s = f"select {columns_str} from {table}"
    # Ordering (asc and desc parts are joined with a comma so that using
    # both at once still yields valid SQL)
    order_parts = []
    if asc_columns is not None:
        order_parts.append(", ".join(asc_columns) + " asc")
    if desc_columns is not None:
        order_parts.append(", ".join(desc_columns) + " desc")
    if order_parts:
        s = f"{s} order by {', '.join(order_parts)}"
    # Pagination
    offset = (page - 1) * size
    s = f"{s} limit {size} offset {offset}"
    return s
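# Illustrative output (arguments are assumptions for the example):
#   get_sql_find_by_page("student", ["id", "name"], page=2, size=10, desc_columns=["id"])
#   -> "select id, name from student order by id desc limit 10 offset 10"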
def get_create_table_sql(
        table: str,
        id_column=None,
        columns: List = None,
        open_engine=True,
        open_common: bool = True,
):
    """
    Build the CREATE TABLE SQL statement.
    :param table: table name
    :param id_column: primary key column definition; defaults to an auto-increment bigint id
    :param columns: list of column definitions
    :param open_engine: whether to append the engine/charset clause
    :param open_common: whether to append the common audit columns
    :return: the CREATE TABLE statement
    """
    # Handle the id column
    if id_column is None:
        id_column = "id bigint primary key auto_increment,"
    else:
        id_column = f"{id_column},"
    # Handle the column list
    if columns is None:
        raise ParamError("columns must not be empty")
    # Append the common audit columns
    if open_common:
        columns.append("create_time datetime not null default current_timestamp")  # creation time
        columns.append("update_time timestamp not null on update current_timestamp default current_timestamp")  # update time
        columns.append("delete_time timestamp null")  # deletion time
    columns_str = ",".join(columns)
    # Engine clause
    engine_str = ";"
    if open_engine:
        engine_str = "engine = innodb charset=utf8mb4;"
    # Assemble the SQL statement
    s = f"create table if not exists {table} ({id_column} {columns_str}) {engine_str}"
    # Return the SQL statement
    return s
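# Illustrative output (column definition is an assumption for the example):
#   get_create_table_sql("t_user", columns=["name varchar(64)"])
#   -> roughly "create table if not exists t_user (id bigint primary key
#      auto_increment, name varchar(64),create_time ...) engine = innodb charset=utf8mb4;"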
if __name__ == '__main__':
    # print(get_add_sql("student", ["name", "age"]))
    print(get_add_many_sql("student", ["name", "age"], 2))  # e.g. two rows: [["zhangsan", 22], ["lisi", 33]] | zdppy_mysql | /zdppy_mysql-0.2.5.tar.gz/zdppy_mysql/sql.py | sql.py
from __future__ import print_function
from .charset import MBLENGTH
from ._compat import PY2, range_type
from zdppy_mysql.constants import FIELD_TYPE, SERVER_STATUS
from zdppy_mysql import err
from .util import byte2int
import struct
import sys
DEBUG = False
NULL_COLUMN = 251
UNSIGNED_CHAR_COLUMN = 251
UNSIGNED_SHORT_COLUMN = 252
UNSIGNED_INT24_COLUMN = 253
UNSIGNED_INT64_COLUMN = 254
def dump_packet(data): # pragma: no cover
def printable(data):
if 32 <= byte2int(data) < 127:
if isinstance(data, int):
return chr(data)
return data
return '.'
try:
print("packet length:", len(data))
for i in range(1, 7):
f = sys._getframe(i)
print("call[%d]: %s (line %d)" % (i, f.f_code.co_name, f.f_lineno))
print("-" * 66)
except ValueError:
pass
dump_data = [data[i:i+16] for i in range_type(0, min(len(data), 256), 16)]
for d in dump_data:
print(' '.join("{:02X}".format(byte2int(x)) for x in d) +
' ' * (16 - len(d)) + ' ' * 2 +
''.join(printable(x) for x in d))
print("-" * 66)
print()
class MysqlPacket(object):
"""Representation of a MySQL response packet.
Provides an interface for reading/parsing the packet results.
"""
__slots__ = ('_position', '_data')
def __init__(self, data, encoding):
self._position = 0
self._data = data
def get_all_data(self):
return self._data
def read(self, size):
"""Read the first 'size' bytes in packet and advance cursor past them."""
result = self._data[self._position:(self._position+size)]
if len(result) != size:
error = ('Result length not requested length:\n'
'Expected=%s. Actual=%s. Position: %s. Data Length: %s'
% (size, len(result), self._position, len(self._data)))
if DEBUG:
print(error)
self.dump()
raise AssertionError(error)
self._position += size
return result
def read_all(self):
"""Read all remaining data in the packet.
(Subsequent read() will return errors.)
"""
result = self._data[self._position:]
self._position = None # ensure no subsequent read()
return result
def advance(self, length):
"""Advance the cursor in data buffer 'length' bytes."""
new_position = self._position + length
if new_position < 0 or new_position > len(self._data):
raise Exception('Invalid advance amount (%s) for cursor. '
'Position=%s' % (length, new_position))
self._position = new_position
def rewind(self, position=0):
"""Set the position of the data buffer cursor to 'position'."""
if position < 0 or position > len(self._data):
raise Exception("Invalid position to rewind cursor to: %s." % position)
self._position = position
def get_bytes(self, position, length=1):
"""Get 'length' bytes starting at 'position'.
Position is start of payload (first four packet header bytes are not
included) starting at index '0'.
No error checking is done. If requesting outside end of buffer
an empty string (or string shorter than 'length') may be returned!
"""
return self._data[position:(position+length)]
if PY2:
def read_uint8(self):
result = ord(self._data[self._position])
self._position += 1
return result
else:
def read_uint8(self):
result = self._data[self._position]
self._position += 1
return result
def read_uint16(self):
result = struct.unpack_from('<H', self._data, self._position)[0]
self._position += 2
return result
def read_uint24(self):
low, high = struct.unpack_from('<HB', self._data, self._position)
self._position += 3
return low + (high << 16)
def read_uint32(self):
result = struct.unpack_from('<I', self._data, self._position)[0]
self._position += 4
return result
def read_uint64(self):
result = struct.unpack_from('<Q', self._data, self._position)[0]
self._position += 8
return result
def read_string(self):
end_pos = self._data.find(b'\0', self._position)
if end_pos < 0:
return None
result = self._data[self._position:end_pos]
self._position = end_pos + 1
return result
def read_length_encoded_integer(self):
"""Read a 'Length Coded Binary' number from the data buffer.
Length coded numbers can be anywhere from 1 to 9 bytes depending
on the value of the first byte.
"""
c = self.read_uint8()
if c == NULL_COLUMN:
return None
if c < UNSIGNED_CHAR_COLUMN:
return c
elif c == UNSIGNED_SHORT_COLUMN:
return self.read_uint16()
elif c == UNSIGNED_INT24_COLUMN:
return self.read_uint24()
elif c == UNSIGNED_INT64_COLUMN:
return self.read_uint64()
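    # Illustrative encodings (defined by the MySQL wire protocol and the
    # constants above):
    #   0x2a            -> 42     (values below 251 fit in one byte)
    #   0xfc 0x10 0x27  -> 10000  (0xfc prefix + little-endian uint16)
    #   0xfd + 3 bytes  -> uint24, 0xfe + 8 bytes -> uint64
    #   0xfb            -> NULL column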
def read_length_coded_string(self):
"""Read a 'Length Coded String' from the data buffer.
A 'Length Coded String' consists first of a length coded
(unsigned, positive) integer represented in 1-9 bytes followed by
that many bytes of binary data. (For example "cat" would be "3cat".)
"""
length = self.read_length_encoded_integer()
if length is None:
return None
return self.read(length)
def read_struct(self, fmt):
s = struct.Struct(fmt)
result = s.unpack_from(self._data, self._position)
self._position += s.size
return result
def is_ok_packet(self):
# https://dev.mysql.com/doc/internals/en/packet-OK_Packet.html
return self._data[0:1] == b'\0' and len(self._data) >= 7
def is_eof_packet(self):
# http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-EOF_Packet
# Caution: \xFE may be LengthEncodedInteger.
        # If \xFE is a LengthEncodedInteger header, 8 bytes follow.
return self._data[0:1] == b'\xfe' and len(self._data) < 9
def is_auth_switch_request(self):
# http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchRequest
return self._data[0:1] == b'\xfe'
def is_extra_auth_data(self):
# https://dev.mysql.com/doc/internals/en/successful-authentication.html
return self._data[0:1] == b'\x01'
def is_resultset_packet(self):
field_count = ord(self._data[0:1])
return 1 <= field_count <= 250
def is_load_local_packet(self):
return self._data[0:1] == b'\xfb'
def is_error_packet(self):
return self._data[0:1] == b'\xff'
def check_error(self):
if self.is_error_packet():
self.rewind()
self.advance(1) # field_count == error (we already know that)
errno = self.read_uint16()
if DEBUG: print("errno =", errno)
err.raise_mysql_exception(self._data)
def dump(self):
dump_packet(self._data)
class FieldDescriptorPacket(MysqlPacket):
"""A MysqlPacket that represents a specific column's metadata in the result.
Parsing is automatically done and the results are exported via public
attributes on the class such as: db, table_name, name, length, type_code.
"""
def __init__(self, data, encoding):
MysqlPacket.__init__(self, data, encoding)
self._parse_field_descriptor(encoding)
def _parse_field_descriptor(self, encoding):
"""Parse the 'Field Descriptor' (Metadata) packet.
This is compatible with MySQL 4.1+ (not compatible with MySQL 4.0).
"""
self.catalog = self.read_length_coded_string()
self.db = self.read_length_coded_string()
self.table_name = self.read_length_coded_string().decode(encoding)
self.org_table = self.read_length_coded_string().decode(encoding)
self.name = self.read_length_coded_string().decode(encoding)
self.org_name = self.read_length_coded_string().decode(encoding)
self.charsetnr, self.length, self.type_code, self.flags, self.scale = (
self.read_struct('<xHIBHBxx'))
# 'default' is a length coded binary and is still in the buffer?
# not used for normal result sets...
def description(self):
"""Provides a 7-item tuple compatible with the Python PEP249 DB Spec."""
return (
self.name,
self.type_code,
None, # TODO: display_length; should this be self.length?
self.get_column_length(), # 'internal_size'
self.get_column_length(), # 'precision' # TODO: why!?!?
self.scale,
self.flags % 2 == 0)
def get_column_length(self):
if self.type_code == FIELD_TYPE.VAR_STRING:
mblen = MBLENGTH.get(self.charsetnr, 1)
return self.length // mblen
return self.length
def __str__(self):
return ('%s %r.%r.%r, type=%s, flags=%x'
% (self.__class__, self.db, self.table_name, self.name,
self.type_code, self.flags))
class OKPacketWrapper(object):
"""
OK Packet Wrapper. It uses an existing packet object, and wraps
around it, exposing useful variables while still providing access
to the original packet objects variables and methods.
"""
def __init__(self, from_packet):
if not from_packet.is_ok_packet():
raise ValueError('Cannot create ' + str(self.__class__.__name__) +
' object from invalid packet type')
self.packet = from_packet
self.packet.advance(1)
self.affected_rows = self.packet.read_length_encoded_integer()
self.insert_id = self.packet.read_length_encoded_integer()
self.server_status, self.warning_count = self.read_struct('<HH')
self.message = self.packet.read_all()
self.has_next = self.server_status & SERVER_STATUS.SERVER_MORE_RESULTS_EXISTS
def __getattr__(self, key):
return getattr(self.packet, key)
class EOFPacketWrapper(object):
"""
EOF Packet Wrapper. It uses an existing packet object, and wraps
around it, exposing useful variables while still providing access
to the original packet objects variables and methods.
"""
def __init__(self, from_packet):
if not from_packet.is_eof_packet():
raise ValueError(
"Cannot create '{0}' object from invalid packet type".format(
self.__class__))
self.packet = from_packet
self.warning_count, self.server_status = self.packet.read_struct('<xhh')
if DEBUG: print("server_status=", self.server_status)
self.has_next = self.server_status & SERVER_STATUS.SERVER_MORE_RESULTS_EXISTS
def __getattr__(self, key):
return getattr(self.packet, key)
class LoadLocalPacketWrapper(object):
"""
Load Local Packet Wrapper. It uses an existing packet object, and wraps
around it, exposing useful variables while still providing access
to the original packet objects variables and methods.
"""
def __init__(self, from_packet):
if not from_packet.is_load_local_packet():
raise ValueError(
"Cannot create '{0}' object from invalid packet type".format(
self.__class__))
self.packet = from_packet
self.filename = self.packet.get_all_data()[1:]
if DEBUG: print("filename=", self.filename)
def __getattr__(self, key):
return getattr(self.packet, key) | zdppy-mysql | /zdppy_mysql-0.2.5.tar.gz/zdppy_mysql-0.2.5/zdppy_mysql/protocol.py | protocol.py |
from .mysql import Mysql
import sys
from ._compat import PY2
from .constants import FIELD_TYPE
from .converters import escape_dict, escape_sequence, escape_string
from .err import (
Warning, Error, InterfaceError, DataError,
DatabaseError, OperationalError, IntegrityError, InternalError,
NotSupportedError, ProgrammingError, MySQLError)
from .times import (
Date, Time, Timestamp,
DateFromTicks, TimeFromTicks, TimestampFromTicks)
VERSION = (0, 9, 3, None)
if VERSION[3] is not None:
VERSION_STRING = "%d.%d.%d_%s" % VERSION
else:
VERSION_STRING = "%d.%d.%d" % VERSION[:3]
threadsafety = 1
apilevel = "2.0"
paramstyle = "pyformat"
class DBAPISet(frozenset):
def __ne__(self, other):
if isinstance(other, set):
return frozenset.__ne__(self, other)
else:
return other not in self
def __eq__(self, other):
if isinstance(other, frozenset):
return frozenset.__eq__(self, other)
else:
return other in self
def __hash__(self):
return frozenset.__hash__(self)
STRING = DBAPISet([FIELD_TYPE.ENUM, FIELD_TYPE.STRING,
FIELD_TYPE.VAR_STRING])
BINARY = DBAPISet([FIELD_TYPE.BLOB, FIELD_TYPE.LONG_BLOB,
FIELD_TYPE.MEDIUM_BLOB, FIELD_TYPE.TINY_BLOB])
NUMBER = DBAPISet([FIELD_TYPE.DECIMAL, FIELD_TYPE.DOUBLE, FIELD_TYPE.FLOAT,
FIELD_TYPE.INT24, FIELD_TYPE.LONG, FIELD_TYPE.LONGLONG,
FIELD_TYPE.TINY, FIELD_TYPE.YEAR])
DATE = DBAPISet([FIELD_TYPE.DATE, FIELD_TYPE.NEWDATE])
TIME = DBAPISet([FIELD_TYPE.TIME])
TIMESTAMP = DBAPISet([FIELD_TYPE.TIMESTAMP, FIELD_TYPE.DATETIME])
DATETIME = TIMESTAMP
ROWID = DBAPISet()
def Binary(x):
"""Return x as a binary type."""
if PY2:
return bytearray(x)
else:
return bytes(x)
def Connect(*args, **kwargs):
"""
Connect to the database; see connections.Connection.__init__() for
more information.
"""
from .connections import Connection
return Connection(*args, **kwargs)
from . import connections as _orig_conn
if _orig_conn.Connection.__init__.__doc__ is not None:
Connect.__doc__ = _orig_conn.Connection.__init__.__doc__
del _orig_conn
def get_client_info(): # for MySQLdb compatibility
version = VERSION
if VERSION[3] is None:
version = VERSION[:3]
return '.'.join(map(str, version))
connect = Connection = Connect
# we include a doctored version_info here for MySQLdb compatibility
version_info = (1, 3, 12, "final", 0)
NULL = "NULL"
__version__ = get_client_info()
def thread_safe():
return True # match MySQLdb.thread_safe()
def install_as_MySQLdb():
    """
    Register zdppy_mysql as the MySQL DB driver, so that code importing
    MySQLdb (or _mysql) gets this package instead.
    """
    sys.modules["MySQLdb"] = sys.modules["_mysql"] = sys.modules["zdppy_mysql"]
__all__ = [
'BINARY', 'Binary', 'Connect', 'Connection', 'DATE', 'Date',
'Time', 'Timestamp', 'DateFromTicks', 'TimeFromTicks', 'TimestampFromTicks',
'DataError', 'DatabaseError', 'Error', 'FIELD_TYPE', 'IntegrityError',
'InterfaceError', 'InternalError', 'MySQLError', 'NULL', 'NUMBER',
'NotSupportedError', 'DBAPISet', 'OperationalError', 'ProgrammingError',
'ROWID', 'STRING', 'TIME', 'TIMESTAMP', 'Warning', 'apilevel', 'connect',
'connections', 'constants', 'converters', 'cursors',
'escape_dict', 'escape_sequence', 'escape_string', 'get_client_info',
'paramstyle', 'threadsafety', 'version_info',
"install_as_MySQLdb",
"NULL", "__version__",
"Mysql",
] | zdppy-mysql | /zdppy_mysql-0.2.5.tar.gz/zdppy_mysql-0.2.5/zdppy_mysql/__init__.py | __init__.py |
import json
from typing import Tuple, Any, List, Union
import zdppy_mysql
from .sql import (
get_add_sql, get_add_many_sql, get_sql_delete_by_id,
get_sql_delete_by_ids, get_sql_update_by_id, get_sql_update_by_ids,
get_sql_find_by_id, get_sql_find_by_ids, get_sql_find_by_page,
get_create_table_sql, get_sql_find_column_in, get_sql_find_all)
from .json_encoder import JsonEncoder
class Mysql:
    """
    Synchronous MySQL client.
    """

    def __init__(self,
                 host: str = "127.0.0.1",
                 port: int = 3306,
                 user: str = 'root',
                 password: str = 'root',
                 db: str = 'test',
                 charset: str = 'utf8'
                 ):
        # Connection settings
        self.host = host
        self.port = port
        self.user = user
        self.password = password
        self.database = db
        self.charset = charset
        # Known databases: {database name: exists}
        self.__databases = {}
        # Known tables: {table name: exists}
        self.__tables = {}

    def get_connection(self):
        """
        Create and return a new database connection.
        :return: a connection object, or False if connecting fails
        """
        try:
            conn = zdppy_mysql.connect(host=self.host,
                                       port=self.port,
                                       user=self.user,
                                       password=self.password,
                                       db=self.database,
                                       charset=self.charset,
                                       cursorclass=zdppy_mysql.cursors.DictCursor
                                       )
            return conn
        except Exception:
            return False
    def executemany(self, sql, values):
        """
        Execute a batch insert.
        :param sql: SQL statement with %s placeholders
        :param values: sequence of parameter sequences
        :return: number of affected rows
        """
        # Get a database connection
        conn = self.get_connection()
        with conn:
            # Execute the SQL statement; the context managers take care of
            # closing the cursor and finishing the connection
            with conn.cursor() as cur:
                cur.executemany(sql, values)
                conn.commit()
                # Collect the result
                result = cur.rowcount
        # Return the result
        return result

    def execute(self, sql: str, params: Any = None):
        """
        Execute a single SQL statement.
        :param sql: the SQL statement to execute
        :param params: the parameters to bind
        :return: number of affected rows
        """
        # Get a database connection
        conn = self.get_connection()
        result = 0
        with conn:
            # Execute the SQL statement
            with conn.cursor() as cur:
                cur.execute(sql, params)
                result = cur.rowcount
                conn.commit()
        return result

    def executes(self, sqls):
        """
        Execute several SQL statements in one transaction.
        :param sqls: a list of (sql, params) pairs
        :return: total number of affected rows, or 0 if the transaction was rolled back
        """
        # Get a database connection
        conn = self.get_connection()
        # Number of affected rows
        count = 0
        with conn:
            # Execute the SQL statements
            with conn.cursor() as cur:
                for sql in sqls:
                    try:
                        cur.execute(sql[0], sql[1])
                    except Exception:
                        # Roll back the whole transaction and stop
                        conn.rollback()
                        return 0
                    count += cur.rowcount
                conn.commit()
        # Return the number of rows affected
        return count
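    # Illustrative call (each item is a (sql, params) pair executed in one
    # transaction; the table and values are assumptions for the example):
    #
    #   db.executes([
    #       ("insert into student (name) values (%s)", ("zhangsan",)),
    #       ("update student set name=%s where id=%s", ("lisi", 1)),
    #   ])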
    def fetchone(self, sql: str, args: Tuple = None, to_json=False):
        """
        Execute a query and fetch a single row.
        :param sql: the SQL statement to execute
        :param args: the parameters to bind
        :param to_json: whether to serialize the result to a JSON string
        :return: the first matching row
        """
        # Get a database connection
        conn = self.get_connection()
        with conn:
            # Execute the SQL statement
            with conn.cursor() as cur:
                cur.execute(sql, args)
                # Fetch the query result
                result = cur.fetchone()
                # Commit the transaction
                conn.commit()
                # Serialize to JSON if requested
                if to_json:
                    result = json.dumps(result, cls=JsonEncoder, ensure_ascii=False)
                # Return the query result
                return result

    def fetchall(self, sql: str, args: Tuple = None, to_json: bool = False):
        """
        Execute a query and fetch all rows.
        :param sql: the SQL statement to execute
        :param args: the parameters to bind
        :param to_json: whether to serialize the result to a JSON string
        :return: all matching rows
        """
        # Get a database connection
        conn = self.get_connection()
        with conn:
            # Execute the SQL statement
            with conn.cursor() as cur:
                cur.execute(sql, args)
                # Fetch the query result
                result = cur.fetchall()
                # Commit the transaction
                conn.commit()
                # Serialize to JSON if requested
                if to_json:
                    result = json.dumps(result, cls=JsonEncoder, ensure_ascii=False)
                # Return the query result
                return result

    def add(self, table, columns, values):
        """
        Insert a single row.
        :param table: table name
        :param columns: column names
        :param values: values matching the columns
        :return: number of affected rows
        """
        sql = get_add_sql(table, columns)
        return self.execute(sql, values)

    def add_many(self, table: str, columns: List[str], values: List[List]):
        """
        Insert multiple rows at once.
        :param table: table name
        :param columns: column names
        :param values: a list of rows, each a list of values
        :return: number of affected rows
        """
        sql = get_add_many_sql(table, columns, len(values))
        values = tuple(i for k in values for i in k)  # flatten the rows into a one-dimensional tuple
        return self.execute(sql, values)
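    # Illustrative call (table and values are assumptions for the example):
    #
    #   db.add_many("student", ["name", "age"], [["zhangsan", 22], ["lisi", 33]])
    #
    # builds a two-row INSERT and flattens the values to
    # ("zhangsan", 22, "lisi", 33) before binding.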
    def delete_by_id(self, table: str, id: int):
        """
        Delete a row by id.
        :param table: table name
        :param id: primary key value
        :return: number of affected rows
        """
        sql = get_sql_delete_by_id(table)
        return self.execute(sql, (id,))

    def delete_by_ids(self, table: str, ids: Tuple):
        """
        Delete rows by a tuple of ids.
        :param table: table name
        :param ids: tuple of primary key values
        :return: number of affected rows
        """
        sql = get_sql_delete_by_ids(table, len(ids))
        return self.execute(sql, ids)

    def update_by_id(self, table: str, columns: List[str], values: List[Any], id: int):
        """
        Update a row by id.
        :param table: table name
        :param columns: columns to update
        :param values: new values matching the columns
        :param id: primary key value
        :return: number of affected rows
        """
        sql = get_sql_update_by_id(table, columns)
        values.append(id)
        return self.execute(sql, tuple(values))

    def update_by_ids(self, table: str, columns: List[str], values: List[Any], ids: List[int]):
        """
        Update rows by a list of ids.
        :param table: table name
        :param columns: columns to update
        :param values: new values matching the columns
        :param ids: list of primary key values
        :return: number of affected rows
        """
        sql = get_sql_update_by_ids(table, columns, len(ids))
        values.extend(ids)
        return self.execute(sql, tuple(values))

    def find_by_id(self, table: str, columns: Union[List[str], None], id: int):
        """
        Query a single row by id.
        :param table: table name
        :param columns: columns to select; None selects all columns
        :param id: primary key value
        :return: the matching row
        """
        sql = get_sql_find_by_id(table, columns)
        return self.fetchone(sql, (id,))

    def find_by_ids(self, table: str, columns: Union[List[str], None], ids: Tuple):
        """
        Query rows by a tuple of ids.
        :param table: table name
        :param columns: columns to select; None selects all columns
        :param ids: tuple of primary key values
        :return: the matching rows
        """
        sql = get_sql_find_by_ids(table, columns, len(ids))
        return self.fetchall(sql, ids)

    def find_by_page(self,
                     table: str,
                     columns: Union[List[str], None],
                     page: int = 1,
                     size: int = 20,
                     asc_columns: List[str] = None,
                     desc_columns: List[str] = None):
        """
        Query rows page by page.
        :param table: table name
        :param columns: columns to select; None selects all columns
        :param page: 1-based page number
        :param size: page size
        :param asc_columns: columns to sort ascending
        :param desc_columns: columns to sort descending
        :return: the matching rows
        """
        sql = get_sql_find_by_page(table, columns, page, size, asc_columns, desc_columns)
        return self.fetchall(sql)

    def find_all(self,
                 table: str,
                 columns: Union[List[str], None],
                 ):
        """
        Query all rows.
        :param table: table name
        :param columns: columns to select; None selects all columns
        :return: all rows
        """
        sql = get_sql_find_all(table, columns)
        return self.fetchall(sql)
    def show_databases(self):
        """
        List all databases on the server.
        :return: list of database names
        """
        sql = "show databases;"
        # Get a database connection
        conn = self.get_connection()
        result = None
        with conn:
            # Execute the SQL statement
            with conn.cursor() as cur:
                cur.execute(sql)
                result = cur.fetchall()
                # Extract the database names
                if result is not None and isinstance(result, list):
                    result = [database.get("Database") for database in result]
                    # Build the lookup dict
                    flags = [True for _ in result]
                    temp_dict = dict(zip(result, flags))
                    # Update the cache
                    self.__databases.update(temp_dict)
                conn.commit()
        return result

    def create_database(self, database_name: str):
        """
        Create a database if it does not exist.
        :param database_name: name of the database to create
        :return: number of affected rows
        """
        # Refresh the database cache if needed
        if self.__databases is None or len(self.__databases) == 0:
            self.show_databases()
        # Create the database
        if not self.__databases.get(database_name):
            sql = f"CREATE DATABASE IF NOT EXISTS `{database_name}` CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci;"
            result = self.execute(sql)
            return result

    def delete_database(self, database_name: str):
        """
        Drop a database if it exists.
        :param database_name: name of the database to drop
        :return: number of affected rows
        """
        # Refresh the database cache if needed
        if self.__databases is None or len(self.__databases) == 0:
            self.show_databases()
        # Drop the database
        if self.__databases.get(database_name):
            sql = f"DROP DATABASE IF EXISTS {database_name};"
            result = self.execute(sql)
            del self.__databases[database_name]
            return result

    def create_table(self,
                     table: str,
                     id_column=None,
                     columns: List = None,
                     open_engine=True,
                     open_common: bool = True
                     ):
        """
        Create a table if it does not exist.
        :param table: table name
        :param id_column: primary key column definition; defaults to an auto-increment bigint id
        :param columns: list of column definitions
        :param open_engine: whether to append the engine/charset clause
        :param open_common: whether to add the common audit columns
        :return: number of affected rows
        """
        # Refresh the table cache if needed
        if self.__tables is None or len(self.__tables) == 0:
            self.show_tables()
        # Create the table
        if not self.__tables.get(table):
            # Build the CREATE TABLE statement
            s = get_create_table_sql(
                table,
                id_column,
                columns,
                open_engine,
                open_common,
            )
            # Create the table
            result = self.execute(s)
            # Return the result
            return result
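    # Illustrative call (column definitions are assumptions for the example):
    #
    #   db.create_table(
    #       "student",
    #       columns=["name varchar(64) not null", "age int default 0"],
    #   )
    #
    # adds the auto-increment id plus the common audit columns by default.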
    def show_tables(self):
        """
        List all tables in the current database.
        :return: list of table names
        """
        sql = "show tables;"
        # Get a database connection
        conn = self.get_connection()
        result = None
        with conn:
            # Execute the SQL statement
            with conn.cursor() as cur:
                cur.execute(sql)
                result = cur.fetchall()
                # Extract the table names
                if result is not None and isinstance(result, list):
                    result = [table.get(f"Tables_in_{self.database}") for table in result]
                    # Build the lookup dict
                    flags = [True for _ in result]
                    temp_dict = dict(zip(result, flags))
                    # Update the cache
                    self.__tables.update(temp_dict)
                conn.commit()
        return result

    def delete_table(self, table: str):
        """
        Drop a table if it exists.
        :param table: table name
        :return: number of affected rows
        """
        # Refresh the table cache if needed
        if self.__tables is None or len(self.__tables) == 0:
            self.show_tables()
        # Assemble the SQL statement
        s = f"drop table if exists {table};"
        # Drop the table
        if self.__tables.get(table):
            result = self.execute(s)
            del self.__tables[table]
            # Return the result
            return result

    def rename_table(self, table: str, new_table: str):
        """
        Rename a table.
        :param table: current table name
        :param new_table: new table name
        :return: number of affected rows
        """
        # Refresh the table cache if needed
        if self.__tables is None or len(self.__tables) == 0:
            self.show_tables()
        # Assemble the SQL statement
        s = f"alter table {table} rename to {new_table};"
        # Rename the table
        if self.__tables.get(table):
            result = self.execute(s)
            del self.__tables[table]
            self.__tables[new_table] = True
            # Return the result
            return result
    def add_column(self, table: str, column: str, ctype: str):
        """
        Alter a table: add a column.
        :param table: table name
        :param column: column name
        :param ctype: column type definition
        :return: number of affected rows
        """
        # Refresh the table cache if needed
        if self.__tables is None or len(self.__tables) == 0:
            self.show_tables()
        # Assemble the SQL statement
        s = f"alter table {table} add column {column} {ctype};"
        # Alter the table
        if self.__tables.get(table):
            result = self.execute(s)
            # Return the result
            return result

    def delete_column(self, table: str, column: str):
        """
        Alter a table: drop a column.
        :param table: table name
        :param column: column name
        :return: number of affected rows
        """
        # Refresh the table cache if needed
        if self.__tables is None or len(self.__tables) == 0:
            self.show_tables()
        # Assemble the SQL statement
        s = f"alter table {table} drop column {column};"
        # Alter the table
        if self.__tables.get(table):
            result = self.execute(s)
            # Return the result
            return result

    def update_column(self, table: str, column: str, new_column: str, new_column_type: str):
        """
        Alter a table: change a column's name and type.
        :param table: table name
        :param column: current column name
        :param new_column: new column name
        :param new_column_type: new column type definition
        :return: number of affected rows
        """
        # Refresh the table cache if needed
        if self.__tables is None or len(self.__tables) == 0:
            self.show_tables()
        # Assemble the SQL statement
        s = f"alter table {table} change column {column} {new_column} {new_column_type};"
        # Alter the table
        if self.__tables.get(table):
            result = self.execute(s)
            # Return the result
            return result

    def add_primary_key(self, table: str, column: str):
        """
        Alter a table: add a primary key.
        :param table: table name
        :param column: column to use as the primary key
        :return: number of affected rows
        """
        # Refresh the table cache if needed
        if self.__tables is None or len(self.__tables) == 0:
            self.show_tables()
        # Assemble the SQL statement
        s = f"alter table {table} add primary key ({column});"
        # Alter the table
        if self.__tables.get(table):
            result = self.execute(s)
            # Return the result
            return result
    def delete_primary_key(self, table: str):
        """
        Alter a table: drop the primary key.
        :param table: table name
        :return: number of affected rows
        """
        # Refresh the table cache if needed
        if self.__tables is None or len(self.__tables) == 0:
            self.show_tables()
        # Assemble the SQL statement
        s = f"alter table {table} drop primary key;"
        # Alter the table
        if self.__tables.get(table):
            result = self.execute(s)
            # Return the result
            return result

    def add_foreign_key(self, table: str,
                        foreign_key_column: str,
                        reference_table: str,
                        reference_primary_key: str = "id",
                        foreign_key_name: str = None,
                        ):
        """
        Alter a table: add a foreign key.
        :param table: table name
        :param foreign_key_column: column holding the foreign key
        :param reference_table: referenced table
        :param reference_primary_key: referenced primary key column, default "id"
        :param foreign_key_name: constraint name; defaults to fk_<column>
        :return: number of affected rows
        """
        # Refresh the table cache if needed
        if self.__tables is None or len(self.__tables) == 0:
            self.show_tables()
        # Assemble the SQL statement
        if foreign_key_name is None:
            foreign_key_name = f"fk_{foreign_key_column}"
        s = f"alter table {table} add constraint {foreign_key_name} foreign key({foreign_key_column}) references {reference_table}({reference_primary_key});"
        # Alter the table
        if self.__tables.get(table):
            result = self.execute(s)
            # Return the result
            return result
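    # Illustrative call (table names are assumptions for the example):
    #
    #   db.add_foreign_key("score", "student_id", "student")
    #
    # roughly issues: alter table score add constraint fk_student_id
    # foreign key(student_id) references student(id);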
    def delete_foreign_key(self, table: str, foreign_key_name: str):
        """
        Alter a table: drop a foreign key.
        :param table: table name
        :param foreign_key_name: name of the foreign key constraint
        :return: number of affected rows
        """
        # Refresh the table cache if needed
        if self.__tables is None or len(self.__tables) == 0:
            self.show_tables()
        # Assemble the SQL statement
        s = f"alter table {table} drop foreign key {foreign_key_name};"
        # Alter the table
        if self.__tables.get(table):
            result = self.execute(s)
            # Return the result
            return result
    def find_column_in(self, table: str,
                       column: str = None,
                       values: Tuple = None,
                       to_json: bool = False,
                       show_columns: List = None,
                       ):
        """
        Query rows where the given column is IN the given values.
        :param table: table name
        :param show_columns: columns to select; None selects all columns
        :param column: column used in the IN clause
        :param values: tuple of values for the IN clause
        :param to_json: whether to serialize the result to a JSON string
        :return: the matching rows
        """
sql = get_sql_find_column_in(table, show_columns, column, len(values))
return self.fetchall(sql, values, to_json=to_json) | zdppy-mysql | /zdppy_mysql-0.2.5.tar.gz/zdppy_mysql-0.2.5/zdppy_mysql/mysql.py | mysql.py |
from socket import *
import io
import errno
__all__ = ['SocketIO']
EINTR = errno.EINTR
_blocking_errnos = (errno.EAGAIN, errno.EWOULDBLOCK)
class SocketIO(io.RawIOBase):
"""Raw I/O implementation for stream sockets.
This class supports the makefile() method on sockets. It provides
the raw I/O interface on top of a socket object.
"""
# One might wonder why not let FileIO do the job instead. There are two
# main reasons why FileIO is not adapted:
    # - it wouldn't work under Windows (where you can't use read() and
# write() on a socket handle)
# - it wouldn't work with socket timeouts (FileIO would ignore the
# timeout and consider the socket non-blocking)
# XXX More docs
def __init__(self, sock, mode):
if mode not in ("r", "w", "rw", "rb", "wb", "rwb"):
raise ValueError("invalid mode: %r" % mode)
io.RawIOBase.__init__(self)
self._sock = sock
if "b" not in mode:
mode += "b"
self._mode = mode
self._reading = "r" in mode
self._writing = "w" in mode
self._timeout_occurred = False
def readinto(self, b):
"""Read up to len(b) bytes into the writable buffer *b* and return
the number of bytes read. If the socket is non-blocking and no bytes
are available, None is returned.
If *b* is non-empty, a 0 return value indicates that the connection
was shutdown at the other end.
"""
self._checkClosed()
self._checkReadable()
if self._timeout_occurred:
raise IOError("cannot read from timed out object")
while True:
try:
return self._sock.recv_into(b)
except timeout:
self._timeout_occurred = True
raise
except error as e:
n = e.args[0]
if n == EINTR:
continue
if n in _blocking_errnos:
return None
raise
def write(self, b):
"""Write the given bytes or bytearray object *b* to the socket
and return the number of bytes written. This can be less than
len(b) if not all data could be written. If the socket is
non-blocking and no bytes could be written None is returned.
"""
self._checkClosed()
self._checkWritable()
try:
return self._sock.send(b)
except error as e:
# XXX what about EINTR?
if e.args[0] in _blocking_errnos:
return None
raise
def readable(self):
"""True if the SocketIO is open for reading.
"""
if self.closed:
raise ValueError("I/O operation on closed socket.")
return self._reading
def writable(self):
"""True if the SocketIO is open for writing.
"""
if self.closed:
raise ValueError("I/O operation on closed socket.")
return self._writing
def seekable(self):
"""True if the SocketIO is open for seeking.
"""
if self.closed:
raise ValueError("I/O operation on closed socket.")
return super().seekable()
def fileno(self):
"""Return the file descriptor of the underlying socket.
"""
self._checkClosed()
return self._sock.fileno()
@property
def name(self):
if not self.closed:
return self.fileno()
else:
return -1
@property
def mode(self):
return self._mode
def close(self):
"""Close the SocketIO object. This doesn't close the underlying
socket, except if all references to it have disappeared.
"""
if self.closed:
return
io.RawIOBase.close(self)
self._sock._decref_socketios()
self._sock = None | zdppy-mysql | /zdppy_mysql-0.2.5.tar.gz/zdppy_mysql-0.2.5/zdppy_mysql/_socketio.py | _socketio.py |
ERROR_FIRST = 1000
HASHCHK = 1000
NISAMCHK = 1001
NO = 1002
YES = 1003
CANT_CREATE_FILE = 1004
CANT_CREATE_TABLE = 1005
CANT_CREATE_DB = 1006
DB_CREATE_EXISTS = 1007
DB_DROP_EXISTS = 1008
DB_DROP_DELETE = 1009
DB_DROP_RMDIR = 1010
CANT_DELETE_FILE = 1011
CANT_FIND_SYSTEM_REC = 1012
CANT_GET_STAT = 1013
CANT_GET_WD = 1014
CANT_LOCK = 1015
CANT_OPEN_FILE = 1016
FILE_NOT_FOUND = 1017
CANT_READ_DIR = 1018
CANT_SET_WD = 1019
CHECKREAD = 1020
DISK_FULL = 1021
DUP_KEY = 1022
ERROR_ON_CLOSE = 1023
ERROR_ON_READ = 1024
ERROR_ON_RENAME = 1025
ERROR_ON_WRITE = 1026
FILE_USED = 1027
FILSORT_ABORT = 1028
FORM_NOT_FOUND = 1029
GET_ERRNO = 1030
ILLEGAL_HA = 1031
KEY_NOT_FOUND = 1032
NOT_FORM_FILE = 1033
NOT_KEYFILE = 1034
OLD_KEYFILE = 1035
OPEN_AS_READONLY = 1036
OUTOFMEMORY = 1037
OUT_OF_SORTMEMORY = 1038
UNEXPECTED_EOF = 1039
CON_COUNT_ERROR = 1040
OUT_OF_RESOURCES = 1041
BAD_HOST_ERROR = 1042
HANDSHAKE_ERROR = 1043
DBACCESS_DENIED_ERROR = 1044
ACCESS_DENIED_ERROR = 1045
NO_DB_ERROR = 1046
UNKNOWN_COM_ERROR = 1047
BAD_NULL_ERROR = 1048
BAD_DB_ERROR = 1049
TABLE_EXISTS_ERROR = 1050
BAD_TABLE_ERROR = 1051
NON_UNIQ_ERROR = 1052
SERVER_SHUTDOWN = 1053
BAD_FIELD_ERROR = 1054
WRONG_FIELD_WITH_GROUP = 1055
WRONG_GROUP_FIELD = 1056
WRONG_SUM_SELECT = 1057
WRONG_VALUE_COUNT = 1058
TOO_LONG_IDENT = 1059
DUP_FIELDNAME = 1060
DUP_KEYNAME = 1061
DUP_ENTRY = 1062
WRONG_FIELD_SPEC = 1063
PARSE_ERROR = 1064
EMPTY_QUERY = 1065
NONUNIQ_TABLE = 1066
INVALID_DEFAULT = 1067
MULTIPLE_PRI_KEY = 1068
TOO_MANY_KEYS = 1069
TOO_MANY_KEY_PARTS = 1070
TOO_LONG_KEY = 1071
KEY_COLUMN_DOES_NOT_EXITS = 1072
BLOB_USED_AS_KEY = 1073
TOO_BIG_FIELDLENGTH = 1074
WRONG_AUTO_KEY = 1075
READY = 1076
NORMAL_SHUTDOWN = 1077
GOT_SIGNAL = 1078
SHUTDOWN_COMPLETE = 1079
FORCING_CLOSE = 1080
IPSOCK_ERROR = 1081
NO_SUCH_INDEX = 1082
WRONG_FIELD_TERMINATORS = 1083
BLOBS_AND_NO_TERMINATED = 1084
TEXTFILE_NOT_READABLE = 1085
FILE_EXISTS_ERROR = 1086
LOAD_INFO = 1087
ALTER_INFO = 1088
WRONG_SUB_KEY = 1089
CANT_REMOVE_ALL_FIELDS = 1090
CANT_DROP_FIELD_OR_KEY = 1091
INSERT_INFO = 1092
UPDATE_TABLE_USED = 1093
NO_SUCH_THREAD = 1094
KILL_DENIED_ERROR = 1095
NO_TABLES_USED = 1096
TOO_BIG_SET = 1097
NO_UNIQUE_LOGFILE = 1098
TABLE_NOT_LOCKED_FOR_WRITE = 1099
TABLE_NOT_LOCKED = 1100
BLOB_CANT_HAVE_DEFAULT = 1101
WRONG_DB_NAME = 1102
WRONG_TABLE_NAME = 1103
TOO_BIG_SELECT = 1104
UNKNOWN_ERROR = 1105
UNKNOWN_PROCEDURE = 1106
WRONG_PARAMCOUNT_TO_PROCEDURE = 1107
WRONG_PARAMETERS_TO_PROCEDURE = 1108
UNKNOWN_TABLE = 1109
FIELD_SPECIFIED_TWICE = 1110
INVALID_GROUP_FUNC_USE = 1111
UNSUPPORTED_EXTENSION = 1112
TABLE_MUST_HAVE_COLUMNS = 1113
RECORD_FILE_FULL = 1114
UNKNOWN_CHARACTER_SET = 1115
TOO_MANY_TABLES = 1116
TOO_MANY_FIELDS = 1117
TOO_BIG_ROWSIZE = 1118
STACK_OVERRUN = 1119
WRONG_OUTER_JOIN = 1120
NULL_COLUMN_IN_INDEX = 1121
CANT_FIND_UDF = 1122
CANT_INITIALIZE_UDF = 1123
UDF_NO_PATHS = 1124
UDF_EXISTS = 1125
CANT_OPEN_LIBRARY = 1126
CANT_FIND_DL_ENTRY = 1127
FUNCTION_NOT_DEFINED = 1128
HOST_IS_BLOCKED = 1129
HOST_NOT_PRIVILEGED = 1130
PASSWORD_ANONYMOUS_USER = 1131
PASSWORD_NOT_ALLOWED = 1132
PASSWORD_NO_MATCH = 1133
UPDATE_INFO = 1134
CANT_CREATE_THREAD = 1135
WRONG_VALUE_COUNT_ON_ROW = 1136
CANT_REOPEN_TABLE = 1137
INVALID_USE_OF_NULL = 1138
REGEXP_ERROR = 1139
MIX_OF_GROUP_FUNC_AND_FIELDS = 1140
NONEXISTING_GRANT = 1141
TABLEACCESS_DENIED_ERROR = 1142
COLUMNACCESS_DENIED_ERROR = 1143
ILLEGAL_GRANT_FOR_TABLE = 1144
GRANT_WRONG_HOST_OR_USER = 1145
NO_SUCH_TABLE = 1146
NONEXISTING_TABLE_GRANT = 1147
NOT_ALLOWED_COMMAND = 1148
SYNTAX_ERROR = 1149
DELAYED_CANT_CHANGE_LOCK = 1150
TOO_MANY_DELAYED_THREADS = 1151
ABORTING_CONNECTION = 1152
NET_PACKET_TOO_LARGE = 1153
NET_READ_ERROR_FROM_PIPE = 1154
NET_FCNTL_ERROR = 1155
NET_PACKETS_OUT_OF_ORDER = 1156
NET_UNCOMPRESS_ERROR = 1157
NET_READ_ERROR = 1158
NET_READ_INTERRUPTED = 1159
NET_ERROR_ON_WRITE = 1160
NET_WRITE_INTERRUPTED = 1161
TOO_LONG_STRING = 1162
TABLE_CANT_HANDLE_BLOB = 1163
TABLE_CANT_HANDLE_AUTO_INCREMENT = 1164
DELAYED_INSERT_TABLE_LOCKED = 1165
WRONG_COLUMN_NAME = 1166
WRONG_KEY_COLUMN = 1167
WRONG_MRG_TABLE = 1168
DUP_UNIQUE = 1169
BLOB_KEY_WITHOUT_LENGTH = 1170
PRIMARY_CANT_HAVE_NULL = 1171
TOO_MANY_ROWS = 1172
REQUIRES_PRIMARY_KEY = 1173
NO_RAID_COMPILED = 1174
UPDATE_WITHOUT_KEY_IN_SAFE_MODE = 1175
KEY_DOES_NOT_EXITS = 1176
CHECK_NO_SUCH_TABLE = 1177
CHECK_NOT_IMPLEMENTED = 1178
CANT_DO_THIS_DURING_AN_TRANSACTION = 1179
ERROR_DURING_COMMIT = 1180
ERROR_DURING_ROLLBACK = 1181
ERROR_DURING_FLUSH_LOGS = 1182
ERROR_DURING_CHECKPOINT = 1183
NEW_ABORTING_CONNECTION = 1184
DUMP_NOT_IMPLEMENTED = 1185
FLUSH_MASTER_BINLOG_CLOSED = 1186
INDEX_REBUILD = 1187
MASTER = 1188
MASTER_NET_READ = 1189
MASTER_NET_WRITE = 1190
FT_MATCHING_KEY_NOT_FOUND = 1191
LOCK_OR_ACTIVE_TRANSACTION = 1192
UNKNOWN_SYSTEM_VARIABLE = 1193
CRASHED_ON_USAGE = 1194
CRASHED_ON_REPAIR = 1195
WARNING_NOT_COMPLETE_ROLLBACK = 1196
TRANS_CACHE_FULL = 1197
SLAVE_MUST_STOP = 1198
SLAVE_NOT_RUNNING = 1199
BAD_SLAVE = 1200
MASTER_INFO = 1201
SLAVE_THREAD = 1202
TOO_MANY_USER_CONNECTIONS = 1203
SET_CONSTANTS_ONLY = 1204
LOCK_WAIT_TIMEOUT = 1205
LOCK_TABLE_FULL = 1206
READ_ONLY_TRANSACTION = 1207
DROP_DB_WITH_READ_LOCK = 1208
CREATE_DB_WITH_READ_LOCK = 1209
WRONG_ARGUMENTS = 1210
NO_PERMISSION_TO_CREATE_USER = 1211
UNION_TABLES_IN_DIFFERENT_DIR = 1212
LOCK_DEADLOCK = 1213
TABLE_CANT_HANDLE_FT = 1214
CANNOT_ADD_FOREIGN = 1215
NO_REFERENCED_ROW = 1216
ROW_IS_REFERENCED = 1217
CONNECT_TO_MASTER = 1218
QUERY_ON_MASTER = 1219
ERROR_WHEN_EXECUTING_COMMAND = 1220
WRONG_USAGE = 1221
WRONG_NUMBER_OF_COLUMNS_IN_SELECT = 1222
CANT_UPDATE_WITH_READLOCK = 1223
MIXING_NOT_ALLOWED = 1224
DUP_ARGUMENT = 1225
USER_LIMIT_REACHED = 1226
SPECIFIC_ACCESS_DENIED_ERROR = 1227
LOCAL_VARIABLE = 1228
GLOBAL_VARIABLE = 1229
NO_DEFAULT = 1230
WRONG_VALUE_FOR_VAR = 1231
WRONG_TYPE_FOR_VAR = 1232
VAR_CANT_BE_READ = 1233
CANT_USE_OPTION_HERE = 1234
NOT_SUPPORTED_YET = 1235
MASTER_FATAL_ERROR_READING_BINLOG = 1236
SLAVE_IGNORED_TABLE = 1237
INCORRECT_GLOBAL_LOCAL_VAR = 1238
WRONG_FK_DEF = 1239
KEY_REF_DO_NOT_MATCH_TABLE_REF = 1240
OPERAND_COLUMNS = 1241
SUBQUERY_NO_1_ROW = 1242
UNKNOWN_STMT_HANDLER = 1243
CORRUPT_HELP_DB = 1244
CYCLIC_REFERENCE = 1245
AUTO_CONVERT = 1246
ILLEGAL_REFERENCE = 1247
DERIVED_MUST_HAVE_ALIAS = 1248
SELECT_REDUCED = 1249
TABLENAME_NOT_ALLOWED_HERE = 1250
NOT_SUPPORTED_AUTH_MODE = 1251
SPATIAL_CANT_HAVE_NULL = 1252
COLLATION_CHARSET_MISMATCH = 1253
SLAVE_WAS_RUNNING = 1254
SLAVE_WAS_NOT_RUNNING = 1255
TOO_BIG_FOR_UNCOMPRESS = 1256
ZLIB_Z_MEM_ERROR = 1257
ZLIB_Z_BUF_ERROR = 1258
ZLIB_Z_DATA_ERROR = 1259
CUT_VALUE_GROUP_CONCAT = 1260
WARN_TOO_FEW_RECORDS = 1261
WARN_TOO_MANY_RECORDS = 1262
WARN_NULL_TO_NOTNULL = 1263
WARN_DATA_OUT_OF_RANGE = 1264
WARN_DATA_TRUNCATED = 1265
WARN_USING_OTHER_HANDLER = 1266
CANT_AGGREGATE_2COLLATIONS = 1267
DROP_USER = 1268
REVOKE_GRANTS = 1269
CANT_AGGREGATE_3COLLATIONS = 1270
CANT_AGGREGATE_NCOLLATIONS = 1271
VARIABLE_IS_NOT_STRUCT = 1272
UNKNOWN_COLLATION = 1273
SLAVE_IGNORED_SSL_PARAMS = 1274
SERVER_IS_IN_SECURE_AUTH_MODE = 1275
WARN_FIELD_RESOLVED = 1276
BAD_SLAVE_UNTIL_COND = 1277
MISSING_SKIP_SLAVE = 1278
UNTIL_COND_IGNORED = 1279
WRONG_NAME_FOR_INDEX = 1280
WRONG_NAME_FOR_CATALOG = 1281
WARN_QC_RESIZE = 1282
BAD_FT_COLUMN = 1283
UNKNOWN_KEY_CACHE = 1284
WARN_HOSTNAME_WONT_WORK = 1285
UNKNOWN_STORAGE_ENGINE = 1286
WARN_DEPRECATED_SYNTAX = 1287
NON_UPDATABLE_TABLE = 1288
FEATURE_DISABLED = 1289
OPTION_PREVENTS_STATEMENT = 1290
DUPLICATED_VALUE_IN_TYPE = 1291
TRUNCATED_WRONG_VALUE = 1292
TOO_MUCH_AUTO_TIMESTAMP_COLS = 1293
INVALID_ON_UPDATE = 1294
UNSUPPORTED_PS = 1295
GET_ERRMSG = 1296
GET_TEMPORARY_ERRMSG = 1297
UNKNOWN_TIME_ZONE = 1298
WARN_INVALID_TIMESTAMP = 1299
INVALID_CHARACTER_STRING = 1300
WARN_ALLOWED_PACKET_OVERFLOWED = 1301
CONFLICTING_DECLARATIONS = 1302
SP_NO_RECURSIVE_CREATE = 1303
SP_ALREADY_EXISTS = 1304
SP_DOES_NOT_EXIST = 1305
SP_DROP_FAILED = 1306
SP_STORE_FAILED = 1307
SP_LILABEL_MISMATCH = 1308
SP_LABEL_REDEFINE = 1309
SP_LABEL_MISMATCH = 1310
SP_UNINIT_VAR = 1311
SP_BADSELECT = 1312
SP_BADRETURN = 1313
SP_BADSTATEMENT = 1314
UPDATE_LOG_DEPRECATED_IGNORED = 1315
UPDATE_LOG_DEPRECATED_TRANSLATED = 1316
QUERY_INTERRUPTED = 1317
SP_WRONG_NO_OF_ARGS = 1318
SP_COND_MISMATCH = 1319
SP_NORETURN = 1320
SP_NORETURNEND = 1321
SP_BAD_CURSOR_QUERY = 1322
SP_BAD_CURSOR_SELECT = 1323
SP_CURSOR_MISMATCH = 1324
SP_CURSOR_ALREADY_OPEN = 1325
SP_CURSOR_NOT_OPEN = 1326
SP_UNDECLARED_VAR = 1327
SP_WRONG_NO_OF_FETCH_ARGS = 1328
SP_FETCH_NO_DATA = 1329
SP_DUP_PARAM = 1330
SP_DUP_VAR = 1331
SP_DUP_COND = 1332
SP_DUP_CURS = 1333
SP_CANT_ALTER = 1334
SP_SUBSELECT_NYI = 1335
STMT_NOT_ALLOWED_IN_SF_OR_TRG = 1336
SP_VARCOND_AFTER_CURSHNDLR = 1337
SP_CURSOR_AFTER_HANDLER = 1338
SP_CASE_NOT_FOUND = 1339
FPARSER_TOO_BIG_FILE = 1340
FPARSER_BAD_HEADER = 1341
FPARSER_EOF_IN_COMMENT = 1342
FPARSER_ERROR_IN_PARAMETER = 1343
FPARSER_EOF_IN_UNKNOWN_PARAMETER = 1344
VIEW_NO_EXPLAIN = 1345
FRM_UNKNOWN_TYPE = 1346
WRONG_OBJECT = 1347
NONUPDATEABLE_COLUMN = 1348
VIEW_SELECT_DERIVED = 1349
VIEW_SELECT_CLAUSE = 1350
VIEW_SELECT_VARIABLE = 1351
VIEW_SELECT_TMPTABLE = 1352
VIEW_WRONG_LIST = 1353
WARN_VIEW_MERGE = 1354
WARN_VIEW_WITHOUT_KEY = 1355
VIEW_INVALID = 1356
SP_NO_DROP_SP = 1357
SP_GOTO_IN_HNDLR = 1358
TRG_ALREADY_EXISTS = 1359
TRG_DOES_NOT_EXIST = 1360
TRG_ON_VIEW_OR_TEMP_TABLE = 1361
TRG_CANT_CHANGE_ROW = 1362
TRG_NO_SUCH_ROW_IN_TRG = 1363
NO_DEFAULT_FOR_FIELD = 1364
DIVISION_BY_ZERO = 1365
TRUNCATED_WRONG_VALUE_FOR_FIELD = 1366
ILLEGAL_VALUE_FOR_TYPE = 1367
VIEW_NONUPD_CHECK = 1368
VIEW_CHECK_FAILED = 1369
PROCACCESS_DENIED_ERROR = 1370
RELAY_LOG_FAIL = 1371
PASSWD_LENGTH = 1372
UNKNOWN_TARGET_BINLOG = 1373
IO_ERR_LOG_INDEX_READ = 1374
BINLOG_PURGE_PROHIBITED = 1375
FSEEK_FAIL = 1376
BINLOG_PURGE_FATAL_ERR = 1377
LOG_IN_USE = 1378
LOG_PURGE_UNKNOWN_ERR = 1379
RELAY_LOG_INIT = 1380
NO_BINARY_LOGGING = 1381
RESERVED_SYNTAX = 1382
WSAS_FAILED = 1383
DIFF_GROUPS_PROC = 1384
NO_GROUP_FOR_PROC = 1385
ORDER_WITH_PROC = 1386
LOGGING_PROHIBIT_CHANGING_OF = 1387
NO_FILE_MAPPING = 1388
WRONG_MAGIC = 1389
PS_MANY_PARAM = 1390
KEY_PART_0 = 1391
VIEW_CHECKSUM = 1392
VIEW_MULTIUPDATE = 1393
VIEW_NO_INSERT_FIELD_LIST = 1394
VIEW_DELETE_MERGE_VIEW = 1395
CANNOT_USER = 1396
XAER_NOTA = 1397
XAER_INVAL = 1398
XAER_RMFAIL = 1399
XAER_OUTSIDE = 1400
XAER_RMERR = 1401
XA_RBROLLBACK = 1402
NONEXISTING_PROC_GRANT = 1403
PROC_AUTO_GRANT_FAIL = 1404
PROC_AUTO_REVOKE_FAIL = 1405
DATA_TOO_LONG = 1406
SP_BAD_SQLSTATE = 1407
STARTUP = 1408
LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR = 1409
CANT_CREATE_USER_WITH_GRANT = 1410
WRONG_VALUE_FOR_TYPE = 1411
TABLE_DEF_CHANGED = 1412
SP_DUP_HANDLER = 1413
SP_NOT_VAR_ARG = 1414
SP_NO_RETSET = 1415
CANT_CREATE_GEOMETRY_OBJECT = 1416
FAILED_ROUTINE_BREAK_BINLOG = 1417
BINLOG_UNSAFE_ROUTINE = 1418
BINLOG_CREATE_ROUTINE_NEED_SUPER = 1419
EXEC_STMT_WITH_OPEN_CURSOR = 1420
STMT_HAS_NO_OPEN_CURSOR = 1421
COMMIT_NOT_ALLOWED_IN_SF_OR_TRG = 1422
NO_DEFAULT_FOR_VIEW_FIELD = 1423
SP_NO_RECURSION = 1424
TOO_BIG_SCALE = 1425
TOO_BIG_PRECISION = 1426
M_BIGGER_THAN_D = 1427
WRONG_LOCK_OF_SYSTEM_TABLE = 1428
CONNECT_TO_FOREIGN_DATA_SOURCE = 1429
QUERY_ON_FOREIGN_DATA_SOURCE = 1430
FOREIGN_DATA_SOURCE_DOESNT_EXIST = 1431
FOREIGN_DATA_STRING_INVALID_CANT_CREATE = 1432
FOREIGN_DATA_STRING_INVALID = 1433
CANT_CREATE_FEDERATED_TABLE = 1434
TRG_IN_WRONG_SCHEMA = 1435
STACK_OVERRUN_NEED_MORE = 1436
TOO_LONG_BODY = 1437
WARN_CANT_DROP_DEFAULT_KEYCACHE = 1438
TOO_BIG_DISPLAYWIDTH = 1439
XAER_DUPID = 1440
DATETIME_FUNCTION_OVERFLOW = 1441
CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG = 1442
VIEW_PREVENT_UPDATE = 1443
PS_NO_RECURSION = 1444
SP_CANT_SET_AUTOCOMMIT = 1445
MALFORMED_DEFINER = 1446
VIEW_FRM_NO_USER = 1447
VIEW_OTHER_USER = 1448
NO_SUCH_USER = 1449
FORBID_SCHEMA_CHANGE = 1450
ROW_IS_REFERENCED_2 = 1451
NO_REFERENCED_ROW_2 = 1452
SP_BAD_VAR_SHADOW = 1453
TRG_NO_DEFINER = 1454
OLD_FILE_FORMAT = 1455
SP_RECURSION_LIMIT = 1456
SP_PROC_TABLE_CORRUPT = 1457
SP_WRONG_NAME = 1458
TABLE_NEEDS_UPGRADE = 1459
SP_NO_AGGREGATE = 1460
MAX_PREPARED_STMT_COUNT_REACHED = 1461
VIEW_RECURSIVE = 1462
NON_GROUPING_FIELD_USED = 1463
TABLE_CANT_HANDLE_SPKEYS = 1464
NO_TRIGGERS_ON_SYSTEM_SCHEMA = 1465
USERNAME = 1466
HOSTNAME = 1467
WRONG_STRING_LENGTH = 1468
ERROR_LAST = 1468
# https://github.com/PyMySQL/PyMySQL/issues/607
CONSTRAINT_FAILED = 4025 | zdppy-mysql | /zdppy_mysql-0.2.5.tar.gz/zdppy_mysql-0.2.5/zdppy_mysql/constants/ER.py | ER.py |
CR_ERROR_FIRST = 2000
CR_UNKNOWN_ERROR = 2000
CR_SOCKET_CREATE_ERROR = 2001
CR_CONNECTION_ERROR = 2002
CR_CONN_HOST_ERROR = 2003
CR_IPSOCK_ERROR = 2004
CR_UNKNOWN_HOST = 2005
CR_SERVER_GONE_ERROR = 2006
CR_VERSION_ERROR = 2007
CR_OUT_OF_MEMORY = 2008
CR_WRONG_HOST_INFO = 2009
CR_LOCALHOST_CONNECTION = 2010
CR_TCP_CONNECTION = 2011
CR_SERVER_HANDSHAKE_ERR = 2012
CR_SERVER_LOST = 2013
CR_COMMANDS_OUT_OF_SYNC = 2014
CR_NAMEDPIPE_CONNECTION = 2015
CR_NAMEDPIPEWAIT_ERROR = 2016
CR_NAMEDPIPEOPEN_ERROR = 2017
CR_NAMEDPIPESETSTATE_ERROR = 2018
CR_CANT_READ_CHARSET = 2019
CR_NET_PACKET_TOO_LARGE = 2020
CR_EMBEDDED_CONNECTION = 2021
CR_PROBE_SLAVE_STATUS = 2022
CR_PROBE_SLAVE_HOSTS = 2023
CR_PROBE_SLAVE_CONNECT = 2024
CR_PROBE_MASTER_CONNECT = 2025
CR_SSL_CONNECTION_ERROR = 2026
CR_MALFORMED_PACKET = 2027
CR_WRONG_LICENSE = 2028
CR_NULL_POINTER = 2029
CR_NO_PREPARE_STMT = 2030
CR_PARAMS_NOT_BOUND = 2031
CR_DATA_TRUNCATED = 2032
CR_NO_PARAMETERS_EXISTS = 2033
CR_INVALID_PARAMETER_NO = 2034
CR_INVALID_BUFFER_USE = 2035
CR_UNSUPPORTED_PARAM_TYPE = 2036
CR_SHARED_MEMORY_CONNECTION = 2037
CR_SHARED_MEMORY_CONNECT_REQUEST_ERROR = 2038
CR_SHARED_MEMORY_CONNECT_ANSWER_ERROR = 2039
CR_SHARED_MEMORY_CONNECT_FILE_MAP_ERROR = 2040
CR_SHARED_MEMORY_CONNECT_MAP_ERROR = 2041
CR_SHARED_MEMORY_FILE_MAP_ERROR = 2042
CR_SHARED_MEMORY_MAP_ERROR = 2043
CR_SHARED_MEMORY_EVENT_ERROR = 2044
CR_SHARED_MEMORY_CONNECT_ABANDONED_ERROR = 2045
CR_SHARED_MEMORY_CONNECT_SET_ERROR = 2046
CR_CONN_UNKNOW_PROTOCOL = 2047
CR_INVALID_CONN_HANDLE = 2048
CR_SECURE_AUTH = 2049
CR_FETCH_CANCELED = 2050
CR_NO_DATA = 2051
CR_NO_STMT_METADATA = 2052
CR_NO_RESULT_SET = 2053
CR_NOT_IMPLEMENTED = 2054
CR_SERVER_LOST_EXTENDED = 2055
CR_STMT_CLOSED = 2056
CR_NEW_STMT_METADATA = 2057
CR_ALREADY_CONNECTED = 2058
CR_AUTH_PLUGIN_CANNOT_LOAD = 2059
CR_DUPLICATE_CONNECTION_ATTR = 2060
CR_AUTH_PLUGIN_ERR = 2061
CR_ERROR_LAST = 2061 | zdppy-mysql | /zdppy_mysql-0.2.5.tar.gz/zdppy_mysql-0.2.5/zdppy_mysql/constants/CR.py | CR.py |
import threading
class NacosTimer(object):
__slots__ = ['_name', '_timer', '_fn', '_interval', '_ignore_ex', '_on_result', '_on_exception',
'_args', '_kwargs']
def __init__(self,
name,
fn,
interval=7,
*args,
**kwargs):
"""
NacosTimer
        :param name: timer name
        :param fn: the function to be scheduled
        :param interval: scheduling interval in seconds, default 7
        :param args: positional args passed to fn
        :param kwargs: keyword args passed to fn
"""
        # timer name
        self._name = name
        # the underlying threading.Timer
        self._timer = None
        # the callable to schedule
        self._fn = fn
        # timer interval, default 7 seconds
        self._interval = interval
        # whether to ignore exceptions raised by fn
        self._ignore_ex = False
        self._on_result = None
        self._on_exception = None
        # positional args for fn
        self._args = args
        # keyword args for fn
        self._kwargs = kwargs
@property
def name(self):
return self._name
def set_name(self, name):
self._name = name
return self
@property
def fn(self):
return self._fn
def set_fn(self, fn):
self._fn = fn
return self
@property
def interval(self, ):
return self._interval
def set_interval(self, interval):
self._interval = interval
return self
@property
def ignore_ex(self):
return self._ignore_ex
def set_ignore_ex(self, ignore_ex):
self._ignore_ex = ignore_ex
return self
@property
def on_result(self):
return self._on_result
def set_on_result(self, fn):
self._on_result = fn
return self
@property
def on_exception(self):
return self._on_exception
def set_on_exception(self, fn):
self._on_exception = fn
return self
def alive(self):
if self._timer is None:
return False
return self._timer.is_alive()
def scheduler(self):
try:
res = self._fn(*self._args, **self._kwargs)
if self._on_result:
self._on_result(res)
except Exception as ex:
if self._on_exception:
self._on_exception(ex)
if not self._ignore_ex:
# stop timer
raise ex
self._timer = threading.Timer(self._interval, self.scheduler, )
self._timer.start()
def cancel(self):
if self._timer:
self._timer.cancel()
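
# Editor's note: a minimal usage sketch of NacosTimer (illustration only, not
# part of the original library); the tick callback below is hypothetical.
def _nacos_timer_demo():  # pragma: no cover - illustrative sketch
    import time

    def tick():
        return time.time()

    timer = (NacosTimer(name="demo-timer", fn=tick, interval=2)
             .set_ignore_ex(True)
             .set_on_result(print))
    timer.scheduler()  # runs tick now, then re-arms every 2 seconds
    time.sleep(5)
    timer.cancel()     # stops the underlying threading.Timer
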
class NacosTimerManager(object):
def __init__(self, ):
self._timers_container = {}
self._executed = False
def all_timers(self):
return self._timers_container
def add_timer(self, timer):
self._timers_container[timer.name] = timer
return self
def execute(self):
"""
scheduler all timer in manager
:return: None
"""
if self._executed:
return
for name, timer in self._timers_container.items():
if timer.alive():
continue
timer.scheduler()
self._executed = True
def cancel_timer(self, timer_name=None, ):
"""
        cancel a timer; the timer stays in the container
        and can be executed again.
:param timer_name:
:return: None
"""
timer = self._timers_container.get(timer_name)
if timer:
timer.cancel()
def cancel(self):
"""
cancel all timer in container
:return: None
"""
for _, timer in self._timers_container.items():
timer.cancel()
def stop_timer(self, timer_name):
"""
cancel nacos timer and remove it from timer container
:param timer_name:
:return: None
"""
self.cancel_timer(timer_name)
self._timers_container.pop(timer_name)
def stop(self):
"""
        remove all timers; they can not be executed again
"""
self.cancel()
self._timers_container.clear() | zdppy-nacos | /zdppy_nacos-0.1.0-py3-none-any.whl/zdppy_nacos/timer.py | timer.py |
import base64
import hashlib
import logging
import socket
import json
import platform
import time
try:
import ssl
except ImportError:
ssl = None
from multiprocessing import Process, Manager, Queue, pool
from threading import RLock, Thread
try:
# python3.6
from http import HTTPStatus
from urllib.request import Request, urlopen, ProxyHandler, build_opener
from urllib.parse import urlencode, unquote_plus, quote
from urllib.error import HTTPError, URLError
except ImportError:
# python2.7
import httplib as HTTPStatus
from urllib2 import Request, urlopen, HTTPError, URLError, ProxyHandler, build_opener
from urllib import urlencode, unquote_plus, quote
base64.encodebytes = base64.encodestring
from .commons import synchronized_with_attr, truncate, python_version_bellow
from .params import group_key, parse_key, is_valid
from .files import read_file_str, save_file, delete_file
from .exception import NacosException, NacosRequestException
from .listener import Event, SimpleListenerManager
from .timer import NacosTimer, NacosTimerManager
logging.basicConfig()
logger = logging.getLogger(__name__)
DEBUG = False
VERSION = "0.1.5"
DEFAULT_GROUP_NAME = "DEFAULT_GROUP"
DEFAULT_NAMESPACE = ""
WORD_SEPARATOR = u'\x02'
LINE_SEPARATOR = u'\x01'
DEFAULTS = {
"APP_NAME": "Nacos-SDK-Python",
"TIMEOUT": 3, # in seconds
"PULLING_TIMEOUT": 30, # in seconds
"PULLING_CONFIG_SIZE": 3000,
"CALLBACK_THREAD_NUM": 10,
"FAILOVER_BASE": "nacos-data/data",
"SNAPSHOT_BASE": "nacos-data/snapshot",
}
OPTIONS = {"default_timeout", "pulling_timeout", "pulling_config_size", "callback_thread_num", "failover_base",
"snapshot_base", "no_snapshot", "proxies"}
def process_common_config_params(data_id, group):
if not group or not group.strip():
group = DEFAULT_GROUP_NAME
else:
group = group.strip()
if not data_id or not is_valid(data_id):
raise NacosException("Invalid dataId.")
if not is_valid(group):
raise NacosException("Invalid group.")
return data_id, group
def parse_pulling_result(result):
if not result:
return list()
ret = list()
for i in unquote_plus(result.decode()).split(LINE_SEPARATOR):
if not i.strip():
continue
sp = i.split(WORD_SEPARATOR)
if len(sp) < 3:
sp.append("")
ret.append(sp)
return ret
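
# Editor's note (illustration only): a tiny sketch of the long-polling payload
# this parser handles. Each entry is dataId/group/tenant joined by
# WORD_SEPARATOR and terminated by LINE_SEPARATOR, URL-encoded by the server;
# the values below are made up.
def _parse_pulling_result_demo():  # pragma: no cover - illustrative sketch
    raw = quote(WORD_SEPARATOR.join(["cfg-1", "DEFAULT_GROUP", "ns-1"])
                + LINE_SEPARATOR).encode()
    assert parse_pulling_result(raw) == [["cfg-1", "DEFAULT_GROUP", "ns-1"]]
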
class WatcherWrap:
def __init__(self, key, callback, last_md5=None):
self.callback = callback
self.last_md5 = last_md5
self.watch_key = key
class CacheData:
def __init__(self, key, client):
self.key = key
local_value = read_file_str(client.failover_base, key) or read_file_str(client.snapshot_base, key)
self.content = local_value
self.md5 = hashlib.md5(local_value.encode("UTF-8")).hexdigest() if local_value else None
self.is_init = True
if not self.md5:
logger.debug("[init-cache] cache for %s does not have local value" % key)
class SubscribedLocalInstance(object):
def __init__(self, key, instance):
self.key = key
self.instance_id = instance["instanceId"]
self.md5 = NacosClient.get_md5(str(instance))
self.instance = instance
class SubscribedLocalManager(object):
def __init__(self):
self.manager = {
# "key1": {
# "LOCAL_INSTANCES": {
# "instanceId1": None,
# "instanceId2": None,
# "instanceId3": None,
# "instanceId4": None
# },
# "LISTENER_MANAGER": None
# },
# "key2": {
# "LOCAL_INSTANCES": {
# "instanceId1": "",
# "instanceId2": "",
# "instanceId3": "",
# "instanceId4": ""
# },
# "LISTENER_MANAGER": None
# }
}
def do_listener_launch(self, key, event, slc):
listener_manager = self.get_local_listener_manager(key)
if listener_manager and isinstance(listener_manager, SimpleListenerManager):
listener_manager.do_launch(event, slc)
def get_local_listener_manager(self, key):
key_node = self.manager.get(key)
if not key_node:
return None
return key_node.get("LISTENER_MANAGER")
def add_local_listener(self, key, listener_fn):
if not self.manager.get(key):
self.manager[key] = {}
local_listener_manager = self.manager.get(key).get("LISTENER_MANAGER")
if not local_listener_manager or not isinstance(local_listener_manager, SimpleListenerManager):
self.manager.get(key)["LISTENER_MANAGER"] = SimpleListenerManager()
local_listener_manager = self.manager.get(key).get("LISTENER_MANAGER")
if not local_listener_manager:
return self
        if isinstance(listener_fn, list):
            listener_fn = tuple(listener_fn)
        # multiple listener functions
        if isinstance(listener_fn, tuple):
            local_listener_manager.add_listeners(*listener_fn)
        # just a single listener function
        else:
            local_listener_manager.add_listener(listener_fn)
return self
def add_local_listener_manager(self, key, listener_manager):
        key_node = self.manager.get(key)
        if key_node is None:
            key_node = {}
            self.manager[key] = key_node
        key_node["LISTENER_MANAGER"] = listener_manager
return self
def get_local_instances(self, key):
if not self.manager.get(key):
return None
return self.manager.get(key).get("LOCAL_INSTANCES")
def add_local_instance(self, slc):
if not self.manager.get(slc.key):
self.manager[slc.key] = {}
if not self.manager.get(slc.key).get('LOCAL_INSTANCES'):
self.manager.get(slc.key)['LOCAL_INSTANCES'] = {}
self.manager.get(slc.key)['LOCAL_INSTANCES'][slc.instance_id] = slc
return self
def remove_local_instance(self, slc):
key_node = self.manager.get(slc.key)
if not key_node:
return self
local_instances_node = key_node.get("LOCAL_INSTANCES")
if not local_instances_node:
return self
local_instance = local_instances_node.get(slc.instance_id)
if not local_instance:
return self
local_instances_node.pop(slc.instance_id)
return self
def parse_nacos_server_addr(server_addr):
sp = server_addr.split(":")
port = int(sp[1]) if len(sp) > 1 else 8848
return sp[0], port
class NacosClient:
"""
    Nacos client class
"""
debug = False
@staticmethod
def set_debugging():
if not NacosClient.debug:
global logger
logger = logging.getLogger("nacos")
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(name)s:%(message)s"))
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
NacosClient.debug = True
@staticmethod
def get_md5(content):
return hashlib.md5(content.encode("UTF-8")).hexdigest() if content is not None else None
def __init__(self,
server_addresses: str = "127.0.0.1:8848",
endpoint=None,
namespace: str = "",
ak=None,
sk=None,
username: str = "nacos",
password: str = "nacos"):
        # the namespace ID is required
        if not namespace:
            raise Exception("namespace ID must not be empty")
self.server_list = list()
try:
for server_addr in server_addresses.split(","):
self.server_list.append(parse_nacos_server_addr(server_addr.strip()))
except Exception as ex:
logger.exception("[init] bad server address for %s" % server_addresses)
raise ex
self.current_server = self.server_list[0]
self.endpoint = endpoint
        self.namespace = namespace  # namespace ID
self.ak = ak
self.sk = sk
        self.username = username  # username for authentication
        self.password = password  # password for authentication
self.server_list_lock = RLock()
self.server_offset = 0
self.watcher_mapping = dict()
self.subscribed_local_manager = SubscribedLocalManager()
self.subscribe_timer_manager = NacosTimerManager()
self.pulling_lock = RLock()
self.puller_mapping = None
self.notify_queue = None
        self.callback_thread_pool = None
self.process_mgr = None
self.default_timeout = DEFAULTS["TIMEOUT"]
self.auth_enabled = self.ak and self.sk
self.cai_enabled = True
self.pulling_timeout = DEFAULTS["PULLING_TIMEOUT"]
self.pulling_config_size = DEFAULTS["PULLING_CONFIG_SIZE"]
self.callback_thread_num = DEFAULTS["CALLBACK_THREAD_NUM"]
self.failover_base = DEFAULTS["FAILOVER_BASE"]
self.snapshot_base = DEFAULTS["SNAPSHOT_BASE"]
self.no_snapshot = False
self.proxies = None
logger.info("[client-init] endpoint:%s, tenant:%s" % (endpoint, namespace))
def set_options(self, **kwargs):
for k, v in kwargs.items():
if k not in OPTIONS:
logger.warning("[set_options] unknown option:%s, ignored" % k)
continue
logger.debug("[set_options] key:%s, value:%s" % (k, v))
setattr(self, k, v)
def change_server(self):
with self.server_list_lock:
self.server_offset = (self.server_offset + 1) % len(self.server_list)
self.current_server = self.server_list[self.server_offset]
def get_server(self):
logger.info("[get-server] use server:%s" % str(self.current_server))
return self.current_server
def remove_config(self, data_id, group, timeout=None):
data_id, group = process_common_config_params(data_id, group)
logger.info(
"[remove] data_id:%s, group:%s, namespace:%s, timeout:%s" % (data_id, group, self.namespace, timeout))
params = {
"dataId": data_id,
"group": group,
}
if self.namespace:
params["tenant"] = self.namespace
try:
resp = self._do_sync_req("/nacos/v1/cs/configs", None, None, params,
timeout or self.default_timeout, "DELETE")
c = resp.read()
logger.info("[remove] remove group:%s, data_id:%s, server response:%s" % (
group, data_id, c))
return c == b"true"
except HTTPError as e:
if e.code == HTTPStatus.FORBIDDEN:
logger.error(
"[remove] no right for namespace:%s, group:%s, data_id:%s" % (self.namespace, group, data_id))
raise NacosException("Insufficient privilege.")
else:
logger.error("[remove] error code [:%s] for namespace:%s, group:%s, data_id:%s" % (
e.code, self.namespace, group, data_id))
raise NacosException("Request Error, code is %s" % e.code)
except Exception as e:
logger.exception("[remove] exception %s occur" % str(e))
raise
def publish_config(self, data_id, group, content, app_name=None, timeout=None):
if content is None:
raise NacosException("Can not publish none content, use remove instead.")
data_id, group = process_common_config_params(data_id, group)
        if isinstance(content, bytes):
content = content.decode("UTF-8")
logger.info("[publish] data_id:%s, group:%s, namespace:%s, content:%s, timeout:%s" % (
data_id, group, self.namespace, truncate(content), timeout))
params = {
"dataId": data_id,
"group": group,
"content": content.encode("UTF-8"),
}
if self.namespace:
params["tenant"] = self.namespace
if app_name:
params["appName"] = app_name
try:
resp = self._do_sync_req("/nacos/v1/cs/configs", None, None, params,
timeout or self.default_timeout, "POST")
c = resp.read()
logger.info("[publish] publish content, group:%s, data_id:%s, server response:%s" % (
group, data_id, c))
return c == b"true"
except HTTPError as e:
if e.code == HTTPStatus.FORBIDDEN:
raise NacosException("Insufficient privilege.")
else:
raise NacosException("Request Error, code is %s" % e.code)
except Exception as e:
logger.exception("[publish] exception %s occur" % str(e))
raise
def get_config(self,
data_id: str,
group: str = "dev",
timeout=None,
no_snapshot=None) -> str:
"""
        Get a configuration value.
        :param data_id: configuration data id
        :param group: group name
        :param timeout: request timeout
        :param no_snapshot: skip the local snapshot cache
        :return: the configuration content as a string
"""
no_snapshot = self.no_snapshot if no_snapshot is None else no_snapshot
data_id, group = process_common_config_params(data_id, group)
params = {
"dataId": data_id,
"group": group,
}
if self.namespace:
params["tenant"] = self.namespace
cache_key = group_key(data_id, group, self.namespace)
        # read from the local failover cache first
content = read_file_str(self.failover_base, cache_key)
if content:
return content
        # fetch from the server
try:
resp = self._do_sync_req("/nacos/v1/cs/configs", None, params, None, timeout or self.default_timeout)
content = resp.read().decode("UTF-8")
except HTTPError as e:
if e.code == HTTPStatus.NOT_FOUND:
logger.warning(
"[get-config] config not found for data_id:%s, group:%s, namespace:%s, try to delete snapshot" % (
data_id, group, self.namespace))
delete_file(self.snapshot_base, cache_key)
return ""
elif e.code == HTTPStatus.CONFLICT:
logger.error(
"[get-config] config being modified concurrently for data_id:%s, group:%s, namespace:%s" % (
data_id, group, self.namespace))
elif e.code == HTTPStatus.FORBIDDEN:
logger.error("[get-config] no right for data_id:%s, group:%s, namespace:%s" % (
data_id, group, self.namespace))
raise NacosException("Insufficient privilege.")
else:
logger.error("[get-config] error code [:%s] for data_id:%s, group:%s, namespace:%s" % (
e.code, data_id, group, self.namespace))
if no_snapshot:
raise
except Exception as e:
logger.exception("[get-config] exception %s occur" % str(e))
if no_snapshot:
raise
if no_snapshot:
return content
        # got content from the server
if content is not None:
logger.info(
"[get-config] content from server:%s, data_id:%s, group:%s, namespace:%s, try to save snapshot" % (
truncate(content), data_id, group, self.namespace))
try:
save_file(self.snapshot_base, cache_key, content)
except Exception as e:
logger.exception("[get-config] save snapshot failed for %s, data_id:%s, group:%s, namespace:%s" % (
data_id, group, self.namespace, str(e)))
return content
        # nothing from the server, fall back to the local snapshot
logger.error("[get-config] get config from server failed, try snapshot, data_id:%s, group:%s, namespace:%s" % (
data_id, group, self.namespace))
content = read_file_str(self.snapshot_base, cache_key)
if content is None:
logger.warning("[get-config] snapshot is not exist for %s." % cache_key)
else:
logger.debug("[get-config] get %s from snapshot directory, content is %s" % (cache_key, truncate(content)))
return content
def get_configs(self, timeout=None, no_snapshot=None, group="", page_no=1, page_size=1000):
no_snapshot = self.no_snapshot if no_snapshot is None else no_snapshot
logger.info("[get-configs] namespace:%s, timeout:%s, group:%s, page_no:%s, page_size:%s" % (
self.namespace, timeout, group, page_no, page_size))
params = {
"dataId": "",
"group": group,
"search": "accurate",
"pageNo": page_no,
"pageSize": page_size,
}
if self.namespace:
params["tenant"] = self.namespace
cache_key = group_key("", "", self.namespace)
# get from failover
content = read_file_str(self.failover_base, cache_key)
if content is None:
logger.debug("[get-config] failover config is not exist for %s, try to get from server" % cache_key)
else:
logger.debug("[get-config] get %s from failover directory, content is %s" % (cache_key, truncate(content)))
return json.loads(content)
# get from server
try:
resp = self._do_sync_req("/nacos/v1/cs/configs", None, params, None, timeout or self.default_timeout)
content = resp.read().decode("UTF-8")
except HTTPError as e:
if e.code == HTTPStatus.CONFLICT:
logger.error(
"[get-configs] configs being modified concurrently for namespace:%s" % self.namespace)
elif e.code == HTTPStatus.FORBIDDEN:
logger.error("[get-configs] no right for namespace:%s" % self.namespace)
raise NacosException("Insufficient privilege.")
else:
logger.error("[get-configs] error code [:%s] for namespace:%s" % (e.code, self.namespace))
if no_snapshot:
raise
except Exception as e:
logger.exception("[get-config] exception %s occur" % str(e))
if no_snapshot:
raise
if no_snapshot:
return json.loads(content)
if content is not None:
logger.info(
"[get-configs] content from server:%s, namespace:%s, try to save snapshot" % (
truncate(content), self.namespace))
try:
save_file(self.snapshot_base, cache_key, content)
for item in json.loads(content).get("pageItems"):
data_id = item.get('dataId')
group = item.get('group')
item_content = item.get('content')
item_cache_key = group_key(data_id, group, self.namespace)
save_file(self.snapshot_base, item_cache_key, item_content)
except Exception as e:
logger.exception("[get-configs] save snapshot failed for %s, namespace:%s" % (
str(e), self.namespace))
return json.loads(content)
logger.error("[get-configs] get config from server failed, try snapshot, namespace:%s" % self.namespace)
content = read_file_str(self.snapshot_base, cache_key)
if content is None:
logger.warning("[get-configs] snapshot is not exist for %s." % cache_key)
else:
logger.debug("[get-configs] get %s from snapshot directory, content is %s" % (cache_key, truncate(content)))
return json.loads(content)
@synchronized_with_attr("pulling_lock")
def add_config_watcher(self, data_id, group, cb, content=None):
"""监听配置变化"""
self.add_config_watchers(data_id, group, [cb], content)
@synchronized_with_attr("pulling_lock")
def add_config_watchers(self, data_id, group, cb_list, content=None):
"""监听配置变化"""
if not cb_list:
raise NacosException("A callback function is needed.")
data_id, group = process_common_config_params(data_id, group)
logger.info("[add-watcher] data_id:%s, group:%s, namespace:%s" % (data_id, group, self.namespace))
cache_key = group_key(data_id, group, self.namespace)
wl = self.watcher_mapping.get(cache_key)
if not wl:
wl = list()
self.watcher_mapping[cache_key] = wl
if not content:
content = self.get_config(data_id, group)
last_md5 = NacosClient.get_md5(content)
for cb in cb_list:
wl.append(WatcherWrap(cache_key, cb, last_md5))
logger.info("[add-watcher] watcher has been added for key:%s, new callback is:%s, callback number is:%s" % (
cache_key, cb.__name__, len(wl)))
if self.puller_mapping is None:
logger.debug("[add-watcher] pulling should be initialized")
self._init_pulling()
if cache_key in self.puller_mapping:
logger.debug("[add-watcher] key:%s is already in pulling" % cache_key)
return
for key, puller_info in self.puller_mapping.items():
if len(puller_info[1]) < self.pulling_config_size:
logger.debug("[add-watcher] puller:%s is available, add key:%s" % (puller_info[0], cache_key))
puller_info[1].append(cache_key)
self.puller_mapping[cache_key] = puller_info
break
else:
logger.debug("[add-watcher] no puller available, new one and add key:%s" % cache_key)
key_list = self.process_mgr.list()
key_list.append(cache_key)
sys_os = platform.system()
if sys_os == 'Windows':
                puller = Thread(target=self._do_pulling, args=(key_list, self.notify_queue))
                puller.daemon = True
else:
puller = Process(target=self._do_pulling, args=(key_list, self.notify_queue))
puller.daemon = True
puller.start()
self.puller_mapping[cache_key] = (puller, key_list)
@synchronized_with_attr("pulling_lock")
def remove_config_watcher(self, data_id, group, cb, remove_all=False):
if not cb:
raise NacosException("A callback function is needed.")
data_id, group = process_common_config_params(data_id, group)
if not self.puller_mapping:
logger.warning("[remove-watcher] watcher is never started.")
return
cache_key = group_key(data_id, group, self.namespace)
wl = self.watcher_mapping.get(cache_key)
if not wl:
logger.warning("[remove-watcher] there is no watcher on key:%s" % cache_key)
return
wrap_to_remove = list()
for i in wl:
if i.callback == cb:
wrap_to_remove.append(i)
if not remove_all:
break
for i in wrap_to_remove:
wl.remove(i)
logger.info("[remove-watcher] %s is removed from %s, remove all:%s" % (cb.__name__, cache_key, remove_all))
if not wl:
logger.debug("[remove-watcher] there is no watcher for:%s, kick out from pulling" % cache_key)
self.watcher_mapping.pop(cache_key)
puller_info = self.puller_mapping[cache_key]
puller_info[1].remove(cache_key)
if not puller_info[1]:
logger.debug("[remove-watcher] there is no pulling keys for puller:%s, stop it" % puller_info[0])
self.puller_mapping.pop(cache_key)
if isinstance(puller_info[0], Process):
puller_info[0].terminate()
def _do_sync_req(self, url, headers=None, params=None, data=None, timeout=None, method="GET"):
if self.username and self.password:
if not params:
params = {}
params.update({"username": self.username, "password": self.password})
url = "?".join([url, urlencode(params)]) if params else url
all_headers = self._get_common_headers(params, data)
if headers:
all_headers.update(headers)
logger.debug(
"[do-sync-req] url:%s, headers:%s, params:%s, data:%s, timeout:%s" % (
url, all_headers, params, data, timeout))
tries = 0
while True:
try:
server_info = self.get_server()
if not server_info:
logger.error("[do-sync-req] can not get one server.")
raise NacosRequestException("Server is not available.")
address, port = server_info
server = ":".join([address, str(port)])
server_url = "%s://%s" % ("http", server)
if python_version_bellow("3"):
req = Request(url=server_url + url, data=urlencode(data).encode() if data else None,
headers=all_headers)
req.get_method = lambda: method
else:
req = Request(url=server_url + url, data=urlencode(data).encode() if data else None,
headers=all_headers, method=method)
# build a new opener that adds proxy setting so that http request go through the proxy
if self.proxies:
proxy_support = ProxyHandler(self.proxies)
opener = build_opener(proxy_support)
resp = opener.open(req, timeout=timeout)
else:
# for python version compatibility
if python_version_bellow("2.7.9"):
resp = urlopen(req, timeout=timeout)
else:
resp = urlopen(req, timeout=timeout, context=None)
logger.debug("[do-sync-req] info from server:%s" % server)
return resp
except HTTPError as e:
if e.code in [HTTPStatus.INTERNAL_SERVER_ERROR, HTTPStatus.BAD_GATEWAY,
HTTPStatus.SERVICE_UNAVAILABLE]:
logger.warning("[do-sync-req] server:%s is not available for reason:%s" % (server, e.msg))
else:
raise
except socket.timeout:
logger.warning("[do-sync-req] %s request timeout" % server)
except URLError as e:
logger.warning("[do-sync-req] %s connection error:%s" % (server, e.reason))
tries += 1
if tries >= len(self.server_list):
logger.error("[do-sync-req] %s maybe down, no server is currently available" % server)
raise NacosRequestException("All server are not available")
self.change_server()
logger.warning("[do-sync-req] %s maybe down, skip to next" % server)
def _do_pulling(self, cache_list, queue):
cache_pool = dict()
for cache_key in cache_list:
cache_pool[cache_key] = CacheData(cache_key, self)
while cache_list:
unused_keys = set(cache_pool.keys())
contains_init_key = False
probe_update_string = ""
for cache_key in cache_list:
cache_data = cache_pool.get(cache_key)
if not cache_data:
logger.debug("[do-pulling] new key added: %s" % cache_key)
cache_data = CacheData(cache_key, self)
cache_pool[cache_key] = cache_data
else:
unused_keys.remove(cache_key)
if cache_data.is_init:
contains_init_key = True
data_id, group, namespace = parse_key(cache_key)
probe_update_string += WORD_SEPARATOR.join(
[data_id, group, cache_data.md5 or "", self.namespace]) + LINE_SEPARATOR
for k in unused_keys:
logger.debug("[do-pulling] %s is no longer watched, remove from cache" % k)
cache_pool.pop(k)
logger.debug(
"[do-pulling] try to detected change from server probe string is %s" % truncate(probe_update_string))
headers = {"Long-Pulling-Timeout": int(self.pulling_timeout * 1000)}
# if contains_init_key:
# headers["longPullingNoHangUp"] = "true"
data = {"Listening-Configs": probe_update_string}
changed_keys = list()
try:
resp = self._do_sync_req("/nacos/v1/cs/configs/listener", headers, None, data,
self.pulling_timeout + 10, "POST")
changed_keys = [group_key(*i) for i in parse_pulling_result(resp.read())]
logger.debug("[do-pulling] following keys are changed from server %s" % truncate(str(changed_keys)))
except NacosException as e:
logger.error("[do-pulling] nacos exception: %s, waiting for recovery" % str(e))
time.sleep(1)
except Exception as e:
logger.exception("[do-pulling] exception %s occur, return empty list, waiting for recovery" % str(e))
time.sleep(1)
for cache_key, cache_data in cache_pool.items():
cache_data.is_init = False
if cache_key in changed_keys:
data_id, group, namespace = parse_key(cache_key)
content = self.get_config(data_id, group)
cache_data.md5 = NacosClient.get_md5(content)
cache_data.content = content
queue.put((cache_key, cache_data.content, cache_data.md5))
@synchronized_with_attr("pulling_lock")
def _init_pulling(self):
if self.puller_mapping is not None:
logger.info("[init-pulling] puller is already initialized")
return
self.puller_mapping = dict()
self.notify_queue = Queue()
        self.callback_thread_pool = pool.ThreadPool(self.callback_thread_num)
self.process_mgr = Manager()
        t = Thread(target=self._process_polling_result)
        t.daemon = True
t.start()
logger.info("[init-pulling] init completed")
def _process_polling_result(self):
while True:
cache_key, content, md5 = self.notify_queue.get()
logger.debug("[process-polling-result] receive an event:%s" % cache_key)
wl = self.watcher_mapping.get(cache_key)
if not wl:
logger.warning("[process-polling-result] no watcher on %s, ignored" % cache_key)
continue
data_id, group, namespace = parse_key(cache_key)
plain_content = content
params = {
"data_id": data_id,
"group": group,
"namespace": namespace,
"raw_content": content,
"content": plain_content,
}
for watcher in wl:
if not watcher.last_md5 == md5:
logger.debug(
"[process-polling-result] md5 changed since last call, calling %s with changed params: %s"
% (watcher.callback.__name__, params))
try:
                        self.callback_thread_pool.apply(watcher.callback, (params,))
except Exception as e:
logger.exception("[process-polling-result] exception %s occur while calling %s " % (
str(e), watcher.callback.__name__))
watcher.last_md5 = md5
def _get_common_headers(self, params, data):
return {}
def _build_metadata(self, metadata, params):
if metadata:
if isinstance(metadata, dict):
params["metadata"] = json.dumps(metadata)
else:
params["metadata"] = metadata
def add_naming_instance(self, service_name, ip, port, cluster_name=None, weight=1.0, metadata=None,
enable=True, healthy=True, ephemeral=True, group_name=DEFAULT_GROUP_NAME):
logger.info("[add-naming-instance] ip:%s, port:%s, service_name:%s, namespace:%s" % (
ip, port, service_name, self.namespace))
params = {
"ip": ip,
"port": port,
"serviceName": service_name,
"weight": weight,
"enable": enable,
"healthy": healthy,
"clusterName": cluster_name,
"ephemeral": ephemeral,
"groupName": group_name
}
self._build_metadata(metadata, params)
if self.namespace:
params["namespaceId"] = self.namespace
try:
resp = self._do_sync_req("/nacos/v1/ns/instance", None, None, params, self.default_timeout, "POST")
c = resp.read()
logger.info("[add-naming-instance] ip:%s, port:%s, service_name:%s, namespace:%s, server response:%s" % (
ip, port, service_name, self.namespace, c))
return c == b"ok"
except HTTPError as e:
if e.code == HTTPStatus.FORBIDDEN:
raise NacosException("Insufficient privilege.")
else:
raise NacosException("Request Error, code is %s" % e.code)
except Exception as e:
logger.exception("[add-naming-instance] exception %s occur" % str(e))
raise
def remove_naming_instance(self, service_name, ip, port, cluster_name=None, ephemeral=True,
group_name=DEFAULT_GROUP_NAME):
logger.info("[remove-naming-instance] ip:%s, port:%s, service_name:%s, namespace:%s" % (
ip, port, service_name, self.namespace))
params = {
"ip": ip,
"port": port,
"serviceName": service_name,
"ephemeral": ephemeral,
"groupName": group_name
}
if cluster_name is not None:
params["clusterName"] = cluster_name
if self.namespace:
params["namespaceId"] = self.namespace
try:
resp = self._do_sync_req("/nacos/v1/ns/instance", None, None, params, self.default_timeout, "DELETE")
c = resp.read()
logger.info("[remove-naming-instance] ip:%s, port:%s, service_name:%s, namespace:%s, server response:%s" % (
ip, port, service_name, self.namespace, c))
return c == b"ok"
except HTTPError as e:
if e.code == HTTPStatus.FORBIDDEN:
raise NacosException("Insufficient privilege.")
else:
raise NacosException("Request Error, code is %s" % e.code)
except Exception as e:
logger.exception("[remove-naming-instance] exception %s occur" % str(e))
raise
def modify_naming_instance(self, service_name, ip, port, cluster_name=None, weight=None, metadata=None,
enable=None, ephemeral=True, group_name=DEFAULT_GROUP_NAME):
logger.info("[modify-naming-instance] ip:%s, port:%s, service_name:%s, namespace:%s" % (
ip, port, service_name, self.namespace))
params = {
"ip": ip,
"port": port,
"serviceName": service_name,
"ephemeral": ephemeral,
"groupName": group_name
}
if cluster_name is not None:
params["clusterName"] = cluster_name
if enable is not None:
params["enable"] = enable
if weight is not None:
params["weight"] = weight
self._build_metadata(metadata, params)
if self.namespace:
params["namespaceId"] = self.namespace
try:
resp = self._do_sync_req("/nacos/v1/ns/instance", None, None, params, self.default_timeout, "PUT")
c = resp.read()
logger.info("[modify-naming-instance] ip:%s, port:%s, service_name:%s, namespace:%s, server response:%s" % (
ip, port, service_name, self.namespace, c))
return c == b"ok"
except HTTPError as e:
if e.code == HTTPStatus.FORBIDDEN:
raise NacosException("Insufficient privilege.")
else:
raise NacosException("Request Error, code is %s" % e.code)
except Exception as e:
logger.exception("[modify-naming-instance] exception %s occur" % str(e))
raise
def list_naming_instance(self, service_name, clusters=None, namespace_id=None, group_name=None, healthy_only=False):
"""
        :param service_name: service name
        :param clusters: cluster names, a comma-separated string
        :param namespace_id: namespace ID
        :param group_name: group name
        :param healthy_only: whether to return only healthy instances, defaults to False
"""
logger.info("[list-naming-instance] service_name:%s, namespace:%s" % (service_name, self.namespace))
params = {
"serviceName": service_name,
"healthyOnly": healthy_only
}
if clusters is not None:
params["clusters"] = clusters
namespace_id = namespace_id or self.namespace
if namespace_id:
params["namespaceId"] = namespace_id
        params['groupName'] = group_name or DEFAULT_GROUP_NAME
try:
resp = self._do_sync_req("/nacos/v1/ns/instance/list", None, params, None, self.default_timeout, "GET")
c = resp.read()
logger.info("[list-naming-instance] service_name:%s, namespace:%s, server response:%s" %
(service_name, self.namespace, c))
return json.loads(c.decode("UTF-8"))
except HTTPError as e:
if e.code == HTTPStatus.FORBIDDEN:
raise NacosException("Insufficient privilege.")
else:
raise NacosException("Request Error, code is %s" % e.code)
except Exception as e:
logger.exception("[list-naming-instance] exception %s occur" % str(e))
raise
def get_naming_instance(self, service_name, ip, port, cluster_name=None):
logger.info("[get-naming-instance] ip:%s, port:%s, service_name:%s, namespace:%s" % (ip, port, service_name,
self.namespace))
params = {
"serviceName": service_name,
"ip": ip,
"port": port,
}
if cluster_name is not None:
params["cluster"] = cluster_name
params["clusterName"] = cluster_name
if self.namespace:
params["namespaceId"] = self.namespace
try:
resp = self._do_sync_req("/nacos/v1/ns/instance", None, params, None, self.default_timeout, "GET")
c = resp.read()
logger.info("[get-naming-instance] ip:%s, port:%s, service_name:%s, namespace:%s, server response:%s" %
(ip, port, service_name, self.namespace, c))
return json.loads(c.decode("UTF-8"))
except HTTPError as e:
if e.code == HTTPStatus.FORBIDDEN:
raise NacosException("Insufficient privilege.")
else:
raise NacosException("Request Error, code is %s" % e.code)
except Exception as e:
logger.exception("[get-naming-instance] exception %s occur" % str(e))
raise
def send_heartbeat(self, service_name, ip, port, cluster_name=None, weight=1.0, metadata=None, ephemeral=True,
group_name=DEFAULT_GROUP_NAME):
logger.info("[send-heartbeat] ip:%s, port:%s, service_name:%s, namespace:%s" % (ip, port, service_name,
self.namespace))
beat_data = {
"serviceName": service_name,
"ip": ip,
"port": port,
"weight": weight,
"ephemeral": ephemeral
}
if cluster_name is not None:
beat_data["cluster"] = cluster_name
if metadata is not None:
if isinstance(metadata, str):
beat_data["metadata"] = json.loads(metadata)
else:
beat_data["metadata"] = metadata
params = {
"serviceName": service_name,
"beat": json.dumps(beat_data),
"groupName": group_name
}
if self.namespace:
params["namespaceId"] = self.namespace
try:
resp = self._do_sync_req("/nacos/v1/ns/instance/beat", None, params, None, self.default_timeout, "PUT")
c = resp.read()
logger.info("[send-heartbeat] ip:%s, port:%s, service_name:%s, namespace:%s, server response:%s" %
(ip, port, service_name, self.namespace, c))
return json.loads(c.decode("UTF-8"))
except HTTPError as e:
if e.code == HTTPStatus.FORBIDDEN:
raise NacosException("Insufficient privilege.")
else:
raise NacosException("Request Error, code is %s" % e.code)
except Exception as e:
logger.exception("[send-heartbeat] exception %s occur" % str(e))
raise
def subscribe(self,
listener_fn, listener_interval=7, *args, **kwargs):
"""
        reference: `/nacos/v1/ns/instance/list` in https://nacos.io/zh-cn/docs/open-api.html
        :param listener_fn: listener callback(s): a single function, or a list/tuple of functions
        :param listener_interval: polling interval in seconds between OpenAPI requests
:return:
"""
service_name = kwargs.get("service_name")
if not service_name:
if len(args) > 0:
service_name = args[0]
else:
raise NacosException("`service_name` is required in subscribe")
self.subscribed_local_manager.add_local_listener(key=service_name, listener_fn=listener_fn)
        # track whether this is the first subscribe invocation
class _InnerSubContext(object):
first_sub = True
def _compare_and_trigger_listener():
# invoke `list_naming_instance`
latest_res = self.list_naming_instance(*args, **kwargs)
latest_instances = latest_res['hosts']
            # fetch the locally cached instances
local_service_instances_dict = self.subscribed_local_manager.get_local_instances(service_name)
            # nothing cached locally yet, so all instances are new
if not local_service_instances_dict:
if not latest_instances or len(latest_instances) < 1:
                    # do not notify on the first subscribe invocation
if _InnerSubContext.first_sub:
_InnerSubContext.first_sub = False
return
for instance in latest_instances:
slc = SubscribedLocalInstance(key=service_name, instance=instance)
self.subscribed_local_manager.add_local_instance(slc)
                    # do not notify on the first subscribe invocation
if _InnerSubContext.first_sub:
_InnerSubContext.first_sub = False
return
self.subscribed_local_manager.do_listener_launch(service_name, Event.ADDED, slc)
else:
local_service_instances_dict_copy = local_service_instances_dict.copy()
for instance in latest_instances:
slc = SubscribedLocalInstance(key=service_name, instance=instance)
local_slc = local_service_instances_dict.get(slc.instance_id)
                    # the instance is not in the local cache
if local_slc is None:
self.subscribed_local_manager.add_local_instance(slc)
self.subscribed_local_manager.do_listener_launch(service_name, Event.ADDED, slc)
                    # the instance exists in the local cache
else:
local_slc_md5 = local_slc.md5
local_slc_id = local_slc.instance_id
local_service_instances_dict_copy.pop(local_slc_id)
                        # md5 differs, so the instance has changed
if local_slc_md5 != slc.md5:
self.subscribed_local_manager.remove_local_instance(local_slc).add_local_instance(slc)
self.subscribed_local_manager.do_listener_launch(service_name, Event.MODIFIED, slc)
            # instances remaining in the local copy were deleted on the server
if len(local_service_instances_dict_copy) > 0:
for local_slc_id, slc in local_service_instances_dict_copy.items():
self.subscribed_local_manager.remove_local_instance(slc)
self.subscribed_local_manager.do_listener_launch(service_name, Event.DELETED, slc)
timer_name = 'service-subscribe-timer-{key}'.format(key=service_name)
subscribe_timer = NacosTimer(name=timer_name,
interval=listener_interval,
fn=_compare_and_trigger_listener)
subscribe_timer.scheduler()
self.subscribe_timer_manager.add_timer(subscribe_timer)
def unsubscribe(self, service_name, listener_name=None):
"""
remove listener from subscribed listener manager
:param service_name: service_name
:param listener_name: listener name
:return:
"""
listener_manager = self.subscribed_local_manager.get_local_listener_manager(key=service_name)
if not listener_manager:
return
if listener_name:
listener_manager.remove_listener(listener_name)
return
listener_manager.empty_listeners()
def stop_subscribe(self):
"""
stop subscribe timer scheduler
:return:
"""
self.subscribe_timer_manager.stop()
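
# Editor's note: a hedged end-to-end sketch (illustration only). It assumes a
# Nacos server reachable at 127.0.0.1:8848 and an existing namespace with id
# "public-demo"; all data ids, service names and credentials are hypothetical.
def _nacos_client_demo():  # pragma: no cover - needs a live Nacos server
    client = NacosClient("127.0.0.1:8848", namespace="public-demo",
                         username="nacos", password="nacos")
    # configuration: publish, read, then watch for changes
    client.publish_config("app-config", "DEFAULT_GROUP", "timeout=3")
    print(client.get_config("app-config", "DEFAULT_GROUP"))
    client.add_config_watcher("app-config", "DEFAULT_GROUP",
                              cb=lambda params: print("changed:", params))
    # naming: register an instance and keep it alive with heartbeats
    client.add_naming_instance("demo-service", "10.0.0.1", 8080)
    client.send_heartbeat("demo-service", "10.0.0.1", 8080)
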
if DEBUG:
NacosClient.set_debugging() | zdppy-nacos | /zdppy_nacos-0.1.0-py3-none-any.whl/zdppy_nacos/client.py | client.py |
from abc import abstractmethod
class Event(object):
ADDED = "ADDED"
MODIFIED = "MODIFIED"
DELETED = "DELETED"
class AbstractListener(object):
def __init__(self, listener_name):
self._listener_name = listener_name
@property
def listener_name(self):
return self._listener_name
@abstractmethod
def launch(self, *args, **kwargs):
pass
class AbstractListenerManager(object):
@abstractmethod
def manager_context(self):
pass
@abstractmethod
def add_listener(self, listener):
pass
@abstractmethod
def remove_listener(self, listener_name):
pass
@abstractmethod
def empty_listeners(self):
pass
@abstractmethod
def do_launch(self):
pass
class SubscribeListener(AbstractListener):
def __init__(self, fn, listener_name):
"""
        Wrap a plain function as a named listener.
"""
super(SubscribeListener, self).__init__(listener_name)
self._fn = fn
def launch(self, event, *args, **kwargs):
self._fn(event, *args, **kwargs)
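
# Editor's note: a minimal sketch (illustration only) wiring a plain function
# into a SubscribeListener and dispatching it through the SimpleListenerManager
# defined below; the event payload is hypothetical.
def _listener_demo():  # pragma: no cover - illustrative sketch
    def on_change(event, *args, **kwargs):
        print("event:", event, args, kwargs)

    manager = SimpleListenerManager()
    manager.add_listener(SubscribeListener(fn=on_change, listener_name="demo"))
    manager.do_launch(Event.ADDED, {"instanceId": "10.0.0.1#8080"})
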
class SimpleListenerManager(AbstractListenerManager):
def __init__(self):
# listener_name --> listener
self._listener_container = dict()
@property
def manager_context(self):
return self._listener_container
def merge_listeners(self, other_manager):
if not other_manager or not isinstance(other_manager, AbstractListenerManager):
return
        for listener_name, listener in other_manager.manager_context.items():
            self._listener_container[listener_name] = listener
def all_listeners(self):
return self._listener_container
def add_listener(self, listener):
self._listener_container[listener.listener_name] = listener
return self
def add_listeners(self, *listeners):
[self.add_listener(listener) for listener in listeners]
return self
def remove_listener(self, listener_name):
if listener_name not in self._listener_container.keys():
return self
self._listener_container.pop(listener_name)
return self
def empty_listeners(self):
self._listener_container.clear()
def do_launch(self, *args, **kwargs):
for _, listener in self._listener_container.items():
listener.launch(*args, **kwargs) | zdppy-nacos | /zdppy_nacos-0.1.0-py3-none-any.whl/zdppy_nacos/listener.py | listener.py |
import threading
from zdppy_orm import *
from zdppy_orm import Alias
from zdppy_orm import CompoundSelectQuery
from zdppy_orm import Metadata
from zdppy_orm import SENTINEL
from zdppy_orm import callable_
_clone_set = lambda s: set(s) if s else set()
def model_to_dict(model, recurse=True, backrefs=False, only=None,
exclude=None, seen=None, extra_attrs=None,
fields_from_query=None, max_depth=None, manytomany=False):
"""
Convert a model instance (and any related objects) to a dictionary.
:param bool recurse: Whether foreign-keys should be recursed.
:param bool backrefs: Whether lists of related objects should be recursed.
:param only: A list (or set) of field instances indicating which fields
should be included.
:param exclude: A list (or set) of field instances that should be
excluded from the dictionary.
:param list extra_attrs: Names of model instance attributes or methods
that should be included.
:param SelectQuery fields_from_query: Query that was source of model. Take
fields explicitly selected by the query and serialize them.
:param int max_depth: Maximum depth to recurse, value <= 0 means no max.
:param bool manytomany: Process many-to-many fields.
"""
max_depth = -1 if max_depth is None else max_depth
if max_depth == 0:
recurse = False
only = _clone_set(only)
extra_attrs = _clone_set(extra_attrs)
should_skip = lambda n: (n in exclude) or (only and (n not in only))
if fields_from_query is not None:
for item in fields_from_query._returning:
if isinstance(item, Field):
only.add(item)
elif isinstance(item, Alias):
extra_attrs.add(item._alias)
data = {}
exclude = _clone_set(exclude)
seen = _clone_set(seen)
exclude |= seen
model_class = type(model)
if manytomany:
for name, m2m in model._meta.manytomany.items():
if should_skip(name):
continue
exclude.update((m2m, m2m.rel_model._meta.manytomany[m2m.backref]))
for fkf in m2m.through_model._meta.refs:
exclude.add(fkf)
accum = []
for rel_obj in getattr(model, name):
accum.append(model_to_dict(
rel_obj,
recurse=recurse,
backrefs=backrefs,
only=only,
exclude=exclude,
max_depth=max_depth - 1))
data[name] = accum
for field in model._meta.sorted_fields:
if should_skip(field):
continue
field_data = model.__data__.get(field.name)
if isinstance(field, ForeignKeyField) and recurse:
if field_data is not None:
seen.add(field)
rel_obj = getattr(model, field.name)
field_data = model_to_dict(
rel_obj,
recurse=recurse,
backrefs=backrefs,
only=only,
exclude=exclude,
seen=seen,
max_depth=max_depth - 1)
else:
field_data = None
data[field.name] = field_data
if extra_attrs:
for attr_name in extra_attrs:
attr = getattr(model, attr_name)
if callable_(attr):
data[attr_name] = attr()
else:
data[attr_name] = attr
if backrefs and recurse:
for foreign_key, rel_model in model._meta.backrefs.items():
if foreign_key.backref == '+': continue
descriptor = getattr(model_class, foreign_key.backref)
if descriptor in exclude or foreign_key in exclude:
continue
if only and (descriptor not in only) and (foreign_key not in only):
continue
accum = []
exclude.add(foreign_key)
related_query = getattr(model, foreign_key.backref)
for rel_obj in related_query:
accum.append(model_to_dict(
rel_obj,
recurse=recurse,
backrefs=backrefs,
only=only,
exclude=exclude,
max_depth=max_depth - 1))
data[foreign_key.backref] = accum
return data
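
# Editor's note: a hedged round-trip sketch for model_to_dict/dict_to_model
# (illustration only); the User model and the in-memory SQLite database are
# hypothetical and exist purely for this example.
def _model_to_dict_demo():  # pragma: no cover - illustrative sketch
    db = SqliteDatabase(":memory:")

    class User(Model):
        username = CharField()

        class Meta:
            database = db

    db.create_tables([User])
    user = User.create(username="huey")
    data = model_to_dict(user)         # {'id': 1, 'username': 'huey'}
    clone = dict_to_model(User, data)  # rebuild a User instance from the dict
    return data, clone
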
def update_model_from_dict(instance, data, ignore_unknown=False):
meta = instance._meta
backrefs = dict([(fk.backref, fk) for fk in meta.backrefs])
for key, value in data.items():
if key in meta.combined:
field = meta.combined[key]
is_backref = False
elif key in backrefs:
field = backrefs[key]
is_backref = True
elif ignore_unknown:
setattr(instance, key, value)
continue
else:
raise AttributeError('Unrecognized attribute "%s" for model '
'class %s.' % (key, type(instance)))
is_foreign_key = isinstance(field, ForeignKeyField)
if not is_backref and is_foreign_key and isinstance(value, dict):
try:
rel_instance = instance.__rel__[field.name]
except KeyError:
rel_instance = field.rel_model()
setattr(
instance,
field.name,
update_model_from_dict(rel_instance, value, ignore_unknown))
elif is_backref and isinstance(value, (list, tuple)):
instances = [
dict_to_model(field.model, row_data, ignore_unknown)
for row_data in value]
for rel_instance in instances:
setattr(rel_instance, field.name, instance)
setattr(instance, field.backref, instances)
else:
setattr(instance, field.name, value)
return instance
def dict_to_model(model_class, data, ignore_unknown=False):
return update_model_from_dict(model_class(), data, ignore_unknown)
def insert_where(cls, data, where=None):
"""
Helper for generating conditional INSERT queries.
For example, prevent INSERTing a new tweet if the user has tweeted within
the last hour::
INSERT INTO "tweet" ("user_id", "content", "timestamp")
SELECT 234, 'some content', now()
WHERE NOT EXISTS (
SELECT 1 FROM "tweet"
WHERE user_id = 234 AND timestamp > now() - interval '1 hour')
Using this helper:
cond = ~fn.EXISTS(Tweet.select().where(
Tweet.user == user_obj,
Tweet.timestamp > one_hour_ago))
iq = insert_where(Tweet, {
Tweet.user: user_obj,
Tweet.content: 'some content'}, where=cond)
res = iq.execute()
"""
for field, default in cls._meta.defaults.items():
if field.name in data or field in data: continue
value = default() if callable_(default) else default
data[field] = value
fields, values = zip(*data.items())
sq = Select(columns=values).where(where)
return cls.insert_from(sq, fields).as_rowcount()
class ReconnectMixin(object):
"""
Mixin class that attempts to automatically reconnect to the database under
certain error conditions.
For example, MySQL servers will typically close connections that are idle
for 28800 seconds ("wait_timeout" setting). If your application makes use
of long-lived connections, you may find your connections are closed after
a period of no activity. This mixin will attempt to reconnect automatically
when these errors occur.
This mixin class probably should not be used with Postgres (unless you
REALLY know what you are doing) and definitely has no business being used
with Sqlite. If you wish to use with Postgres, you will need to adapt the
`reconnect_errors` attribute to something appropriate for Postgres.
"""
reconnect_errors = (
# Error class, error message fragment (or empty string for all).
(OperationalError, '2006'), # MySQL server has gone away.
(OperationalError, '2013'), # Lost connection to MySQL server.
(OperationalError, '2014'), # Commands out of sync.
(OperationalError, '4031'), # Client interaction timeout.
# mysql-connector raises a slightly different error when an idle
# connection is terminated by the server. This is equivalent to 2013.
(OperationalError, 'MySQL Connection not available.'),
)
def __init__(self, *args, **kwargs):
super(ReconnectMixin, self).__init__(*args, **kwargs)
# Normalize the reconnect errors to a more efficient data-structure.
self._reconnect_errors = {}
for exc_class, err_fragment in self.reconnect_errors:
self._reconnect_errors.setdefault(exc_class, [])
self._reconnect_errors[exc_class].append(err_fragment.lower())
def execute_sql(self, sql, params=None, commit=SENTINEL):
try:
return super(ReconnectMixin, self).execute_sql(sql, params, commit)
except Exception as exc:
exc_class = type(exc)
if exc_class not in self._reconnect_errors:
raise exc
exc_repr = str(exc).lower()
for err_fragment in self._reconnect_errors[exc_class]:
if err_fragment in exc_repr:
break
else:
raise exc
if not self.is_closed():
self.close()
self.connect()
return super(ReconnectMixin, self).execute_sql(sql, params, commit)
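
# Editor's note: a hedged sketch of how ReconnectMixin is meant to be applied
# (illustration only; database name, host and credentials are hypothetical).
def _reconnect_mixin_demo():  # pragma: no cover - needs a live MySQL server
    class ReconnectMySQLDatabase(ReconnectMixin, MySQLDatabase):
        """MySQLDatabase that retries once on 'server has gone away' errors."""

    db = ReconnectMySQLDatabase("demo", host="127.0.0.1", user="root",
                                password="secret")
    # a query issued after the server dropped the idle connection is re-run
    # on a fresh connection instead of surfacing an OperationalError
    db.execute_sql("SELECT 1")
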
def resolve_multimodel_query(query, key='_model_identifier'):
mapping = {}
accum = [query]
while accum:
curr = accum.pop()
if isinstance(curr, CompoundSelectQuery):
accum.extend((curr.lhs, curr.rhs))
continue
model_class = curr.model
name = model_class._meta.table_name
mapping[name] = model_class
curr._returning.append(Value(name).alias(key))
def wrapped_iterator():
for row in query.dicts().iterator():
identifier = row.pop(key)
model = mapping[identifier]
yield model(**row)
return wrapped_iterator()
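
# Editor's note: a hedged sketch of resolve_multimodel_query (illustration
# only; the Post/Note models are hypothetical). The helper tags each branch
# of the UNION with its table name so rows rehydrate into the right model.
def _multimodel_demo():  # pragma: no cover - illustrative sketch
    db = SqliteDatabase(":memory:")

    class Base(Model):
        class Meta:
            database = db

    class Post(Base):
        title = CharField()

    class Note(Base):
        body = CharField()

    db.create_tables([Post, Note])
    Post.create(title="hello")
    Note.create(body="world")
    query = (Post.select(Post.id, Post.title.alias("text")) |
             Note.select(Note.id, Note.body.alias("text")))
    for obj in resolve_multimodel_query(query):
        print(type(obj).__name__, obj.text)
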
class ThreadSafeDatabaseMetadata(Metadata):
"""
Metadata class to allow swapping database at run-time in a multi-threaded
application. To use:
class Base(Model):
class Meta:
model_metadata_class = ThreadSafeDatabaseMetadata
"""
def __init__(self, *args, **kwargs):
# The database attribute is stored in a thread-local.
self._database = None
self._local = threading.local()
super(ThreadSafeDatabaseMetadata, self).__init__(*args, **kwargs)
def _get_db(self):
return getattr(self._local, 'database', self._database)
def _set_db(self, db):
if self._database is None:
self._database = db
self._local.database = db
database = property(_get_db, _set_db) | zdppy-orm | /zdppy_orm-0.1.4-py3-none-any.whl/zdppy_orm/shortcuts.py | shortcuts.py |
import heapq
import logging
import random
import time
from collections import namedtuple
from itertools import chain
try:
from psycopg2.extensions import TRANSACTION_STATUS_IDLE
from psycopg2.extensions import TRANSACTION_STATUS_INERROR
from psycopg2.extensions import TRANSACTION_STATUS_UNKNOWN
except ImportError:
TRANSACTION_STATUS_IDLE = \
TRANSACTION_STATUS_INERROR = \
TRANSACTION_STATUS_UNKNOWN = None
from zdppy_orm import MySQLDatabase
from zdppy_orm import PostgresqlDatabase
from zdppy_orm import SqliteDatabase
logger = logging.getLogger('peewee.pool')
def make_int(val):
if val is not None and not isinstance(val, (int, float)):
return int(val)
return val
class MaxConnectionsExceeded(ValueError): pass
PoolConnection = namedtuple('PoolConnection', ('timestamp', 'connection',
'checked_out'))
class PooledDatabase(object):
def __init__(self, database, max_connections=20, stale_timeout=None,
timeout=None, **kwargs):
self._max_connections = make_int(max_connections)
self._stale_timeout = make_int(stale_timeout)
self._wait_timeout = make_int(timeout)
if self._wait_timeout == 0:
self._wait_timeout = float('inf')
# Available / idle connections stored in a heap, sorted oldest first.
self._connections = []
# Mapping of connection id to PoolConnection. Ordinarily we would want
# to use something like a WeakKeyDictionary, but Python typically won't
# allow us to create weak references to connection objects.
self._in_use = {}
# Use the memory address of the connection as the key in the event the
# connection object is not hashable. Connections will not get
# garbage-collected, however, because a reference to them will persist
# in "_in_use" as long as the conn has not been closed.
self.conn_key = id
super(PooledDatabase, self).__init__(database, **kwargs)
def init(self, database, max_connections=None, stale_timeout=None,
timeout=None, **connect_kwargs):
super(PooledDatabase, self).init(database, **connect_kwargs)
if max_connections is not None:
self._max_connections = make_int(max_connections)
if stale_timeout is not None:
self._stale_timeout = make_int(stale_timeout)
if timeout is not None:
self._wait_timeout = make_int(timeout)
if self._wait_timeout == 0:
self._wait_timeout = float('inf')
def connect(self, reuse_if_open=False):
if not self._wait_timeout:
return super(PooledDatabase, self).connect(reuse_if_open)
expires = time.time() + self._wait_timeout
while expires > time.time():
try:
ret = super(PooledDatabase, self).connect(reuse_if_open)
except MaxConnectionsExceeded:
time.sleep(0.1)
else:
return ret
raise MaxConnectionsExceeded('Max connections exceeded, timed out '
'attempting to connect.')
def _connect(self):
while True:
try:
# Remove the oldest connection from the heap.
ts, conn = heapq.heappop(self._connections)
key = self.conn_key(conn)
except IndexError:
ts = conn = None
logger.debug('No connection available in pool.')
break
else:
if self._is_closed(conn):
                    # This connection was closed, but since it was not stale
# it got added back to the queue of available conns. We
# then closed it and marked it as explicitly closed, so
# it's safe to throw it away now.
# (Because Database.close() calls Database._close()).
logger.debug('Connection %s was closed.', key)
ts = conn = None
elif self._stale_timeout and self._is_stale(ts):
# If we are attempting to check out a stale connection,
# then close it. We don't need to mark it in the "closed"
# set, because it is not in the list of available conns
# anymore.
logger.debug('Connection %s was stale, closing.', key)
self._close(conn, True)
ts = conn = None
else:
break
if conn is None:
if self._max_connections and (
len(self._in_use) >= self._max_connections):
raise MaxConnectionsExceeded('Exceeded maximum connections.')
conn = super(PooledDatabase, self)._connect()
ts = time.time() - random.random() / 1000
key = self.conn_key(conn)
logger.debug('Created new connection %s.', key)
self._in_use[key] = PoolConnection(ts, conn, time.time())
return conn
def _is_stale(self, timestamp):
# Called on check-out and check-in to ensure the connection has
# not outlived the stale timeout.
return (time.time() - timestamp) > self._stale_timeout
def _is_closed(self, conn):
return False
def _can_reuse(self, conn):
# Called on check-in to make sure the connection can be re-used.
return True
def _close(self, conn, close_conn=False):
key = self.conn_key(conn)
if close_conn:
super(PooledDatabase, self)._close(conn)
elif key in self._in_use:
pool_conn = self._in_use.pop(key)
if self._stale_timeout and self._is_stale(pool_conn.timestamp):
logger.debug('Closing stale connection %s.', key)
super(PooledDatabase, self)._close(conn)
elif self._can_reuse(conn):
logger.debug('Returning %s to pool.', key)
heapq.heappush(self._connections, (pool_conn.timestamp, conn))
else:
logger.debug('Closed %s.', key)
def manual_close(self):
"""
Close the underlying connection without returning it to the pool.
"""
if self.is_closed():
return False
# Obtain reference to the connection in-use by the calling thread.
conn = self.connection()
# A connection will only be re-added to the available list if it is
# marked as "in use" at the time it is closed. We will explicitly
# remove it from the "in use" list, call "close()" for the
# side-effects, and then explicitly close the connection.
self._in_use.pop(self.conn_key(conn), None)
self.close()
self._close(conn, close_conn=True)
def close_idle(self):
# Close any open connections that are not currently in-use.
with self._lock:
for _, conn in self._connections:
self._close(conn, close_conn=True)
self._connections = []
def close_stale(self, age=600):
# Close any connections that are in-use but were checked out quite some
# time ago and can be considered stale.
with self._lock:
in_use = {}
cutoff = time.time() - age
n = 0
for key, pool_conn in self._in_use.items():
if pool_conn.checked_out < cutoff:
self._close(pool_conn.connection, close_conn=True)
n += 1
else:
in_use[key] = pool_conn
self._in_use = in_use
return n
def close_all(self):
# Close all connections -- available and in-use. Warning: may break any
# active connections used by other threads.
self.close()
with self._lock:
for _, conn in self._connections:
self._close(conn, close_conn=True)
for pool_conn in self._in_use.values():
self._close(pool_conn.connection, close_conn=True)
self._connections = []
self._in_use = {}
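# Illustrative usage sketch (comments only, not library code). Assumes one of
# the concrete pooled classes defined below; 'app.db' is a placeholder.
#
#   db = PooledSqliteDatabase('app.db', max_connections=8, stale_timeout=300)
#   db.connect()       # Check a connection out of the pool (or open one).
#   db.close()         # Return the connection to the pool for re-use.
#   db.close_idle()    # Close available connections not currently in use.
#   db.close_stale()   # Close in-use connections checked out >10 min ago.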
class PooledMySQLDatabase(PooledDatabase, MySQLDatabase):
def _is_closed(self, conn):
try:
conn.ping(False)
except Exception:
return True
else:
return False
class _PooledPostgresqlDatabase(PooledDatabase):
def _is_closed(self, conn):
if conn.closed:
return True
txn_status = conn.get_transaction_status()
if txn_status == TRANSACTION_STATUS_UNKNOWN:
return True
elif txn_status != TRANSACTION_STATUS_IDLE:
conn.rollback()
return False
def _can_reuse(self, conn):
txn_status = conn.get_transaction_status()
# Do not return connection in an error state, as subsequent queries
# will all fail. If the status is unknown then we lost the connection
# to the server and the connection should not be re-used.
if txn_status == TRANSACTION_STATUS_UNKNOWN:
return False
elif txn_status == TRANSACTION_STATUS_INERROR:
conn.reset()
elif txn_status != TRANSACTION_STATUS_IDLE:
conn.rollback()
return True
class PooledPostgresqlDatabase(_PooledPostgresqlDatabase, PostgresqlDatabase):
pass
try:
from playhouse.postgres_ext import PostgresqlExtDatabase
class PooledPostgresqlExtDatabase(_PooledPostgresqlDatabase, PostgresqlExtDatabase):
pass
except ImportError:
PooledPostgresqlExtDatabase = None
class _PooledSqliteDatabase(PooledDatabase):
def _is_closed(self, conn):
try:
conn.total_changes
except Exception:
return True
else:
return False
class PooledSqliteDatabase(_PooledSqliteDatabase, SqliteDatabase):
pass
try:
from playhouse.sqlite_ext import SqliteExtDatabase
class PooledSqliteExtDatabase(_PooledSqliteDatabase, SqliteExtDatabase):
pass
except ImportError:
PooledSqliteExtDatabase = None
try:
from playhouse.sqlite_ext import CSqliteExtDatabase
class PooledCSqliteExtDatabase(_PooledSqliteDatabase, CSqliteExtDatabase):
pass
except ImportError:
PooledCSqliteExtDatabase = None | zdppy-orm | /zdppy_orm-0.1.4-py3-none-any.whl/zdppy_orm/pool.py | pool.py |
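# Sketch of the wait-timeout behavior (assumes the constructor's "timeout"
# keyword feeds self._wait_timeout above; credentials are placeholders):
#
#   db = PooledMySQLDatabase('my_app', max_connections=4, timeout=10,
#                            user='root', password='secret')
#   conn = db.connect()  # Retries in 0.1s steps for up to 10 seconds before
#                        # raising MaxConnectionsExceeded.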
import json
from . import InterfaceError
try:
import mysql.connector as mysql_connector
except ImportError:
mysql_connector = None
try:
import mariadb
except ImportError:
mariadb = None
from . import ImproperlyConfigured
from . import Insert
from . import MySQLDatabase
from . import NodeList
from . import SQL
from . import TextField
from . import fn
class MySQLConnectorDatabase(MySQLDatabase):
def _connect(self):
if mysql_connector is None:
raise ImproperlyConfigured('MySQL connector not installed!')
return mysql_connector.connect(db=self.database, **self.connect_params)
def cursor(self, commit=None):
if self.is_closed():
if self.autoconnect:
self.connect()
else:
raise InterfaceError('Error, database connection not opened.')
return self._state.conn.cursor(buffered=True)
class MariaDBConnectorDatabase(MySQLDatabase):
def _connect(self):
if mariadb is None:
raise ImproperlyConfigured('mariadb connector not installed!')
self.connect_params.pop('charset', None)
self.connect_params.pop('sql_mode', None)
self.connect_params.pop('use_unicode', None)
return mariadb.connect(db=self.database, **self.connect_params)
def cursor(self, commit=None):
if self.is_closed():
if self.autoconnect:
self.connect()
else:
raise InterfaceError('Error, database connection not opened.')
return self._state.conn.cursor(buffered=True)
def _set_server_version(self, conn):
version = conn.server_version
version, point = divmod(version, 100)
version, minor = divmod(version, 100)
self.server_version = (version, minor, point)
if self.server_version >= (10, 5, 0):
self.returning_clause = True
def last_insert_id(self, cursor, query_type=None):
if not self.returning_clause:
return cursor.lastrowid
elif query_type == Insert.SIMPLE:
try:
return cursor[0][0]
except (AttributeError, IndexError):
return cursor.lastrowid
return cursor
class JSONField(TextField):
field_type = 'JSON'
def db_value(self, value):
if value is not None:
return json.dumps(value)
def python_value(self, value):
if value is not None:
return json.loads(value)
def Match(columns, expr, modifier=None):
if isinstance(columns, (list, tuple)):
match = fn.MATCH(*columns) # Tuple of one or more columns / fields.
else:
match = fn.MATCH(columns) # Single column / field.
args = expr if modifier is None else NodeList((expr, SQL(modifier)))
return NodeList((match, fn.AGAINST(args))) | zdppy-orm | /zdppy_orm-0.1.4-py3-none-any.whl/zdppy_orm/mysql_ext.py | mysql_ext.py |
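# Illustrative sketch ("Post" and its fields are hypothetical models, not part
# of this module): Match() renders MySQL's MATCH(...) AGAINST(...) predicate,
# and JSONField round-trips Python values through json.dumps()/json.loads().
#
#   Post.select().where(
#       Match((Post.title, Post.body), 'needle', modifier='IN BOOLEAN MODE'))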
from bisect import bisect_left
from bisect import bisect_right
from contextlib import contextmanager
from copy import deepcopy
from functools import wraps
from inspect import isclass
import calendar
import collections
import datetime
import decimal
import hashlib
import itertools
import logging
import operator
import re
import socket
import struct
import sys
import threading
import time
import uuid
import warnings
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
try:
from pysqlite3 import dbapi2 as pysq3
except ImportError:
try:
from pysqlite2 import dbapi2 as pysq3
except ImportError:
pysq3 = None
try:
import sqlite3
except ImportError:
sqlite3 = pysq3
else:
if pysq3 and pysq3.sqlite_version_info >= sqlite3.sqlite_version_info:
sqlite3 = pysq3
try:
from psycopg2cffi import compat
compat.register()
except ImportError:
pass
try:
import psycopg2
from psycopg2 import extensions as pg_extensions
try:
from psycopg2 import errors as pg_errors
except ImportError:
pg_errors = None
except ImportError:
psycopg2 = pg_errors = None
try:
from psycopg2.extras import register_uuid as pg_register_uuid
pg_register_uuid()
except Exception:
pass
mysql_passwd = False
try:
import zdppy_mysql as mysql
except ImportError:
try:
import MySQLdb as mysql
mysql_passwd = True
except ImportError:
mysql = None
__version__ = '0.1.0'
__all__ = [
'AnyField',
'AsIs',
'AutoField',
'BareField',
'BigAutoField',
'BigBitField',
'BigIntegerField',
'BinaryUUIDField',
'BitField',
'BlobField',
'BooleanField',
'Case',
'Cast',
'CharField',
'Check',
'chunked',
'Column',
'CompositeKey',
'Context',
'Database',
'DatabaseError',
'DatabaseProxy',
'DataError',
'DateField',
'DateTimeField',
'DecimalField',
'DeferredForeignKey',
'DeferredThroughModel',
'DJANGO_MAP',
'DoesNotExist',
'DoubleField',
'DQ',
'EXCLUDED',
'Field',
'FixedCharField',
'FloatField',
'fn',
'ForeignKeyField',
'IdentityField',
'ImproperlyConfigured',
'Index',
'IntegerField',
'IntegrityError',
'InterfaceError',
'InternalError',
'IPField',
'JOIN',
'ManyToManyField',
'Model',
'ModelIndex',
'MySQLDatabase',
'NotSupportedError',
'OP',
'OperationalError',
'PostgresqlDatabase',
'PrimaryKeyField', # XXX: Deprecated, change to AutoField.
'prefetch',
'ProgrammingError',
'Proxy',
'QualifiedNames',
'SchemaManager',
'SmallIntegerField',
'Select',
'SQL',
'SqliteDatabase',
'Table',
'TextField',
'TimeField',
'TimestampField',
'Tuple',
'UUIDField',
'Value',
'ValuesList',
'Window',
]
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Get the logger object for this module.
logger = logging.getLogger('zdppy_orm')
logger.addHandler(NullHandler())
if sys.version_info[0] == 2:
text_type = unicode
bytes_type = str
buffer_type = buffer
izip_longest = itertools.izip_longest
callable_ = callable
multi_types = (list, tuple, frozenset, set)
exec('def reraise(tp, value, tb=None): raise tp, value, tb')
def print_(s):
sys.stdout.write(s)
sys.stdout.write('\n')
else:
import builtins
try:
from collections.abc import Callable
except ImportError:
from collections import Callable
from functools import reduce
callable_ = lambda c: isinstance(c, Callable)
text_type = str
bytes_type = bytes
buffer_type = memoryview
basestring = str
long = int
multi_types = (list, tuple, frozenset, set, range)
print_ = getattr(builtins, 'print')
izip_longest = itertools.zip_longest
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
if sqlite3:
sqlite3.register_adapter(decimal.Decimal, str)
sqlite3.register_adapter(datetime.date, str)
sqlite3.register_adapter(datetime.time, str)
__sqlite_version__ = sqlite3.sqlite_version_info
else:
__sqlite_version__ = (0, 0, 0)
__date_parts__ = set(('year', 'month', 'day', 'hour', 'minute', 'second'))
# SQLite does not support the `date_part` SQL function, so we define an
# implementation in Python.
__sqlite_datetime_formats__ = (
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d',
'%H:%M:%S',
'%H:%M:%S.%f',
'%H:%M')
__sqlite_date_trunc__ = {
'year': '%Y-01-01 00:00:00',
'month': '%Y-%m-01 00:00:00',
'day': '%Y-%m-%d 00:00:00',
'hour': '%Y-%m-%d %H:00:00',
'minute': '%Y-%m-%d %H:%M:00',
'second': '%Y-%m-%d %H:%M:%S'}
__mysql_date_trunc__ = __sqlite_date_trunc__.copy()
__mysql_date_trunc__['minute'] = '%Y-%m-%d %H:%i:00'
__mysql_date_trunc__['second'] = '%Y-%m-%d %H:%i:%S'
def _sqlite_date_part(lookup_type, datetime_string):
assert lookup_type in __date_parts__
if not datetime_string:
return
dt = format_date_time(datetime_string, __sqlite_datetime_formats__)
return getattr(dt, lookup_type)
def _sqlite_date_trunc(lookup_type, datetime_string):
assert lookup_type in __sqlite_date_trunc__
if not datetime_string:
return
dt = format_date_time(datetime_string, __sqlite_datetime_formats__)
return dt.strftime(__sqlite_date_trunc__[lookup_type])
def __deprecated__(s):
warnings.warn(s, DeprecationWarning)
class attrdict(dict):
def __getattr__(self, attr):
try:
return self[attr]
except KeyError:
raise AttributeError(attr)
def __setattr__(self, attr, value):
self[attr] = value
def __iadd__(self, rhs):
self.update(rhs)
return self
def __add__(self, rhs):
d = attrdict(self)
d.update(rhs)
return d
SENTINEL = object()
#: Operations for use in SQL expressions.
OP = attrdict(
AND='AND',
OR='OR',
ADD='+',
SUB='-',
MUL='*',
DIV='/',
BIN_AND='&',
BIN_OR='|',
XOR='#',
MOD='%',
EQ='=',
LT='<',
LTE='<=',
GT='>',
GTE='>=',
NE='!=',
IN='IN',
NOT_IN='NOT IN',
IS='IS',
IS_NOT='IS NOT',
LIKE='LIKE',
ILIKE='ILIKE',
BETWEEN='BETWEEN',
REGEXP='REGEXP',
IREGEXP='IREGEXP',
CONCAT='||',
BITWISE_NEGATION='~')
# To support "django-style" double-underscore filters, create a mapping between
# operation name and operation code, e.g. "__eq" == OP.EQ.
DJANGO_MAP = attrdict({
'eq': operator.eq,
'lt': operator.lt,
'lte': operator.le,
'gt': operator.gt,
'gte': operator.ge,
'ne': operator.ne,
'in': operator.lshift,
'is': lambda l, r: Expression(l, OP.IS, r),
'like': lambda l, r: Expression(l, OP.LIKE, r),
'ilike': lambda l, r: Expression(l, OP.ILIKE, r),
'regexp': lambda l, r: Expression(l, OP.REGEXP, r),
})
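# Sketch of how a "django-style" filter key resolves through this map (the
# "SomeModel.age" field is hypothetical): "age__gte" splits into
# ("age", "gte"), and DJANGO_MAP['gte'] is operator.ge, which builds the
# >= expression node.
#
#   build = DJANGO_MAP['gte']            # operator.ge
#   expr = build(SomeModel.age, 30)      # Expression(age, OP.GTE, 30)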
#: Mapping of field type to the data-type supported by the database. Databases
#: may override or add to this list.
FIELD = attrdict(
AUTO='INTEGER',
BIGAUTO='BIGINT',
BIGINT='BIGINT',
BLOB='BLOB',
BOOL='SMALLINT',
CHAR='CHAR',
DATE='DATE',
DATETIME='DATETIME',
DECIMAL='DECIMAL',
DEFAULT='',
DOUBLE='REAL',
FLOAT='REAL',
INT='INTEGER',
SMALLINT='SMALLINT',
TEXT='TEXT',
TIME='TIME',
UUID='TEXT',
UUIDB='BLOB',
VARCHAR='VARCHAR')
#: Join helpers (for convenience) -- all join types are supported; this object
#: just helps avoid errors introduced by typing join strings everywhere.
JOIN = attrdict(
INNER='INNER JOIN',
LEFT_OUTER='LEFT OUTER JOIN',
RIGHT_OUTER='RIGHT OUTER JOIN',
FULL='FULL JOIN',
FULL_OUTER='FULL OUTER JOIN',
CROSS='CROSS JOIN',
NATURAL='NATURAL JOIN',
LATERAL='LATERAL',
LEFT_LATERAL='LEFT JOIN LATERAL')
# Row representations.
ROW = attrdict(
TUPLE=1,
DICT=2,
NAMED_TUPLE=3,
CONSTRUCTOR=4,
MODEL=5)
SCOPE_NORMAL = 1
SCOPE_SOURCE = 2
SCOPE_VALUES = 4
SCOPE_CTE = 8
SCOPE_COLUMN = 16
# Rules for parentheses around subqueries in compound select.
CSQ_PARENTHESES_NEVER = 0
CSQ_PARENTHESES_ALWAYS = 1
CSQ_PARENTHESES_UNNESTED = 2
# Regular expressions used to convert class names to snake-case table names.
# First regex handles acronym followed by word or initial lower-word followed
# by a capitalized word. e.g. APIResponse -> API_Response / fooBar -> foo_Bar.
# Second regex handles the normal case of two title-cased words.
SNAKE_CASE_STEP1 = re.compile('(.)_*([A-Z][a-z]+)')
SNAKE_CASE_STEP2 = re.compile('([a-z0-9])_*([A-Z])')
# Helper functions that are used in various parts of the codebase.
MODEL_BASE = '_metaclass_helper_'
def with_metaclass(meta, base=object):
return meta(MODEL_BASE, (base,), {})
def merge_dict(source, overrides):
merged = source.copy()
if overrides:
merged.update(overrides)
return merged
def quote(path, quote_chars):
if len(path) == 1:
return path[0].join(quote_chars)
return '.'.join([part.join(quote_chars) for part in path])
is_model = lambda o: isclass(o) and issubclass(o, Model)
def ensure_tuple(value):
if value is not None:
return value if isinstance(value, (list, tuple)) else (value,)
def ensure_entity(value):
if value is not None:
return value if isinstance(value, Node) else Entity(value)
def make_snake_case(s):
first = SNAKE_CASE_STEP1.sub(r'\1_\2', s)
return SNAKE_CASE_STEP2.sub(r'\1_\2', first).lower()
def chunked(it, n):
marker = object()
for group in (list(g) for g in izip_longest(*[iter(it)] * n,
fillvalue=marker)):
if group[-1] is marker:
del group[group.index(marker):]
yield group
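# Example: chunked() yields fixed-size batches, with a short final batch.
#
#   for batch in chunked(range(10), 4):
#       print(batch)   # [0, 1, 2, 3] / [4, 5, 6, 7] / [8, 9]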
class _callable_context_manager(object):
def __call__(self, fn):
@wraps(fn)
def inner(*args, **kwargs):
with self:
return fn(*args, **kwargs)
return inner
class Proxy(object):
"""
Create a proxy or placeholder for another object.
"""
__slots__ = ('obj', '_callbacks')
def __init__(self):
self._callbacks = []
self.initialize(None)
def initialize(self, obj):
self.obj = obj
for callback in self._callbacks:
callback(obj)
def attach_callback(self, callback):
self._callbacks.append(callback)
return callback
def passthrough(method):
def inner(self, *args, **kwargs):
if self.obj is None:
raise AttributeError('Cannot use uninitialized Proxy.')
return getattr(self.obj, method)(*args, **kwargs)
return inner
# Allow proxy to be used as a context-manager.
__enter__ = passthrough('__enter__')
__exit__ = passthrough('__exit__')
def __getattr__(self, attr):
if self.obj is None:
raise AttributeError('Cannot use uninitialized Proxy.')
return getattr(self.obj, attr)
def __setattr__(self, attr, value):
if attr not in self.__slots__:
raise AttributeError('Cannot set attribute on proxy.')
return super(Proxy, self).__setattr__(attr, value)
class DatabaseProxy(Proxy):
"""
Proxy implementation specifically for proxying `Database` objects.
"""
def connection_context(self):
return ConnectionContext(self)
def atomic(self, *args, **kwargs):
return _atomic(self, *args, **kwargs)
def manual_commit(self):
return _manual(self)
def transaction(self, *args, **kwargs):
return _transaction(self, *args, **kwargs)
def savepoint(self):
return _savepoint(self)
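# Sketch of deferred database configuration via the proxy (names are
# placeholders):
#
#   database_proxy = DatabaseProxy()    # Unusable until initialize() is run.
#   # ... declare models/tables against database_proxy ...
#   database_proxy.initialize(SqliteDatabase('app.db'))  # Now usable.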
class ModelDescriptor(object): pass
# SQL Generation.
class AliasManager(object):
__slots__ = ('_counter', '_current_index', '_mapping')
def __init__(self):
# A list of dictionaries containing mappings at various depths.
self._counter = 0
self._current_index = 0
self._mapping = []
self.push()
@property
def mapping(self):
return self._mapping[self._current_index - 1]
def add(self, source):
if source not in self.mapping:
self._counter += 1
self[source] = 't%d' % self._counter
return self.mapping[source]
def get(self, source, any_depth=False):
if any_depth:
for idx in reversed(range(self._current_index)):
if source in self._mapping[idx]:
return self._mapping[idx][source]
return self.add(source)
def __getitem__(self, source):
return self.get(source)
def __setitem__(self, source, alias):
self.mapping[source] = alias
def push(self):
self._current_index += 1
if self._current_index > len(self._mapping):
self._mapping.append({})
def pop(self):
if self._current_index == 1:
raise ValueError('Cannot pop() from empty alias manager.')
self._current_index -= 1
class State(collections.namedtuple('_State', ('scope', 'parentheses',
'settings'))):
def __new__(cls, scope=SCOPE_NORMAL, parentheses=False, **kwargs):
return super(State, cls).__new__(cls, scope, parentheses, kwargs)
def __call__(self, scope=None, parentheses=None, **kwargs):
# Scope and settings are "inherited" (parentheses is not, however).
scope = self.scope if scope is None else scope
# Try to avoid unnecessary dict copying.
if kwargs and self.settings:
settings = self.settings.copy() # Copy original settings dict.
settings.update(kwargs) # Update copy with overrides.
elif kwargs:
settings = kwargs
else:
settings = self.settings
return State(scope, parentheses, **settings)
def __getattr__(self, attr_name):
return self.settings.get(attr_name)
def __scope_context__(scope):
@contextmanager
def inner(self, **kwargs):
with self(scope=scope, **kwargs):
yield self
return inner
class Context(object):
__slots__ = ('stack', '_sql', '_values', 'alias_manager', 'state')
def __init__(self, **settings):
self.stack = []
self._sql = []
self._values = []
self.alias_manager = AliasManager()
self.state = State(**settings)
def as_new(self):
return Context(**self.state.settings)
def column_sort_key(self, item):
return item[0].get_sort_key(self)
@property
def scope(self):
return self.state.scope
@property
def parentheses(self):
return self.state.parentheses
@property
def subquery(self):
return self.state.subquery
def __call__(self, **overrides):
if overrides and overrides.get('scope') == self.scope:
del overrides['scope']
self.stack.append(self.state)
self.state = self.state(**overrides)
return self
scope_normal = __scope_context__(SCOPE_NORMAL)
scope_source = __scope_context__(SCOPE_SOURCE)
scope_values = __scope_context__(SCOPE_VALUES)
scope_cte = __scope_context__(SCOPE_CTE)
scope_column = __scope_context__(SCOPE_COLUMN)
def __enter__(self):
if self.parentheses:
self.literal('(')
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.parentheses:
self.literal(')')
self.state = self.stack.pop()
@contextmanager
def push_alias(self):
self.alias_manager.push()
yield
self.alias_manager.pop()
def sql(self, obj):
if isinstance(obj, (Node, Context)):
return obj.__sql__(self)
elif is_model(obj):
return obj._meta.table.__sql__(self)
else:
return self.sql(Value(obj))
def literal(self, keyword):
self._sql.append(keyword)
return self
def value(self, value, converter=None, add_param=True):
if converter:
value = converter(value)
elif converter is None and self.state.converter:
# Explicitly check for None so that "False" can be used to signify
# that no conversion should be applied.
value = self.state.converter(value)
if isinstance(value, Node):
with self(converter=None):
return self.sql(value)
elif is_model(value):
# Under certain circumstances, we could end-up treating a model-
# class itself as a value. This check ensures that we drop the
# table alias into the query instead of trying to parameterize a
# model (for instance, passing a model as a function argument).
with self.scope_column():
return self.sql(value)
if self.state.value_literals:
return self.literal(_query_val_transform(value))
self._values.append(value)
return self.literal(self.state.param or '?') if add_param else self
def __sql__(self, ctx):
ctx._sql.extend(self._sql)
ctx._values.extend(self._values)
return ctx
def parse(self, node):
return self.sql(node).query()
def query(self):
return ''.join(self._sql), self._values
def query_to_string(query):
# NOTE: this function is not exported by default as it might be misused --
# and this misuse could lead to SQL injection vulnerabilities. This
# function is intended for debugging or logging purposes ONLY.
db = getattr(query, '_database', None)
if db is not None:
ctx = db.get_sql_context()
else:
ctx = Context()
sql, params = ctx.sql(query).query()
if not params:
return sql
param = ctx.state.param or '?'
if param == '?':
sql = sql.replace('?', '%s')
return sql % tuple(map(_query_val_transform, params))
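# Debugging sketch (see NOTE above -- never execute SQL built this way):
# "query" stands for any query object constructed with this module.
#
#   print(query_to_string(query))
#   # e.g. SELECT "t1"."id" FROM "user" AS "t1" WHERE "t1"."id" = 3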
def _query_val_transform(v):
# Interpolate parameters.
if isinstance(v, (text_type, datetime.datetime, datetime.date,
datetime.time)):
v = "'%s'" % v
elif isinstance(v, bytes_type):
try:
v = v.decode('utf8')
except UnicodeDecodeError:
v = v.decode('raw_unicode_escape')
v = "'%s'" % v
elif isinstance(v, int):
v = '%s' % int(v) # Also handles booleans -> 1 or 0.
elif v is None:
v = 'NULL'
else:
v = str(v)
return v
# AST.
class Node(object):
_coerce = True
def clone(self):
obj = self.__class__.__new__(self.__class__)
obj.__dict__ = self.__dict__.copy()
return obj
def __sql__(self, ctx):
raise NotImplementedError
@staticmethod
def copy(method):
def inner(self, *args, **kwargs):
clone = self.clone()
method(clone, *args, **kwargs)
return clone
return inner
def coerce(self, _coerce=True):
if _coerce != self._coerce:
clone = self.clone()
clone._coerce = _coerce
return clone
return self
def is_alias(self):
return False
def unwrap(self):
return self
class ColumnFactory(object):
__slots__ = ('node',)
def __init__(self, node):
self.node = node
def __getattr__(self, attr):
return Column(self.node, attr)
class _DynamicColumn(object):
__slots__ = ()
def __get__(self, instance, instance_type=None):
if instance is not None:
return ColumnFactory(instance) # Implements __getattr__().
return self
class _ExplicitColumn(object):
__slots__ = ()
def __get__(self, instance, instance_type=None):
if instance is not None:
raise AttributeError(
'%s specifies columns explicitly, and does not support '
'dynamic column lookups.' % instance)
return self
class Source(Node):
c = _DynamicColumn()
def __init__(self, alias=None):
super(Source, self).__init__()
self._alias = alias
@Node.copy
def alias(self, name):
self._alias = name
def select(self, *columns):
if not columns:
columns = (SQL('*'),)
return Select((self,), columns)
def join(self, dest, join_type=JOIN.INNER, on=None):
return Join(self, dest, join_type, on)
def left_outer_join(self, dest, on=None):
return Join(self, dest, JOIN.LEFT_OUTER, on)
def cte(self, name, recursive=False, columns=None, materialized=None):
return CTE(name, self, recursive=recursive, columns=columns,
materialized=materialized)
def get_sort_key(self, ctx):
if self._alias:
return (self._alias,)
return (ctx.alias_manager[self],)
def apply_alias(self, ctx):
# If we are defining the source, include the "AS alias" declaration. An
# alias is created for the source if one is not already defined.
if ctx.scope == SCOPE_SOURCE:
if self._alias:
ctx.alias_manager[self] = self._alias
ctx.literal(' AS ').sql(Entity(ctx.alias_manager[self]))
return ctx
def apply_column(self, ctx):
if self._alias:
ctx.alias_manager[self] = self._alias
return ctx.sql(Entity(ctx.alias_manager[self]))
class _HashableSource(object):
def __init__(self, *args, **kwargs):
super(_HashableSource, self).__init__(*args, **kwargs)
self._update_hash()
@Node.copy
def alias(self, name):
self._alias = name
self._update_hash()
def _update_hash(self):
self._hash = self._get_hash()
def _get_hash(self):
return hash((self.__class__, self._path, self._alias))
def __hash__(self):
return self._hash
def __eq__(self, other):
if isinstance(other, _HashableSource):
return self._hash == other._hash
return Expression(self, OP.EQ, other)
def __ne__(self, other):
if isinstance(other, _HashableSource):
return self._hash != other._hash
return Expression(self, OP.NE, other)
def _e(op):
def inner(self, rhs):
return Expression(self, op, rhs)
return inner
__lt__ = _e(OP.LT)
__le__ = _e(OP.LTE)
__gt__ = _e(OP.GT)
__ge__ = _e(OP.GTE)
def __bind_database__(meth):
@wraps(meth)
def inner(self, *args, **kwargs):
result = meth(self, *args, **kwargs)
if self._database:
return result.bind(self._database)
return result
return inner
def __join__(join_type=JOIN.INNER, inverted=False):
def method(self, other):
if inverted:
self, other = other, self
return Join(self, other, join_type=join_type)
return method
class BaseTable(Source):
__and__ = __join__(JOIN.INNER)
__add__ = __join__(JOIN.LEFT_OUTER)
__sub__ = __join__(JOIN.RIGHT_OUTER)
__or__ = __join__(JOIN.FULL_OUTER)
__mul__ = __join__(JOIN.CROSS)
__rand__ = __join__(JOIN.INNER, inverted=True)
__radd__ = __join__(JOIN.LEFT_OUTER, inverted=True)
__rsub__ = __join__(JOIN.RIGHT_OUTER, inverted=True)
__ror__ = __join__(JOIN.FULL_OUTER, inverted=True)
__rmul__ = __join__(JOIN.CROSS, inverted=True)
class _BoundTableContext(_callable_context_manager):
def __init__(self, table, database):
self.table = table
self.database = database
def __enter__(self):
self._orig_database = self.table._database
self.table.bind(self.database)
if self.table._model is not None:
self.table._model.bind(self.database)
return self.table
def __exit__(self, exc_type, exc_val, exc_tb):
self.table.bind(self._orig_database)
if self.table._model is not None:
self.table._model.bind(self._orig_database)
class Table(_HashableSource, BaseTable):
def __init__(self, name, columns=None, primary_key=None, schema=None,
alias=None, _model=None, _database=None):
self.__name__ = name
self._columns = columns
self._primary_key = primary_key
self._schema = schema
self._path = (schema, name) if schema else (name,)
self._model = _model
self._database = _database
super(Table, self).__init__(alias=alias)
# Allow tables to restrict what columns are available.
if columns is not None:
self.c = _ExplicitColumn()
for column in columns:
setattr(self, column, Column(self, column))
if primary_key:
col_src = self if self._columns else self.c
self.primary_key = getattr(col_src, primary_key)
else:
self.primary_key = None
def clone(self):
# Ensure a deep copy of the column instances.
return Table(
self.__name__,
columns=self._columns,
primary_key=self._primary_key,
schema=self._schema,
alias=self._alias,
_model=self._model,
_database=self._database)
def bind(self, database=None):
self._database = database
return self
def bind_ctx(self, database=None):
return _BoundTableContext(self, database)
def _get_hash(self):
return hash((self.__class__, self._path, self._alias, self._model))
@__bind_database__
def select(self, *columns):
if not columns and self._columns:
columns = [Column(self, column) for column in self._columns]
return Select((self,), columns)
@__bind_database__
def insert(self, insert=None, columns=None, **kwargs):
if kwargs:
insert = {} if insert is None else insert
src = self if self._columns else self.c
for key, value in kwargs.items():
insert[getattr(src, key)] = value
return Insert(self, insert=insert, columns=columns)
@__bind_database__
def replace(self, insert=None, columns=None, **kwargs):
return (self
.insert(insert=insert, columns=columns)
.on_conflict('REPLACE'))
@__bind_database__
def update(self, update=None, **kwargs):
if kwargs:
update = {} if update is None else update
for key, value in kwargs.items():
src = self if self._columns else self.c
update[getattr(src, key)] = value
return Update(self, update=update)
@__bind_database__
def delete(self):
return Delete(self)
def __sql__(self, ctx):
if ctx.scope == SCOPE_VALUES:
# Return the quoted table name.
return ctx.sql(Entity(*self._path))
if self._alias:
ctx.alias_manager[self] = self._alias
if ctx.scope == SCOPE_SOURCE:
# Define the table and its alias.
return self.apply_alias(ctx.sql(Entity(*self._path)))
else:
# Refer to the table using the alias.
return self.apply_column(ctx)
class Join(BaseTable):
def __init__(self, lhs, rhs, join_type=JOIN.INNER, on=None, alias=None):
super(Join, self).__init__(alias=alias)
self.lhs = lhs
self.rhs = rhs
self.join_type = join_type
self._on = on
def on(self, predicate):
self._on = predicate
return self
def __sql__(self, ctx):
(ctx
.sql(self.lhs)
.literal(' %s ' % self.join_type)
.sql(self.rhs))
if self._on is not None:
ctx.literal(' ON ').sql(self._on)
return ctx
class ValuesList(_HashableSource, BaseTable):
def __init__(self, values, columns=None, alias=None):
self._values = values
self._columns = columns
super(ValuesList, self).__init__(alias=alias)
def _get_hash(self):
return hash((self.__class__, id(self._values), self._alias))
@Node.copy
def columns(self, *names):
self._columns = names
def __sql__(self, ctx):
if self._alias:
ctx.alias_manager[self] = self._alias
if ctx.scope == SCOPE_SOURCE or ctx.scope == SCOPE_NORMAL:
with ctx(parentheses=not ctx.parentheses):
ctx = (ctx
.literal('VALUES ')
.sql(CommaNodeList([
EnclosedNodeList(row) for row in self._values])))
if ctx.scope == SCOPE_SOURCE:
ctx.literal(' AS ').sql(Entity(ctx.alias_manager[self]))
if self._columns:
entities = [Entity(c) for c in self._columns]
ctx.sql(EnclosedNodeList(entities))
else:
ctx.sql(Entity(ctx.alias_manager[self]))
return ctx
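# Sketch: a VALUES list used as a table expression (column names are
# arbitrary examples):
#
#   vl = ValuesList([(1, 'one'), (2, 'two')]).columns('idx', 'name')
#   query = vl.select(vl.c.idx, vl.c.name)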
class CTE(_HashableSource, Source):
def __init__(self, name, query, recursive=False, columns=None,
materialized=None):
self._alias = name
self._query = query
self._recursive = recursive
self._materialized = materialized
if columns is not None:
columns = [Entity(c) if isinstance(c, basestring) else c
for c in columns]
self._columns = columns
query._cte_list = ()
super(CTE, self).__init__(alias=name)
def select_from(self, *columns):
if not columns:
raise ValueError('select_from() must specify one or more columns '
'from the CTE to select.')
query = (Select((self,), columns)
.with_cte(self)
.bind(self._query._database))
try:
query = query.objects(self._query.model)
except AttributeError:
pass
return query
def _get_hash(self):
return hash((self.__class__, self._alias, id(self._query)))
def union_all(self, rhs):
clone = self._query.clone()
return CTE(self._alias, clone + rhs, self._recursive, self._columns)
__add__ = union_all
def union(self, rhs):
clone = self._query.clone()
return CTE(self._alias, clone | rhs, self._recursive, self._columns)
__or__ = union
def __sql__(self, ctx):
if ctx.scope != SCOPE_CTE:
return ctx.sql(Entity(self._alias))
with ctx.push_alias():
ctx.alias_manager[self] = self._alias
ctx.sql(Entity(self._alias))
if self._columns:
ctx.literal(' ').sql(EnclosedNodeList(self._columns))
ctx.literal(' AS ')
if self._materialized:
ctx.literal('MATERIALIZED ')
elif self._materialized is False:
ctx.literal('NOT MATERIALIZED ')
with ctx.scope_normal(parentheses=True):
ctx.sql(self._query)
return ctx
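# Sketch: define a CTE over a query and select from it ("base_query" is a
# placeholder for any Select):
#
#   cte = base_query.cte('totals', columns=('user_id', 'total'))
#   query = cte.select_from(cte.c.user_id, cte.c.total)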
class ColumnBase(Node):
_converter = None
@Node.copy
def converter(self, converter=None):
self._converter = converter
def alias(self, alias):
if alias:
return Alias(self, alias)
return self
def unalias(self):
return self
def cast(self, as_type):
return Cast(self, as_type)
def asc(self, collation=None, nulls=None):
return Asc(self, collation=collation, nulls=nulls)
__pos__ = asc
def desc(self, collation=None, nulls=None):
return Desc(self, collation=collation, nulls=nulls)
__neg__ = desc
def __invert__(self):
return Negated(self)
def _e(op, inv=False):
"""
Lightweight factory which returns a method that builds an Expression
consisting of the left-hand and right-hand operands, using `op`.
"""
def inner(self, rhs):
if inv:
return Expression(rhs, op, self)
return Expression(self, op, rhs)
return inner
__and__ = _e(OP.AND)
__or__ = _e(OP.OR)
__add__ = _e(OP.ADD)
__sub__ = _e(OP.SUB)
__mul__ = _e(OP.MUL)
__div__ = __truediv__ = _e(OP.DIV)
__xor__ = _e(OP.XOR)
__radd__ = _e(OP.ADD, inv=True)
__rsub__ = _e(OP.SUB, inv=True)
__rmul__ = _e(OP.MUL, inv=True)
__rdiv__ = __rtruediv__ = _e(OP.DIV, inv=True)
__rand__ = _e(OP.AND, inv=True)
__ror__ = _e(OP.OR, inv=True)
__rxor__ = _e(OP.XOR, inv=True)
def __eq__(self, rhs):
op = OP.IS if rhs is None else OP.EQ
return Expression(self, op, rhs)
def __ne__(self, rhs):
op = OP.IS_NOT if rhs is None else OP.NE
return Expression(self, op, rhs)
__lt__ = _e(OP.LT)
__le__ = _e(OP.LTE)
__gt__ = _e(OP.GT)
__ge__ = _e(OP.GTE)
__lshift__ = _e(OP.IN)
__rshift__ = _e(OP.IS)
__mod__ = _e(OP.LIKE)
__pow__ = _e(OP.ILIKE)
like = _e(OP.LIKE)
ilike = _e(OP.ILIKE)
bin_and = _e(OP.BIN_AND)
bin_or = _e(OP.BIN_OR)
in_ = _e(OP.IN)
not_in = _e(OP.NOT_IN)
regexp = _e(OP.REGEXP)
# Special expressions.
def is_null(self, is_null=True):
op = OP.IS if is_null else OP.IS_NOT
return Expression(self, op, None)
def _escape_like_expr(self, s, template):
if s.find('_') >= 0 or s.find('%') >= 0 or s.find('\\') >= 0:
s = s.replace('\\', '\\\\').replace('_', '\\_').replace('%', '\\%')
return NodeList((template % s, SQL('ESCAPE'), '\\'))
return template % s
def contains(self, rhs):
if isinstance(rhs, Node):
rhs = Expression('%', OP.CONCAT,
Expression(rhs, OP.CONCAT, '%'))
else:
rhs = self._escape_like_expr(rhs, '%%%s%%')
return Expression(self, OP.ILIKE, rhs)
def startswith(self, rhs):
if isinstance(rhs, Node):
rhs = Expression(rhs, OP.CONCAT, '%')
else:
rhs = self._escape_like_expr(rhs, '%s%%')
return Expression(self, OP.ILIKE, rhs)
def endswith(self, rhs):
if isinstance(rhs, Node):
rhs = Expression('%', OP.CONCAT, rhs)
else:
rhs = self._escape_like_expr(rhs, '%%%s')
return Expression(self, OP.ILIKE, rhs)
def between(self, lo, hi):
return Expression(self, OP.BETWEEN, NodeList((lo, SQL('AND'), hi)))
def concat(self, rhs):
return StringExpression(self, OP.CONCAT, rhs)
def regexp(self, rhs):
return Expression(self, OP.REGEXP, rhs)
def iregexp(self, rhs):
return Expression(self, OP.IREGEXP, rhs)
def __getitem__(self, item):
if isinstance(item, slice):
if item.start is None or item.stop is None:
raise ValueError('BETWEEN range must have both a start- and '
'end-point.')
return self.between(item.start, item.stop)
return self == item
def distinct(self):
return NodeList((SQL('DISTINCT'), self))
def collate(self, collation):
return NodeList((self, SQL('COLLATE %s' % collation)))
def get_sort_key(self, ctx):
return ()
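# Sketch of the operator overloads above ("col" stands for any column node):
#
#   (col == None)        # Renders IS NULL (see __eq__).
#   (col << [1, 2, 3])   # Renders IN (...); __lshift__ maps to OP.IN.
#   col.between(1, 10)   # BETWEEN 1 AND 10.
#   col[1:10]            # Same BETWEEN, via the __getitem__ slice support.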
class Column(ColumnBase):
def __init__(self, source, name):
self.source = source
self.name = name
def get_sort_key(self, ctx):
if ctx.scope == SCOPE_VALUES:
return (self.name,)
else:
return self.source.get_sort_key(ctx) + (self.name,)
def __hash__(self):
return hash((self.source, self.name))
def __sql__(self, ctx):
if ctx.scope == SCOPE_VALUES:
return ctx.sql(Entity(self.name))
else:
with ctx.scope_column():
return ctx.sql(self.source).literal('.').sql(Entity(self.name))
class WrappedNode(ColumnBase):
def __init__(self, node):
self.node = node
self._coerce = getattr(node, '_coerce', True)
self._converter = getattr(node, '_converter', None)
def is_alias(self):
return self.node.is_alias()
def unwrap(self):
return self.node.unwrap()
class EntityFactory(object):
__slots__ = ('node',)
def __init__(self, node):
self.node = node
def __getattr__(self, attr):
return Entity(self.node, attr)
class _DynamicEntity(object):
__slots__ = ()
def __get__(self, instance, instance_type=None):
if instance is not None:
return EntityFactory(instance._alias) # Implements __getattr__().
return self
class Alias(WrappedNode):
c = _DynamicEntity()
def __init__(self, node, alias):
super(Alias, self).__init__(node)
self._alias = alias
def __hash__(self):
return hash(self._alias)
@property
def name(self):
return self._alias
@name.setter
def name(self, value):
self._alias = value
def alias(self, alias=None):
if alias is None:
return self.node
else:
return Alias(self.node, alias)
def unalias(self):
return self.node
def is_alias(self):
return True
def __sql__(self, ctx):
if ctx.scope == SCOPE_SOURCE:
return (ctx
.sql(self.node)
.literal(' AS ')
.sql(Entity(self._alias)))
else:
return ctx.sql(Entity(self._alias))
class Negated(WrappedNode):
def __invert__(self):
return self.node
def __sql__(self, ctx):
return ctx.literal('NOT ').sql(self.node)
class BitwiseMixin(object):
def __and__(self, other):
return self.bin_and(other)
def __or__(self, other):
return self.bin_or(other)
def __sub__(self, other):
return self.bin_and(other.bin_negated())
def __invert__(self):
return BitwiseNegated(self)
class BitwiseNegated(BitwiseMixin, WrappedNode):
op = OP.BITWISE_NEGATION
def __invert__(self):
return self.node
def __sql__(self, ctx):
if ctx.state.operations:
op_sql = ctx.state.operations.get(self.op, self.op)
else:
op_sql = self.op
return ctx.literal(op_sql).sql(self.node)
class Value(ColumnBase):
def __init__(self, value, converter=None, unpack=True):
self.value = value
self.converter = converter
self.multi = unpack and isinstance(self.value, multi_types)
if self.multi:
self.values = []
for item in self.value:
if isinstance(item, Node):
self.values.append(item)
else:
self.values.append(Value(item, self.converter))
def __sql__(self, ctx):
if self.multi:
# For multi-part values (e.g. lists of IDs).
return ctx.sql(EnclosedNodeList(self.values))
return ctx.value(self.value, self.converter)
class ValueLiterals(WrappedNode):
def __sql__(self, ctx):
with ctx(value_literals=True):
return ctx.sql(self.node)
def AsIs(value):
return Value(value, unpack=False)
class Cast(WrappedNode):
def __init__(self, node, cast):
super(Cast, self).__init__(node)
self._cast = cast
self._coerce = False
def __sql__(self, ctx):
return (ctx
.literal('CAST(')
.sql(self.node)
.literal(' AS %s)' % self._cast))
class Ordering(WrappedNode):
def __init__(self, node, direction, collation=None, nulls=None):
super(Ordering, self).__init__(node)
self.direction = direction
self.collation = collation
self.nulls = nulls
if nulls and nulls.lower() not in ('first', 'last'):
raise ValueError('Ordering nulls= parameter must be "first" or '
'"last", got: %s' % nulls)
def collate(self, collation=None):
return Ordering(self.node, self.direction, collation)
def _null_ordering_case(self, nulls):
if nulls.lower() == 'last':
ifnull, notnull = 1, 0
elif nulls.lower() == 'first':
ifnull, notnull = 0, 1
else:
raise ValueError('unsupported value for nulls= ordering.')
return Case(None, ((self.node.is_null(), ifnull),), notnull)
def __sql__(self, ctx):
if self.nulls and not ctx.state.nulls_ordering:
ctx.sql(self._null_ordering_case(self.nulls)).literal(', ')
ctx.sql(self.node).literal(' %s' % self.direction)
if self.collation:
ctx.literal(' COLLATE %s' % self.collation)
if self.nulls and ctx.state.nulls_ordering:
ctx.literal(' NULLS %s' % self.nulls)
return ctx
def Asc(node, collation=None, nulls=None):
return Ordering(node, 'ASC', collation, nulls)
def Desc(node, collation=None, nulls=None):
return Ordering(node, 'DESC', collation, nulls)
class Expression(ColumnBase):
def __init__(self, lhs, op, rhs, flat=False):
self.lhs = lhs
self.op = op
self.rhs = rhs
self.flat = flat
def __sql__(self, ctx):
overrides = {'parentheses': not self.flat, 'in_expr': True}
# First attempt to unwrap the node on the left-hand-side, so that we
# can get at the underlying Field if one is present.
node = raw_node = self.lhs
if isinstance(raw_node, WrappedNode):
node = raw_node.unwrap()
# Set up the appropriate converter if we have a field on the left side.
if isinstance(node, Field) and raw_node._coerce:
overrides['converter'] = node.db_value
overrides['is_fk_expr'] = isinstance(node, ForeignKeyField)
else:
overrides['converter'] = None
if ctx.state.operations:
op_sql = ctx.state.operations.get(self.op, self.op)
else:
op_sql = self.op
with ctx(**overrides):
# Postgresql reports an error for IN/NOT IN (), so convert to
# the equivalent boolean expression.
op_in = self.op == OP.IN or self.op == OP.NOT_IN
if op_in and ctx.as_new().parse(self.rhs)[0] == '()':
return ctx.literal('0 = 1' if self.op == OP.IN else '1 = 1')
return (ctx
.sql(self.lhs)
.literal(' %s ' % op_sql)
.sql(self.rhs))
class StringExpression(Expression):
def __add__(self, rhs):
return self.concat(rhs)
def __radd__(self, lhs):
return StringExpression(lhs, OP.CONCAT, self)
class Entity(ColumnBase):
def __init__(self, *path):
self._path = [part.replace('"', '""') for part in path if part]
def __getattr__(self, attr):
return Entity(*self._path + [attr])
def get_sort_key(self, ctx):
return tuple(self._path)
def __hash__(self):
return hash((self.__class__.__name__, tuple(self._path)))
def __sql__(self, ctx):
return ctx.literal(quote(self._path, ctx.state.quote or '""'))
class SQL(ColumnBase):
def __init__(self, sql, params=None):
self.sql = sql
self.params = params
def __sql__(self, ctx):
ctx.literal(self.sql)
if self.params:
for param in self.params:
ctx.value(param, False, add_param=False)
return ctx
def Check(constraint, name=None):
check = SQL('CHECK (%s)' % constraint)
if not name:
return check
return NodeList((SQL('CONSTRAINT'), Entity(name), check))
class Function(ColumnBase):
def __init__(self, name, arguments, coerce=True, python_value=None):
self.name = name
self.arguments = arguments
self._filter = None
self._order_by = None
self._python_value = python_value
if name and name.lower() in ('sum', 'count', 'cast', 'array_agg'):
self._coerce = False
else:
self._coerce = coerce
def __getattr__(self, attr):
def decorator(*args, **kwargs):
return Function(attr, args, **kwargs)
return decorator
@Node.copy
def filter(self, where=None):
self._filter = where
@Node.copy
def order_by(self, *ordering):
self._order_by = ordering
@Node.copy
def python_value(self, func=None):
self._python_value = func
def over(self, partition_by=None, order_by=None, start=None, end=None,
frame_type=None, window=None, exclude=None):
if isinstance(partition_by, Window) and window is None:
window = partition_by
if window is not None:
node = WindowAlias(window)
else:
node = Window(partition_by=partition_by, order_by=order_by,
start=start, end=end, frame_type=frame_type,
exclude=exclude, _inline=True)
return NodeList((self, SQL('OVER'), node))
def __sql__(self, ctx):
ctx.literal(self.name)
if not len(self.arguments):
ctx.literal('()')
else:
args = self.arguments
# If this is an ordered aggregate, then we will modify the last
# argument to append the ORDER BY ... clause. We do this to avoid
# double-wrapping any expression args in parentheses, as NodeList
# has a special check (hack) in place to work around this.
if self._order_by:
args = list(args)
args[-1] = NodeList((args[-1], SQL('ORDER BY'),
CommaNodeList(self._order_by)))
with ctx(in_function=True, function_arg_count=len(self.arguments)):
ctx.sql(EnclosedNodeList([
(arg if isinstance(arg, Node) else Value(arg, False))
for arg in args]))
if self._filter:
ctx.literal(' FILTER (WHERE ').sql(self._filter).literal(')')
return ctx
fn = Function(None, None)
class Window(Node):
# Frame start/end and frame exclusion.
CURRENT_ROW = SQL('CURRENT ROW')
GROUP = SQL('GROUP')
TIES = SQL('TIES')
NO_OTHERS = SQL('NO OTHERS')
# Frame types.
GROUPS = 'GROUPS'
RANGE = 'RANGE'
ROWS = 'ROWS'
def __init__(self, partition_by=None, order_by=None, start=None, end=None,
frame_type=None, extends=None, exclude=None, alias=None,
_inline=False):
super(Window, self).__init__()
if start is not None and not isinstance(start, SQL):
start = SQL(start)
if end is not None and not isinstance(end, SQL):
end = SQL(end)
self.partition_by = ensure_tuple(partition_by)
self.order_by = ensure_tuple(order_by)
self.start = start
self.end = end
if self.start is None and self.end is not None:
raise ValueError('Cannot specify WINDOW end without start.')
self._alias = alias or 'w'
self._inline = _inline
self.frame_type = frame_type
self._extends = extends
self._exclude = exclude
def alias(self, alias=None):
self._alias = alias or 'w'
return self
@Node.copy
def as_range(self):
self.frame_type = Window.RANGE
@Node.copy
def as_rows(self):
self.frame_type = Window.ROWS
@Node.copy
def as_groups(self):
self.frame_type = Window.GROUPS
@Node.copy
def extends(self, window=None):
self._extends = window
@Node.copy
def exclude(self, frame_exclusion=None):
if isinstance(frame_exclusion, basestring):
frame_exclusion = SQL(frame_exclusion)
self._exclude = frame_exclusion
@staticmethod
def following(value=None):
if value is None:
return SQL('UNBOUNDED FOLLOWING')
return SQL('%d FOLLOWING' % value)
@staticmethod
def preceding(value=None):
if value is None:
return SQL('UNBOUNDED PRECEDING')
return SQL('%d PRECEDING' % value)
def __sql__(self, ctx):
if ctx.scope != SCOPE_SOURCE and not self._inline:
ctx.literal(self._alias)
ctx.literal(' AS ')
with ctx(parentheses=True):
parts = []
if self._extends is not None:
ext = self._extends
if isinstance(ext, Window):
ext = SQL(ext._alias)
elif isinstance(ext, basestring):
ext = SQL(ext)
parts.append(ext)
if self.partition_by:
parts.extend((
SQL('PARTITION BY'),
CommaNodeList(self.partition_by)))
if self.order_by:
parts.extend((
SQL('ORDER BY'),
CommaNodeList(self.order_by)))
if self.start is not None and self.end is not None:
frame = self.frame_type or 'ROWS'
parts.extend((
SQL('%s BETWEEN' % frame),
self.start,
SQL('AND'),
self.end))
elif self.start is not None:
parts.extend((SQL(self.frame_type or 'ROWS'), self.start))
elif self.frame_type is not None:
parts.append(SQL('%s UNBOUNDED PRECEDING' % self.frame_type))
if self._exclude is not None:
parts.extend((SQL('EXCLUDE'), self._exclude))
ctx.sql(NodeList(parts))
return ctx
class WindowAlias(Node):
def __init__(self, window):
self.window = window
def alias(self, window_alias):
self.window._alias = window_alias
return self
def __sql__(self, ctx):
return ctx.literal(self.window._alias or 'w')
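# Sketch: an inline window frame built with the helpers above ("Sale" is a
# hypothetical model):
#
#   fn.SUM(Sale.amount).over(order_by=[Sale.date],
#                            start=Window.preceding(2),  # 2 PRECEDING
#                            end=Window.CURRENT_ROW)
#   # -> SUM(...) OVER (ORDER BY ... ROWS BETWEEN 2 PRECEDING AND CURRENT ROW)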
class ForUpdate(Node):
def __init__(self, expr, of=None, nowait=None):
expr = 'FOR UPDATE' if expr is True else expr
if expr.lower().endswith('nowait'):
expr = expr[:-7] # Strip off the "nowait" bit.
nowait = True
self._expr = expr
if of is not None and not isinstance(of, (list, set, tuple)):
of = (of,)
self._of = of
self._nowait = nowait
def __sql__(self, ctx):
ctx.literal(self._expr)
if self._of is not None:
ctx.literal(' OF ').sql(CommaNodeList(self._of))
if self._nowait:
ctx.literal(' NOWAIT')
return ctx
def Case(predicate, expression_tuples, default=None):
clauses = [SQL('CASE')]
if predicate is not None:
clauses.append(predicate)
for expr, value in expression_tuples:
clauses.extend((SQL('WHEN'), expr, SQL('THEN'), value))
if default is not None:
clauses.extend((SQL('ELSE'), default))
clauses.append(SQL('END'))
return NodeList(clauses)
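# Sketch: Case() assembles a simple or searched CASE node ("status" is a
# hypothetical column):
#
#   Case(status, ((0, 'live'), (1, 'done')), 'unknown')
#   # -> CASE "status" WHEN 0 THEN 'live' WHEN 1 THEN 'done'
#   #    ELSE 'unknown' END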
class NodeList(ColumnBase):
def __init__(self, nodes, glue=' ', parens=False):
self.nodes = nodes
self.glue = glue
self.parens = parens
if parens and len(self.nodes) == 1 and \
isinstance(self.nodes[0], Expression) and \
not self.nodes[0].flat:
# Hack to avoid double-parentheses.
self.nodes = (self.nodes[0].clone(),)
self.nodes[0].flat = True
def __sql__(self, ctx):
n_nodes = len(self.nodes)
if n_nodes == 0:
return ctx.literal('()') if self.parens else ctx
with ctx(parentheses=self.parens):
for i in range(n_nodes - 1):
ctx.sql(self.nodes[i])
ctx.literal(self.glue)
ctx.sql(self.nodes[n_nodes - 1])
return ctx
def CommaNodeList(nodes):
return NodeList(nodes, ', ')
def EnclosedNodeList(nodes):
return NodeList(nodes, ', ', True)
class _Namespace(Node):
__slots__ = ('_name',)
def __init__(self, name):
self._name = name
def __getattr__(self, attr):
return NamespaceAttribute(self, attr)
__getitem__ = __getattr__
class NamespaceAttribute(ColumnBase):
def __init__(self, namespace, attribute):
self._namespace = namespace
self._attribute = attribute
def __sql__(self, ctx):
return (ctx
.literal(self._namespace._name + '.')
.sql(Entity(self._attribute)))
EXCLUDED = _Namespace('EXCLUDED')
class DQ(ColumnBase):
def __init__(self, **query):
super(DQ, self).__init__()
self.query = query
self._negated = False
@Node.copy
def __invert__(self):
self._negated = not self._negated
def clone(self):
node = DQ(**self.query)
node._negated = self._negated
return node
#: Represent a row tuple.
Tuple = lambda *a: EnclosedNodeList(a)
class QualifiedNames(WrappedNode):
def __sql__(self, ctx):
with ctx.scope_column():
return ctx.sql(self.node)
def qualify_names(node):
# Search a node hierarchy to ensure that any column-like objects are
# referenced using fully-qualified names.
if isinstance(node, Expression):
return node.__class__(qualify_names(node.lhs), node.op,
qualify_names(node.rhs), node.flat)
elif isinstance(node, ColumnBase):
return QualifiedNames(node)
return node
class OnConflict(Node):
def __init__(self, action=None, update=None, preserve=None, where=None,
conflict_target=None, conflict_where=None,
conflict_constraint=None):
self._action = action
self._update = update
self._preserve = ensure_tuple(preserve)
self._where = where
if conflict_target is not None and conflict_constraint is not None:
raise ValueError('only one of "conflict_target" and '
'"conflict_constraint" may be specified.')
self._conflict_target = ensure_tuple(conflict_target)
self._conflict_where = conflict_where
self._conflict_constraint = conflict_constraint
def get_conflict_statement(self, ctx, query):
return ctx.state.conflict_statement(self, query)
def get_conflict_update(self, ctx, query):
return ctx.state.conflict_update(self, query)
@Node.copy
def preserve(self, *columns):
self._preserve = columns
@Node.copy
def update(self, _data=None, **kwargs):
if _data and kwargs and not isinstance(_data, dict):
raise ValueError('Cannot mix data with keyword arguments in the '
'OnConflict update method.')
_data = _data or {}
if kwargs:
_data.update(kwargs)
self._update = _data
@Node.copy
def where(self, *expressions):
if self._where is not None:
expressions = (self._where,) + expressions
self._where = reduce(operator.and_, expressions)
@Node.copy
def conflict_target(self, *constraints):
self._conflict_constraint = None
self._conflict_target = constraints
@Node.copy
def conflict_where(self, *expressions):
if self._conflict_where is not None:
expressions = (self._conflict_where,) + expressions
self._conflict_where = reduce(operator.and_, expressions)
@Node.copy
def conflict_constraint(self, constraint):
self._conflict_constraint = constraint
self._conflict_target = None
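# Sketch of an upsert assembled from these pieces (the "User" model is
# hypothetical, and Insert.on_conflict() forwarding these keyword arguments
# to OnConflict is an assumption based on its use elsewhere in this module):
#
#   (User.insert(username='alice', count=1)
#        .on_conflict(conflict_target=[User.username],
#                     update={User.count: User.count + 1}))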
def database_required(method):
@wraps(method)
def inner(self, database=None, *args, **kwargs):
database = self._database if database is None else database
if not database:
raise InterfaceError('Query must be bound to a database in order '
'to call "%s".' % method.__name__)
return method(self, database, *args, **kwargs)
return inner
# BASE QUERY INTERFACE.
class BaseQuery(Node):
default_row_type = ROW.DICT
def __init__(self, _database=None, **kwargs):
self._database = _database
self._cursor_wrapper = None
self._row_type = None
self._constructor = None
super(BaseQuery, self).__init__(**kwargs)
def bind(self, database=None):
self._database = database
return self
def clone(self):
query = super(BaseQuery, self).clone()
query._cursor_wrapper = None
return query
@Node.copy
def dicts(self, as_dict=True):
self._row_type = ROW.DICT if as_dict else None
return self
@Node.copy
def tuples(self, as_tuple=True):
self._row_type = ROW.TUPLE if as_tuple else None
return self
@Node.copy
def namedtuples(self, as_namedtuple=True):
self._row_type = ROW.NAMED_TUPLE if as_namedtuple else None
return self
@Node.copy
def objects(self, constructor=None):
self._row_type = ROW.CONSTRUCTOR if constructor else None
self._constructor = constructor
return self
def _get_cursor_wrapper(self, cursor):
row_type = self._row_type or self.default_row_type
if row_type == ROW.DICT:
return DictCursorWrapper(cursor)
elif row_type == ROW.TUPLE:
return CursorWrapper(cursor)
elif row_type == ROW.NAMED_TUPLE:
return NamedTupleCursorWrapper(cursor)
elif row_type == ROW.CONSTRUCTOR:
return ObjectCursorWrapper(cursor, self._constructor)
else:
raise ValueError('Unrecognized row type: "%s".' % row_type)
def __sql__(self, ctx):
raise NotImplementedError
def sql(self):
if self._database:
context = self._database.get_sql_context()
else:
context = Context()
return context.parse(self)
@database_required
def execute(self, database):
return self._execute(database)
def _execute(self, database):
raise NotImplementedError
def iterator(self, database=None):
return iter(self.execute(database).iterator())
def _ensure_execution(self):
if not self._cursor_wrapper:
if not self._database:
raise ValueError('Query has not been executed.')
self.execute()
def __iter__(self):
self._ensure_execution()
return iter(self._cursor_wrapper)
def __getitem__(self, value):
self._ensure_execution()
if isinstance(value, slice):
index = value.stop
else:
index = value
if index is not None:
index = index + 1 if index >= 0 else 0
self._cursor_wrapper.fill_cache(index)
return self._cursor_wrapper.row_cache[value]
def __len__(self):
self._ensure_execution()
return len(self._cursor_wrapper)
def __str__(self):
return query_to_string(self)
class RawQuery(BaseQuery):
def __init__(self, sql=None, params=None, **kwargs):
super(RawQuery, self).__init__(**kwargs)
self._sql = sql
self._params = params
def __sql__(self, ctx):
ctx.literal(self._sql)
if self._params:
for param in self._params:
ctx.value(param, add_param=False)
return ctx
def _execute(self, database):
if self._cursor_wrapper is None:
cursor = database.execute(self)
self._cursor_wrapper = self._get_cursor_wrapper(cursor)
return self._cursor_wrapper
class Query(BaseQuery):
def __init__(self, where=None, order_by=None, limit=None, offset=None,
**kwargs):
super(Query, self).__init__(**kwargs)
self._where = where
self._order_by = order_by
self._limit = limit
self._offset = offset
self._cte_list = None
@Node.copy
def with_cte(self, *cte_list):
self._cte_list = cte_list
@Node.copy
def where(self, *expressions):
if self._where is not None:
expressions = (self._where,) + expressions
self._where = reduce(operator.and_, expressions)
@Node.copy
def orwhere(self, *expressions):
if self._where is not None:
expressions = (self._where,) + expressions
self._where = reduce(operator.or_, expressions)
@Node.copy
def order_by(self, *values):
self._order_by = values
@Node.copy
def order_by_extend(self, *values):
self._order_by = ((self._order_by or ()) + values) or None
@Node.copy
def limit(self, value=None):
self._limit = value
@Node.copy
def offset(self, value=None):
self._offset = value
@Node.copy
def paginate(self, page, paginate_by=20):
if page > 0:
page -= 1
self._limit = paginate_by
self._offset = page * paginate_by
def _apply_ordering(self, ctx):
if self._order_by:
(ctx
.literal(' ORDER BY ')
.sql(CommaNodeList(self._order_by)))
if self._limit is not None or (self._offset is not None and
ctx.state.limit_max):
limit = ctx.state.limit_max if self._limit is None else self._limit
ctx.literal(' LIMIT ').sql(limit)
if self._offset is not None:
ctx.literal(' OFFSET ').sql(self._offset)
return ctx
def __sql__(self, ctx):
if self._cte_list:
# The CTE scope is only used at the very beginning of the query,
# when we are describing the various CTEs we will be using.
recursive = any(cte._recursive for cte in self._cte_list)
# Explicitly disable the "subquery" flag here, so as to avoid
# unnecessary parentheses around subsequent selects.
with ctx.scope_cte(subquery=False):
(ctx
.literal('WITH RECURSIVE ' if recursive else 'WITH ')
.sql(CommaNodeList(self._cte_list))
.literal(' '))
return ctx
def __compound_select__(operation, inverted=False):
@__bind_database__
def method(self, other):
if inverted:
self, other = other, self
return CompoundSelectQuery(self, operation, other)
return method
class SelectQuery(Query):
union_all = __add__ = __compound_select__('UNION ALL')
union = __or__ = __compound_select__('UNION')
intersect = __and__ = __compound_select__('INTERSECT')
except_ = __sub__ = __compound_select__('EXCEPT')
__radd__ = __compound_select__('UNION ALL', inverted=True)
__ror__ = __compound_select__('UNION', inverted=True)
__rand__ = __compound_select__('INTERSECT', inverted=True)
__rsub__ = __compound_select__('EXCEPT', inverted=True)
def select_from(self, *columns):
if not columns:
raise ValueError('select_from() must specify one or more columns.')
query = (Select((self,), columns)
.bind(self._database))
if getattr(self, 'model', None) is not None:
# Bind to the sub-select's model type, if defined.
query = query.objects(self.model)
return query
class SelectBase(_HashableSource, Source, SelectQuery):
def _get_hash(self):
return hash((self.__class__, self._alias or id(self)))
def _execute(self, database):
if self._cursor_wrapper is None:
cursor = database.execute(self)
self._cursor_wrapper = self._get_cursor_wrapper(cursor)
return self._cursor_wrapper
@database_required
def peek(self, database, n=1):
rows = self.execute(database)[:n]
if rows:
return rows[0] if n == 1 else rows
@database_required
def first(self, database, n=1):
if self._limit != n:
self._limit = n
self._cursor_wrapper = None
return self.peek(database, n=n)
@database_required
def scalar(self, database, as_tuple=False, as_dict=False):
if as_dict:
return self.dicts().peek(database)
row = self.tuples().peek(database)
return row[0] if row and not as_tuple else row
@database_required
def count(self, database, clear_limit=False):
clone = self.order_by().alias('_wrapped')
if clear_limit:
clone._limit = clone._offset = None
try:
if clone._having is None and clone._group_by is None and \
clone._windows is None and clone._distinct is None and \
clone._simple_distinct is not True:
clone = clone.select(SQL('1'))
except AttributeError:
pass
return Select([clone], [fn.COUNT(SQL('1'))]).scalar(database)
@database_required
def exists(self, database):
clone = self.columns(SQL('1'))
clone._limit = 1
clone._offset = None
return bool(clone.scalar())
@database_required
def get(self, database):
self._cursor_wrapper = None
try:
return self.execute(database)[0]
except IndexError:
pass
# QUERY IMPLEMENTATIONS.
class CompoundSelectQuery(SelectBase):
def __init__(self, lhs, op, rhs):
super(CompoundSelectQuery, self).__init__()
self.lhs = lhs
self.op = op
self.rhs = rhs
@property
def _returning(self):
return self.lhs._returning
@database_required
def exists(self, database):
query = Select((self.limit(1),), (SQL('1'),)).bind(database)
return bool(query.scalar())
def _get_query_key(self):
return (self.lhs.get_query_key(), self.rhs.get_query_key())
def _wrap_parens(self, ctx, subq):
csq_setting = ctx.state.compound_select_parentheses
if not csq_setting or csq_setting == CSQ_PARENTHESES_NEVER:
return False
elif csq_setting == CSQ_PARENTHESES_ALWAYS:
return True
elif csq_setting == CSQ_PARENTHESES_UNNESTED:
if ctx.state.in_expr or ctx.state.in_function:
# If this compound select query is being used inside an
# expression, e.g., an IN or EXISTS().
return False
# If the query on the left or right is itself a compound select
# query, then we do not apply parentheses. However, if it is a
# regular SELECT query, we will apply parentheses.
return not isinstance(subq, CompoundSelectQuery)
def __sql__(self, ctx):
if ctx.scope == SCOPE_COLUMN:
return self.apply_column(ctx)
# Call parent method to handle any CTEs.
super(CompoundSelectQuery, self).__sql__(ctx)
outer_parens = ctx.subquery or (ctx.scope == SCOPE_SOURCE)
with ctx(parentheses=outer_parens):
# Should the left-hand query be wrapped in parentheses?
lhs_parens = self._wrap_parens(ctx, self.lhs)
with ctx.scope_normal(parentheses=lhs_parens, subquery=False):
ctx.sql(self.lhs)
ctx.literal(' %s ' % self.op)
with ctx.push_alias():
# Should the right-hand query be wrapped in parentheses?
rhs_parens = self._wrap_parens(ctx, self.rhs)
with ctx.scope_normal(parentheses=rhs_parens, subquery=False):
ctx.sql(self.rhs)
# Apply ORDER BY, LIMIT, OFFSET. We use the "values" scope so that
# entity names are not fully-qualified. This is a bit of a hack, as
# we're relying on the logic in Column.__sql__() to not fully
# qualify column names.
with ctx.scope_values():
self._apply_ordering(ctx)
return self.apply_alias(ctx)
class Select(SelectBase):
def __init__(self, from_list=None, columns=None, group_by=None,
having=None, distinct=None, windows=None, for_update=None,
for_update_of=None, nowait=None, lateral=None, **kwargs):
super(Select, self).__init__(**kwargs)
self._from_list = (list(from_list) if isinstance(from_list, tuple)
else from_list) or []
self._returning = columns
self._group_by = group_by
self._having = having
self._windows = None
self._for_update = for_update # XXX: consider reorganizing.
self._for_update_of = for_update_of
self._for_update_nowait = nowait
self._lateral = lateral
self._distinct = self._simple_distinct = None
if distinct:
if isinstance(distinct, bool):
self._simple_distinct = distinct
else:
self._distinct = distinct
self._cursor_wrapper = None
def clone(self):
clone = super(Select, self).clone()
if clone._from_list:
clone._from_list = list(clone._from_list)
return clone
@Node.copy
def columns(self, *columns, **kwargs):
self._returning = columns
select = columns
@Node.copy
def select_extend(self, *columns):
self._returning = tuple(self._returning) + columns
@property
def selected_columns(self):
return self._returning
@selected_columns.setter
def selected_columns(self, value):
self._returning = value
@Node.copy
def from_(self, *sources):
self._from_list = list(sources)
@Node.copy
def join(self, dest, join_type=JOIN.INNER, on=None):
if not self._from_list:
raise ValueError('No sources to join on.')
item = self._from_list.pop()
self._from_list.append(Join(item, dest, join_type, on))
def left_outer_join(self, dest, on=None):
return self.join(dest, JOIN.LEFT_OUTER, on)
@Node.copy
def group_by(self, *columns):
grouping = []
for column in columns:
if isinstance(column, Table):
if not column._columns:
raise ValueError('Cannot pass a table to group_by() that '
'does not have columns explicitly '
'declared.')
grouping.extend([getattr(column, col_name)
for col_name in column._columns])
else:
grouping.append(column)
self._group_by = grouping
def group_by_extend(self, *values):
"""@Node.copy used from group_by() call"""
group_by = tuple(self._group_by or ()) + values
return self.group_by(*group_by)
@Node.copy
def having(self, *expressions):
if self._having is not None:
expressions = (self._having,) + expressions
self._having = reduce(operator.and_, expressions)
@Node.copy
def distinct(self, *columns):
if len(columns) == 1 and (columns[0] is True or columns[0] is False):
self._simple_distinct = columns[0]
else:
self._simple_distinct = False
self._distinct = columns
@Node.copy
def window(self, *windows):
self._windows = windows if windows else None
@Node.copy
def for_update(self, for_update=True, of=None, nowait=None):
if not for_update and (of is not None or nowait):
for_update = True
self._for_update = for_update
self._for_update_of = of
self._for_update_nowait = nowait
@Node.copy
def lateral(self, lateral=True):
self._lateral = lateral
def _get_query_key(self):
return self._alias
def __sql_selection__(self, ctx, is_subquery=False):
return ctx.sql(CommaNodeList(self._returning))
def __sql__(self, ctx):
if ctx.scope == SCOPE_COLUMN:
return self.apply_column(ctx)
if self._lateral and ctx.scope == SCOPE_SOURCE:
ctx.literal('LATERAL ')
is_subquery = ctx.subquery
state = {
'converter': None,
'in_function': False,
'parentheses': is_subquery or (ctx.scope == SCOPE_SOURCE),
'subquery': True,
}
if ctx.state.in_function and ctx.state.function_arg_count == 1:
state['parentheses'] = False
with ctx.scope_normal(**state):
# Defer calling parent SQL until here. This ensures that any CTEs
# for this query will be properly nested if this query is a
# sub-select or is used in an expression. See GH#1809 for example.
super(Select, self).__sql__(ctx)
ctx.literal('SELECT ')
if self._simple_distinct or self._distinct is not None:
ctx.literal('DISTINCT ')
if self._distinct:
(ctx
.literal('ON ')
.sql(EnclosedNodeList(self._distinct))
.literal(' '))
with ctx.scope_source():
ctx = self.__sql_selection__(ctx, is_subquery)
if self._from_list:
with ctx.scope_source(parentheses=False):
ctx.literal(' FROM ').sql(CommaNodeList(self._from_list))
if self._where is not None:
ctx.literal(' WHERE ').sql(self._where)
if self._group_by:
ctx.literal(' GROUP BY ').sql(CommaNodeList(self._group_by))
if self._having is not None:
ctx.literal(' HAVING ').sql(self._having)
if self._windows is not None:
ctx.literal(' WINDOW ')
ctx.sql(CommaNodeList(self._windows))
# Apply ORDER BY, LIMIT, OFFSET.
self._apply_ordering(ctx)
if self._for_update:
if not ctx.state.for_update:
raise ValueError('FOR UPDATE specified but not supported '
'by database.')
ctx.literal(' ')
ctx.sql(ForUpdate(self._for_update, self._for_update_of,
self._for_update_nowait))
# If the subquery is inside a function -or- we are evaluating a
# subquery on either side of an expression w/o an explicit alias, do
# not generate an alias + AS clause.
if ctx.state.in_function or (ctx.state.in_expr and
self._alias is None):
return ctx
return self.apply_alias(ctx)
class _WriteQuery(Query):
def __init__(self, table, returning=None, **kwargs):
self.table = table
self._returning = returning
self._return_cursor = True if returning else False
super(_WriteQuery, self).__init__(**kwargs)
def cte(self, name, recursive=False, columns=None, materialized=None):
return CTE(name, self, recursive=recursive, columns=columns,
materialized=materialized)
@Node.copy
def returning(self, *returning):
self._returning = returning
self._return_cursor = True if returning else False
def apply_returning(self, ctx):
if self._returning:
with ctx.scope_source():
ctx.literal(' RETURNING ').sql(CommaNodeList(self._returning))
return ctx
def _execute(self, database):
if self._returning:
cursor = self.execute_returning(database)
else:
cursor = database.execute(self)
return self.handle_result(database, cursor)
def execute_returning(self, database):
if self._cursor_wrapper is None:
cursor = database.execute(self)
self._cursor_wrapper = self._get_cursor_wrapper(cursor)
return self._cursor_wrapper
def handle_result(self, database, cursor):
if self._return_cursor:
return cursor
return database.rows_affected(cursor)
def _set_table_alias(self, ctx):
ctx.alias_manager[self.table] = self.table.__name__
def __sql__(self, ctx):
super(_WriteQuery, self).__sql__(ctx)
# We explicitly set the table alias to the table's name, which ensures
# that if a sub-select references a column on the outer table, we won't
# assign it a new alias (e.g. t2) but will refer to it as table.column.
self._set_table_alias(ctx)
return ctx
class Update(_WriteQuery):
def __init__(self, table, update=None, **kwargs):
super(Update, self).__init__(table, **kwargs)
self._update = update
self._from = None
@Node.copy
def from_(self, *sources):
self._from = sources
def __sql__(self, ctx):
super(Update, self).__sql__(ctx)
with ctx.scope_values(subquery=True):
ctx.literal('UPDATE ')
expressions = []
for k, v in sorted(self._update.items(), key=ctx.column_sort_key):
if not isinstance(v, Node):
if isinstance(k, Field):
v = k.to_value(v)
else:
v = Value(v, unpack=False)
elif isinstance(v, Model) and isinstance(k, ForeignKeyField):
# NB: we want to ensure that when passed a model instance
# in the context of a foreign-key, we apply the fk-specific
# adaptation of the model.
v = k.to_value(v)
if not isinstance(v, Value):
v = qualify_names(v)
expressions.append(NodeList((k, SQL('='), v)))
(ctx
.sql(self.table)
.literal(' SET ')
.sql(CommaNodeList(expressions)))
if self._from:
with ctx.scope_source(parentheses=False):
ctx.literal(' FROM ').sql(CommaNodeList(self._from))
if self._where:
with ctx.scope_normal():
ctx.literal(' WHERE ').sql(self._where)
self._apply_ordering(ctx)
return self.apply_returning(ctx)
class Insert(_WriteQuery):
SIMPLE = 0
QUERY = 1
MULTI = 2
class DefaultValuesException(Exception):
pass
def __init__(self, table, insert=None, columns=None, on_conflict=None,
**kwargs):
super(Insert, self).__init__(table, **kwargs)
self._insert = insert
self._columns = columns
self._on_conflict = on_conflict
self._query_type = None
self._as_rowcount = False
def where(self, *expressions):
raise NotImplementedError('INSERT queries cannot have a WHERE clause.')
@Node.copy
def as_rowcount(self, _as_rowcount=True):
self._as_rowcount = _as_rowcount
@Node.copy
def on_conflict_ignore(self, ignore=True):
self._on_conflict = OnConflict('IGNORE') if ignore else None
@Node.copy
def on_conflict_replace(self, replace=True):
self._on_conflict = OnConflict('REPLACE') if replace else None
@Node.copy
def on_conflict(self, *args, **kwargs):
self._on_conflict = (OnConflict(*args, **kwargs) if (args or kwargs)
else None)
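# Illustrative upsert via on_conflict() (a sketch; assumes a hypothetical
# `User` model on Postgres or SQLite 3.24+, not part of this module):
#
#     (User
#      .insert(username='huey', login_count=1)
#      .on_conflict(
#          conflict_target=[User.username],
#          update={User.login_count: User.login_count + 1})
#      .execute())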
def _simple_insert(self, ctx):
if not self._insert:
raise self.DefaultValuesException('Error: no data to insert.')
return self._generate_insert((self._insert,), ctx)
def get_default_data(self):
return {}
def get_default_columns(self):
if self.table._columns:
return [getattr(self.table, col) for col in self.table._columns
if col != self.table._primary_key]
def _generate_insert(self, insert, ctx):
rows_iter = iter(insert)
columns = self._columns
# Load and organize column defaults (if provided).
defaults = self.get_default_data()
# First figure out what columns are being inserted (if they weren't
# specified explicitly). Resulting columns are normalized and ordered.
if not columns:
try:
row = next(rows_iter)
except StopIteration:
raise self.DefaultValuesException('Error: no rows to insert.')
if not isinstance(row, Mapping):
columns = self.get_default_columns()
if columns is None:
raise ValueError('Bulk insert must specify columns.')
else:
# Infer column names from the dict of data being inserted.
accum = []
for column in row:
if isinstance(column, basestring):
column = getattr(self.table, column)
accum.append(column)
# Add any columns present in the default data that are not
# accounted for by the dictionary of row data.
column_set = set(accum)
for col in (set(defaults) - column_set):
accum.append(col)
columns = sorted(accum, key=lambda obj: obj.get_sort_key(ctx))
rows_iter = itertools.chain(iter((row,)), rows_iter)
else:
clean_columns = []
seen = set()
for column in columns:
if isinstance(column, basestring):
column_obj = getattr(self.table, column)
else:
column_obj = column
clean_columns.append(column_obj)
seen.add(column_obj)
columns = clean_columns
for col in sorted(defaults, key=lambda obj: obj.get_sort_key(ctx)):
if col not in seen:
columns.append(col)
fk_fields = set()
nullable_columns = set()
value_lookups = {}
for column in columns:
lookups = [column, column.name]
if isinstance(column, Field):
if column.name != column.column_name:
lookups.append(column.column_name)
if column.null:
nullable_columns.add(column)
if isinstance(column, ForeignKeyField):
fk_fields.add(column)
value_lookups[column] = lookups
ctx.sql(EnclosedNodeList(columns)).literal(' VALUES ')
columns_converters = [
(column, column.db_value if isinstance(column, Field) else None)
for column in columns]
all_values = []
for row in rows_iter:
values = []
is_dict = isinstance(row, Mapping)
for i, (column, converter) in enumerate(columns_converters):
try:
if is_dict:
# The logic is a bit convoluted, but in order to be
# flexible in what we accept (dict keyed by
# column/field, field name, or underlying column name),
# we try accessing the row data dict using each
# possible key. If no match is found, throw an error.
for lookup in value_lookups[column]:
try:
val = row[lookup]
except KeyError:
pass
else:
break
else:
raise KeyError
else:
val = row[i]
except (KeyError, IndexError):
if column in defaults:
val = defaults[column]
if callable_(val):
val = val()
elif column in nullable_columns:
val = None
else:
raise ValueError('Missing value for %s.' % column.name)
if not isinstance(val, Node) or (isinstance(val, Model) and
column in fk_fields):
val = Value(val, converter=converter, unpack=False)
values.append(val)
all_values.append(EnclosedNodeList(values))
if not all_values:
raise self.DefaultValuesException('Error: no data to insert.')
with ctx.scope_values(subquery=True):
return ctx.sql(CommaNodeList(all_values))
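# Illustrative multi-row INSERT exercising _generate_insert() (a sketch;
# assumes a hypothetical `User` model and the model-layer insert_many()
# helper). Rows may be dicts keyed by field, field name, or column name;
# missing values fall back to defaults, or NULL for nullable columns:
#
#     User.insert_many([{'username': 'huey'}, {'username': 'zaizee'}]).execute()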
def _query_insert(self, ctx):
return (ctx
.sql(EnclosedNodeList(self._columns))
.literal(' ')
.sql(self._insert))
def _default_values(self, ctx):
if not self._database:
return ctx.literal('DEFAULT VALUES')
return self._database.default_values_insert(ctx)
def __sql__(self, ctx):
super(Insert, self).__sql__(ctx)
with ctx.scope_values():
stmt = None
if self._on_conflict is not None:
stmt = self._on_conflict.get_conflict_statement(ctx, self)
(ctx
.sql(stmt or SQL('INSERT'))
.literal(' INTO ')
.sql(self.table)
.literal(' '))
if isinstance(self._insert, Mapping) and not self._columns:
try:
self._simple_insert(ctx)
except self.DefaultValuesException:
self._default_values(ctx)
self._query_type = Insert.SIMPLE
elif isinstance(self._insert, (SelectQuery, SQL)):
self._query_insert(ctx)
self._query_type = Insert.QUERY
else:
self._generate_insert(self._insert, ctx)
self._query_type = Insert.MULTI
if self._on_conflict is not None:
update = self._on_conflict.get_conflict_update(ctx, self)
if update is not None:
ctx.literal(' ').sql(update)
return self.apply_returning(ctx)
def _execute(self, database):
if self._returning is None and database.returning_clause \
and self.table._primary_key:
self._returning = (self.table._primary_key,)
try:
return super(Insert, self)._execute(database)
except self.DefaultValuesException:
pass
def handle_result(self, database, cursor):
if self._return_cursor:
return cursor
if self._as_rowcount:
return database.rows_affected(cursor)
return database.last_insert_id(cursor, self._query_type)
class Delete(_WriteQuery):
def __sql__(self, ctx):
super(Delete, self).__sql__(ctx)
with ctx.scope_values(subquery=True):
ctx.literal('DELETE FROM ').sql(self.table)
if self._where is not None:
with ctx.scope_normal():
ctx.literal(' WHERE ').sql(self._where)
self._apply_ordering(ctx)
return self.apply_returning(ctx)
class Index(Node):
def __init__(self, name, table, expressions, unique=False, safe=False,
where=None, using=None):
self._name = name
self._table = Entity(table) if not isinstance(table, Table) else table
self._expressions = expressions
self._where = where
self._unique = unique
self._safe = safe
self._using = using
@Node.copy
def safe(self, _safe=True):
self._safe = _safe
@Node.copy
def where(self, *expressions):
if self._where is not None:
expressions = (self._where,) + expressions
self._where = reduce(operator.and_, expressions)
@Node.copy
def using(self, _using=None):
self._using = _using
def __sql__(self, ctx):
statement = 'CREATE UNIQUE INDEX ' if self._unique else 'CREATE INDEX '
with ctx.scope_values(subquery=True):
ctx.literal(statement)
if self._safe:
ctx.literal('IF NOT EXISTS ')
# Sqlite uses CREATE INDEX <schema>.<name> ON <table>, whereas most
# others use: CREATE INDEX <name> ON <schema>.<table>.
if ctx.state.index_schema_prefix and \
isinstance(self._table, Table) and self._table._schema:
index_name = Entity(self._table._schema, self._name)
table_name = Entity(self._table.__name__)
else:
index_name = Entity(self._name)
table_name = self._table
ctx.sql(index_name)
if self._using is not None and \
ctx.state.index_using_precedes_table:
ctx.literal(' USING %s' % self._using) # MySQL style.
(ctx
.literal(' ON ')
.sql(table_name)
.literal(' '))
if self._using is not None and not \
ctx.state.index_using_precedes_table:
ctx.literal('USING %s ' % self._using) # Postgres/default.
ctx.sql(EnclosedNodeList([
SQL(expr) if isinstance(expr, basestring) else expr
for expr in self._expressions]))
if self._where is not None:
ctx.literal(' WHERE ').sql(self._where)
return ctx
class ModelIndex(Index):
def __init__(self, model, fields, unique=False, safe=True, where=None,
using=None, name=None):
self._model = model
if name is None:
name = self._generate_name_from_fields(model, fields)
if using is None:
for field in fields:
if isinstance(field, Field) and hasattr(field, 'index_type'):
using = field.index_type
super(ModelIndex, self).__init__(
name=name,
table=model._meta.table,
expressions=fields,
unique=unique,
safe=safe,
where=where,
using=using)
def _generate_name_from_fields(self, model, fields):
accum = []
for field in fields:
if isinstance(field, basestring):
accum.append(field.split()[0])
else:
if isinstance(field, Node) and not isinstance(field, Field):
field = field.unwrap()
if isinstance(field, Field):
accum.append(field.column_name)
if not accum:
raise ValueError('Unable to generate a name for the index, please '
'explicitly specify a name.')
clean_field_names = re.sub(r'[^\w]+', '', '_'.join(accum))
meta = model._meta
prefix = meta.name if meta.legacy_table_names else meta.table_name
return _truncate_constraint_name('_'.join((prefix, clean_field_names)))
def _truncate_constraint_name(constraint, maxlen=64):
if len(constraint) > maxlen:
name_hash = hashlib.md5(constraint.encode('utf-8')).hexdigest()
constraint = '%s_%s' % (constraint[:(maxlen - 8)], name_hash[:7])
return constraint
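# Example: with the default maxlen of 64, an 80-character name is cut to
# its first 56 characters plus '_' and a 7-character md5 digest, keeping
# generated index names unique yet within common identifier limits.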
# DB-API 2.0 EXCEPTIONS.
class PeeweeException(Exception):
def __init__(self, *args):
if args and isinstance(args[0], Exception):
self.orig, args = args[0], args[1:]
super(PeeweeException, self).__init__(*args)
class ImproperlyConfigured(PeeweeException): pass
class DatabaseError(PeeweeException): pass
class DataError(DatabaseError): pass
class IntegrityError(DatabaseError): pass
class InterfaceError(PeeweeException): pass
class InternalError(DatabaseError): pass
class NotSupportedError(DatabaseError): pass
class OperationalError(DatabaseError): pass
class ProgrammingError(DatabaseError): pass
class ExceptionWrapper(object):
__slots__ = ('exceptions',)
def __init__(self, exceptions):
self.exceptions = exceptions
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
return
# psycopg2 2.8 introduced many fine-grained error subclasses. Map any
# unrecognized subclass back to its base class before translating.
if pg_errors is not None and exc_type.__name__ not in self.exceptions \
and issubclass(exc_type, pg_errors.Error):
exc_type = exc_type.__bases__[0]
if exc_type.__name__ in self.exceptions:
new_type = self.exceptions[exc_type.__name__]
exc_args = exc_value.args
reraise(new_type, new_type(exc_value, *exc_args), traceback)
EXCEPTIONS = {
'ConstraintError': IntegrityError,
'DatabaseError': DatabaseError,
'DataError': DataError,
'IntegrityError': IntegrityError,
'InterfaceError': InterfaceError,
'InternalError': InternalError,
'NotSupportedError': NotSupportedError,
'OperationalError': OperationalError,
'ProgrammingError': ProgrammingError,
'TransactionRollbackError': OperationalError}
__exception_wrapper__ = ExceptionWrapper(EXCEPTIONS)
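# Illustrative effect of the exception wrapper (a sketch; assumes a
# configured database `db`): driver-specific errors raised inside a
# wrapped block re-raise as the peewee equivalents mapped above, so
# callers need not import driver exception classes:
#
#     try:
#         db.execute_sql('INSERT ...')
#     except IntegrityError:
#         ...  # handle constraint violation, regardless of driver.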
# DATABASE INTERFACE AND CONNECTION MANAGEMENT.
IndexMetadata = collections.namedtuple(
'IndexMetadata',
('name', 'sql', 'columns', 'unique', 'table'))
ColumnMetadata = collections.namedtuple(
'ColumnMetadata',
('name', 'data_type', 'null', 'primary_key', 'table', 'default'))
ForeignKeyMetadata = collections.namedtuple(
'ForeignKeyMetadata',
('column', 'dest_table', 'dest_column', 'table'))
ViewMetadata = collections.namedtuple('ViewMetadata', ('name', 'sql'))
class _ConnectionState(object):
def __init__(self, **kwargs):
super(_ConnectionState, self).__init__(**kwargs)
self.reset()
def reset(self):
self.closed = True
self.conn = None
self.ctx = []
self.transactions = []
def set_connection(self, conn):
self.conn = conn
self.closed = False
self.ctx = []
self.transactions = []
class _ConnectionLocal(_ConnectionState, threading.local): pass
class _NoopLock(object):
__slots__ = ()
def __enter__(self): return self
def __exit__(self, exc_type, exc_val, exc_tb): pass
class ConnectionContext(_callable_context_manager):
__slots__ = ('db',)
def __init__(self, db): self.db = db
def __enter__(self):
if self.db.is_closed():
self.db.connect()
def __exit__(self, exc_type, exc_val, exc_tb): self.db.close()
class Database(_callable_context_manager):
context_class = Context
field_types = {}
operations = {}
param = '?'
quote = '""'
server_version = None
# Feature toggles.
commit_select = False
compound_select_parentheses = CSQ_PARENTHESES_NEVER
for_update = False
index_schema_prefix = False
index_using_precedes_table = False
limit_max = None
nulls_ordering = False
returning_clause = False
safe_create_index = True
safe_drop_index = True
sequences = False
truncate_table = True
def __init__(self, database, thread_safe=True, autorollback=False,
field_types=None, operations=None, autocommit=None,
autoconnect=True, **kwargs):
self._field_types = merge_dict(FIELD, self.field_types)
self._operations = merge_dict(OP, self.operations)
if field_types:
self._field_types.update(field_types)
if operations:
self._operations.update(operations)
self.autoconnect = autoconnect
self.autorollback = autorollback
self.thread_safe = thread_safe
if thread_safe:
self._state = _ConnectionLocal()
self._lock = threading.RLock()
else:
self._state = _ConnectionState()
self._lock = _NoopLock()
if autocommit is not None:
__deprecated__('Peewee no longer uses the "autocommit" option, as '
'the semantics now require it to always be True. '
'Because some database-drivers also use the '
'"autocommit" parameter, you are receiving a '
'warning so you may update your code and remove '
'the parameter, as in the future, specifying '
'autocommit could impact the behavior of the '
'database driver you are using.')
self.connect_params = {}
self.init(database, **kwargs)
def init(self, database, **kwargs):
if not self.is_closed():
self.close()
self.database = database
self.connect_params.update(kwargs)
self.deferred = not bool(database)
def __enter__(self):
if self.is_closed():
self.connect()
ctx = self.atomic()
self._state.ctx.append(ctx)
ctx.__enter__()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
ctx = self._state.ctx.pop()
try:
ctx.__exit__(exc_type, exc_val, exc_tb)
finally:
if not self._state.ctx:
self.close()
def connection_context(self):
return ConnectionContext(self)
def _connect(self):
raise NotImplementedError
def connect(self, reuse_if_open: bool = False) -> bool:
"""
连接到数据库
:param reuse_if_open: 是否重用已经打开的连接
:return: None
"""
with self._lock:
if self.deferred:
raise InterfaceError('Error, database must be initialized '
'before opening a connection.')
if not self._state.closed:
if reuse_if_open:
return False
raise OperationalError('Connection already opened.')
self._state.reset()
with __exception_wrapper__:
self._state.set_connection(self._connect())
if self.server_version is None:
self._set_server_version(self._state.conn)
self._initialize_connection(self._state.conn)
return True
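# Illustrative usage of connect() (a sketch; assumes a configured `db`):
#
#     db.connect()                    # -> True, opens a new connection.
#     db.connect(reuse_if_open=True)  # -> False, open connection reused.
#     db.connect()                    # raises OperationalError (already open).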
def _initialize_connection(self, conn):
pass
def _set_server_version(self, conn):
self.server_version = 0
def close(self):
with self._lock:
if self.deferred:
raise InterfaceError('Error, database must be initialized '
'before it can be closed.')
if self.in_transaction():
raise OperationalError('Attempting to close database while '
'transaction is open.')
is_open = not self._state.closed
try:
if is_open:
with __exception_wrapper__:
self._close(self._state.conn)
finally:
self._state.reset()
return is_open
def _close(self, conn):
conn.close()
def is_closed(self):
return self._state.closed
def is_connection_usable(self):
return not self._state.closed
def connection(self):
if self.is_closed():
self.connect()
return self._state.conn
def cursor(self, commit=None):
if self.is_closed():
if self.autoconnect:
self.connect()
else:
raise InterfaceError('Error, database connection not opened.')
return self._state.conn.cursor()
def execute_sql(self, sql, params=None, commit=SENTINEL):
logger.debug((sql, params))
if commit is SENTINEL:
if self.in_transaction():
commit = False
elif self.commit_select:
commit = True
else:
commit = not sql[:6].lower().startswith('select')
with __exception_wrapper__:
cursor = self.cursor(commit)
try:
cursor.execute(sql, params or ())
except Exception:
if self.autorollback and not self.in_transaction():
self.rollback()
raise
else:
if commit and not self.in_transaction():
self.commit()
return cursor
def execute(self, query, commit=SENTINEL, **context_options):
ctx = self.get_sql_context(**context_options)
sql, params = ctx.sql(query).query()
return self.execute_sql(sql, params, commit=commit)
def get_context_options(self):
return {
'field_types': self._field_types,
'operations': self._operations,
'param': self.param,
'quote': self.quote,
'compound_select_parentheses': self.compound_select_parentheses,
'conflict_statement': self.conflict_statement,
'conflict_update': self.conflict_update,
'for_update': self.for_update,
'index_schema_prefix': self.index_schema_prefix,
'index_using_precedes_table': self.index_using_precedes_table,
'limit_max': self.limit_max,
'nulls_ordering': self.nulls_ordering,
}
def get_sql_context(self, **context_options):
context = self.get_context_options()
if context_options:
context.update(context_options)
return self.context_class(**context)
def conflict_statement(self, on_conflict, query):
raise NotImplementedError
def conflict_update(self, on_conflict, query):
raise NotImplementedError
def _build_on_conflict_update(self, on_conflict, query):
if on_conflict._conflict_target:
stmt = SQL('ON CONFLICT')
target = EnclosedNodeList([
Entity(col) if isinstance(col, basestring) else col
for col in on_conflict._conflict_target])
if on_conflict._conflict_where is not None:
target = NodeList([target, SQL('WHERE'),
on_conflict._conflict_where])
else:
stmt = SQL('ON CONFLICT ON CONSTRAINT')
target = on_conflict._conflict_constraint
if isinstance(target, basestring):
target = Entity(target)
updates = []
if on_conflict._preserve:
for column in on_conflict._preserve:
excluded = NodeList((SQL('EXCLUDED'), ensure_entity(column)),
glue='.')
expression = NodeList((ensure_entity(column), SQL('='),
excluded))
updates.append(expression)
if on_conflict._update:
for k, v in on_conflict._update.items():
if not isinstance(v, Node):
# Attempt to resolve string field-names to their respective
# field object, to apply data-type conversions.
if isinstance(k, basestring):
k = getattr(query.table, k)
if isinstance(k, Field):
v = k.to_value(v)
else:
v = Value(v, unpack=False)
else:
v = QualifiedNames(v)
updates.append(NodeList((ensure_entity(k), SQL('='), v)))
parts = [stmt, target, SQL('DO UPDATE SET'), CommaNodeList(updates)]
if on_conflict._where:
parts.extend((SQL('WHERE'), QualifiedNames(on_conflict._where)))
return NodeList(parts)
def last_insert_id(self, cursor, query_type=None):
return cursor.lastrowid
def rows_affected(self, cursor):
return cursor.rowcount
def default_values_insert(self, ctx):
return ctx.literal('DEFAULT VALUES')
def session_start(self):
with self._lock:
return self.transaction().__enter__()
def session_commit(self):
with self._lock:
try:
txn = self.pop_transaction()
except IndexError:
return False
txn.commit(begin=self.in_transaction())
return True
def session_rollback(self):
with self._lock:
try:
txn = self.pop_transaction()
except IndexError:
return False
txn.rollback(begin=self.in_transaction())
return True
def in_transaction(self):
return bool(self._state.transactions)
def push_transaction(self, transaction):
self._state.transactions.append(transaction)
def pop_transaction(self):
return self._state.transactions.pop()
def transaction_depth(self):
return len(self._state.transactions)
def top_transaction(self):
if self._state.transactions:
return self._state.transactions[-1]
def atomic(self, *args, **kwargs):
return _atomic(self, *args, **kwargs)
def manual_commit(self):
return _manual(self)
def transaction(self, *args, **kwargs):
return _transaction(self, *args, **kwargs)
def savepoint(self):
return _savepoint(self)
def begin(self):
if self.is_closed():
self.connect()
def commit(self):
with __exception_wrapper__:
return self._state.conn.commit()
def rollback(self):
with __exception_wrapper__:
return self._state.conn.rollback()
def batch_commit(self, it, n):
for group in chunked(it, n):
with self.atomic():
for obj in group:
yield obj
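# Illustrative usage of batch_commit() (a sketch; assumes a hypothetical
# `User` model and an iterable of row dicts). Items are yielded back to
# the caller while being grouped into transactions of `n` items each:
#
#     for row in db.batch_commit(row_dicts, 100):
#         User.create(**row)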
def table_exists(self, table_name, schema=None):
if is_model(table_name):
model = table_name
table_name = model._meta.table_name
schema = model._meta.schema
return table_name in self.get_tables(schema=schema)
def get_tables(self, schema=None):
raise NotImplementedError
def get_indexes(self, table, schema=None):
raise NotImplementedError
def get_columns(self, table, schema=None):
raise NotImplementedError
def get_primary_keys(self, table, schema=None):
raise NotImplementedError
def get_foreign_keys(self, table, schema=None):
raise NotImplementedError
def sequence_exists(self, seq):
raise NotImplementedError
def create_tables(self, models, **options):
for model in sort_models(models):
model.create_table(**options)
def drop_tables(self, models, **kwargs):
for model in reversed(sort_models(models)):
model.drop_table(**kwargs)
def extract_date(self, date_part, date_field):
raise NotImplementedError
def truncate_date(self, date_part, date_field):
raise NotImplementedError
def to_timestamp(self, date_field):
raise NotImplementedError
def from_timestamp(self, date_field):
raise NotImplementedError
def random(self):
return fn.random()
def bind(self, models, bind_refs=True, bind_backrefs=True):
for model in models:
model.bind(self, bind_refs=bind_refs, bind_backrefs=bind_backrefs)
def bind_ctx(self, models, bind_refs=True, bind_backrefs=True):
return _BoundModelsContext(models, self, bind_refs, bind_backrefs)
def get_noop_select(self, ctx):
return ctx.sql(Select().columns(SQL('0')).where(SQL('0')))
def __pragma__(name):
def __get__(self):
return self.pragma(name)
def __set__(self, value):
return self.pragma(name, value)
return property(__get__, __set__)
class SqliteDatabase(Database):
field_types = {
'BIGAUTO': FIELD.AUTO,
'BIGINT': FIELD.INT,
'BOOL': FIELD.INT,
'DOUBLE': FIELD.FLOAT,
'SMALLINT': FIELD.INT,
'UUID': FIELD.TEXT}
operations = {
'LIKE': 'GLOB',
'ILIKE': 'LIKE'}
index_schema_prefix = True
limit_max = -1
server_version = __sqlite_version__
truncate_table = False
def __init__(self, database, *args, **kwargs):
self._pragmas = kwargs.pop('pragmas', ())
super(SqliteDatabase, self).__init__(database, *args, **kwargs)
self._aggregates = {}
self._collations = {}
self._functions = {}
self._window_functions = {}
self._table_functions = []
self._extensions = set()
self._attached = {}
self.register_function(_sqlite_date_part, 'date_part', 2)
self.register_function(_sqlite_date_trunc, 'date_trunc', 2)
self.nulls_ordering = self.server_version >= (3, 30, 0)
def init(self, database, pragmas=None, timeout=5, returning_clause=None,
**kwargs):
if pragmas is not None:
self._pragmas = pragmas
if isinstance(self._pragmas, dict):
self._pragmas = list(self._pragmas.items())
if returning_clause is not None:
if __sqlite_version__ < (3, 35, 0):
warnings.warn('RETURNING clause requires Sqlite 3.35 or newer')
self.returning_clause = returning_clause
self._timeout = timeout
super(SqliteDatabase, self).init(database, **kwargs)
def _set_server_version(self, conn):
pass
def _connect(self):
if sqlite3 is None:
raise ImproperlyConfigured('SQLite driver not installed!')
conn = sqlite3.connect(self.database, timeout=self._timeout,
isolation_level=None, **self.connect_params)
try:
self._add_conn_hooks(conn)
except:
conn.close()
raise
return conn
def _add_conn_hooks(self, conn):
if self._attached:
self._attach_databases(conn)
if self._pragmas:
self._set_pragmas(conn)
self._load_aggregates(conn)
self._load_collations(conn)
self._load_functions(conn)
if self.server_version >= (3, 25, 0):
self._load_window_functions(conn)
if self._table_functions:
for table_function in self._table_functions:
table_function.register(conn)
if self._extensions:
self._load_extensions(conn)
def _set_pragmas(self, conn):
cursor = conn.cursor()
for pragma, value in self._pragmas:
cursor.execute('PRAGMA %s = %s;' % (pragma, value))
cursor.close()
def _attach_databases(self, conn):
cursor = conn.cursor()
for name, db in self._attached.items():
cursor.execute('ATTACH DATABASE "%s" AS "%s"' % (db, name))
cursor.close()
def pragma(self, key, value=SENTINEL, permanent=False, schema=None):
if schema is not None:
key = '"%s".%s' % (schema, key)
sql = 'PRAGMA %s' % key
if value is not SENTINEL:
sql += ' = %s' % (value or 0)
if permanent:
pragmas = dict(self._pragmas or ())
pragmas[key] = value
self._pragmas = list(pragmas.items())
elif permanent:
raise ValueError('Cannot specify a permanent pragma without value')
row = self.execute_sql(sql).fetchone()
if row:
return row[0]
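# Illustrative usage of pragma() (a sketch; assumes an open
# SqliteDatabase `db`):
#
#     db.pragma('cache_size')                           # read current value.
#     db.pragma('journal_mode', 'wal', permanent=True)  # set now and replay
#                                                       # on reconnect.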
cache_size = __pragma__('cache_size')
foreign_keys = __pragma__('foreign_keys')
journal_mode = __pragma__('journal_mode')
journal_size_limit = __pragma__('journal_size_limit')
mmap_size = __pragma__('mmap_size')
page_size = __pragma__('page_size')
read_uncommitted = __pragma__('read_uncommitted')
synchronous = __pragma__('synchronous')
wal_autocheckpoint = __pragma__('wal_autocheckpoint')
application_id = __pragma__('application_id')
user_version = __pragma__('user_version')
data_version = __pragma__('data_version')
@property
def timeout(self):
return self._timeout
@timeout.setter
def timeout(self, seconds):
if self._timeout == seconds:
return
self._timeout = seconds
if not self.is_closed():
# PySQLite multiplies user timeout by 1000, but the unit of the
# timeout PRAGMA is actually milliseconds.
self.execute_sql('PRAGMA busy_timeout=%d;' % (seconds * 1000))
def _load_aggregates(self, conn):
for name, (klass, num_params) in self._aggregates.items():
conn.create_aggregate(name, num_params, klass)
def _load_collations(self, conn):
for name, fn in self._collations.items():
conn.create_collation(name, fn)
def _load_functions(self, conn):
for name, (fn, num_params) in self._functions.items():
conn.create_function(name, num_params, fn)
def _load_window_functions(self, conn):
for name, (klass, num_params) in self._window_functions.items():
conn.create_window_function(name, num_params, klass)
def register_aggregate(self, klass, name=None, num_params=-1):
self._aggregates[name or klass.__name__.lower()] = (klass, num_params)
if not self.is_closed():
self._load_aggregates(self.connection())
def aggregate(self, name=None, num_params=-1):
def decorator(klass):
self.register_aggregate(klass, name, num_params)
return klass
return decorator
def register_collation(self, fn, name=None):
name = name or fn.__name__
def _collation(*args):
expressions = args + (SQL('collate %s' % name),)
return NodeList(expressions)
fn.collation = _collation
self._collations[name] = fn
if not self.is_closed():
self._load_collations(self.connection())
def collation(self, name=None):
def decorator(fn):
self.register_collation(fn, name)
return fn
return decorator
def register_function(self, fn, name=None, num_params=-1):
self._functions[name or fn.__name__] = (fn, num_params)
if not self.is_closed():
self._load_functions(self.connection())
def func(self, name=None, num_params=-1):
def decorator(fn):
self.register_function(fn, name, num_params)
return fn
return decorator
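# Illustrative registration of a user-defined SQL function (a sketch;
# assumes a SqliteDatabase `db`); the function then becomes callable
# from SQL, e.g. via fn.title_case(...):
#
#     @db.func('title_case')
#     def title_case(s):
#         return s.title() if s else s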
def register_window_function(self, klass, name=None, num_params=-1):
name = name or klass.__name__.lower()
self._window_functions[name] = (klass, num_params)
if not self.is_closed():
self._load_window_functions(self.connection())
def window_function(self, name=None, num_params=-1):
def decorator(klass):
self.register_window_function(klass, name, num_params)
return klass
return decorator
def register_table_function(self, klass, name=None):
if name is not None:
klass.name = name
self._table_functions.append(klass)
if not self.is_closed():
klass.register(self.connection())
def table_function(self, name=None):
def decorator(klass):
self.register_table_function(klass, name)
return klass
return decorator
def unregister_aggregate(self, name):
del self._aggregates[name]
def unregister_collation(self, name):
del self._collations[name]
def unregister_function(self, name):
del self._functions[name]
def unregister_window_function(self, name):
del self._window_functions[name]
def unregister_table_function(self, name):
for idx, klass in enumerate(self._table_functions):
if klass.name == name:
break
else:
return False
self._table_functions.pop(idx)
return True
def _load_extensions(self, conn):
conn.enable_load_extension(True)
for extension in self._extensions:
conn.load_extension(extension)
def load_extension(self, extension):
self._extensions.add(extension)
if not self.is_closed():
conn = self.connection()
conn.enable_load_extension(True)
conn.load_extension(extension)
def unload_extension(self, extension):
self._extensions.remove(extension)
def attach(self, filename, name):
if name in self._attached:
if self._attached[name] == filename:
return False
raise OperationalError('schema "%s" already attached.' % name)
self._attached[name] = filename
if not self.is_closed():
self.execute_sql('ATTACH DATABASE "%s" AS "%s"' % (filename, name))
return True
def detach(self, name):
if name not in self._attached:
return False
del self._attached[name]
if not self.is_closed():
self.execute_sql('DETACH DATABASE "%s"' % name)
return True
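# Illustrative usage of attach()/detach() (a sketch; assumes a
# SqliteDatabase `db`). Attachments are replayed on every new connection
# until detached:
#
#     db.attach('cache.db', 'cache')  # ATTACH DATABASE "cache.db" AS "cache"
#     db.detach('cache')              # DETACH DATABASE "cache"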
def last_insert_id(self, cursor, query_type=None):
if not self.returning_clause:
return cursor.lastrowid
elif query_type == Insert.SIMPLE:
try:
return cursor[0][0]
except (IndexError, KeyError, TypeError):
pass
return cursor
def rows_affected(self, cursor):
try:
return cursor.rowcount
except AttributeError:
return cursor.cursor.rowcount # This was a RETURNING query.
def begin(self, lock_type=None):
statement = 'BEGIN %s' % lock_type if lock_type else 'BEGIN'
self.execute_sql(statement, commit=False)
def get_tables(self, schema=None):
schema = schema or 'main'
cursor = self.execute_sql('SELECT name FROM "%s".sqlite_master WHERE '
'type=? ORDER BY name' % schema, ('table',))
return [row for row, in cursor.fetchall()]
def get_views(self, schema=None):
sql = ('SELECT name, sql FROM "%s".sqlite_master WHERE type=? '
'ORDER BY name') % (schema or 'main')
return [ViewMetadata(*row) for row in self.execute_sql(sql, ('view',))]
def get_indexes(self, table, schema=None):
schema = schema or 'main'
query = ('SELECT name, sql FROM "%s".sqlite_master '
'WHERE tbl_name = ? AND type = ? ORDER BY name') % schema
cursor = self.execute_sql(query, (table, 'index'))
index_to_sql = dict(cursor.fetchall())
# Determine which indexes have a unique constraint.
unique_indexes = set()
cursor = self.execute_sql('PRAGMA "%s".index_list("%s")' %
(schema, table))
for row in cursor.fetchall():
name = row[1]
is_unique = int(row[2]) == 1
if is_unique:
unique_indexes.add(name)
# Retrieve the indexed columns.
index_columns = {}
for index_name in sorted(index_to_sql):
cursor = self.execute_sql('PRAGMA "%s".index_info("%s")' %
(schema, index_name))
index_columns[index_name] = [row[2] for row in cursor.fetchall()]
return [
IndexMetadata(
name,
index_to_sql[name],
index_columns[name],
name in unique_indexes,
table)
for name in sorted(index_to_sql)]
def get_columns(self, table, schema=None):
cursor = self.execute_sql('PRAGMA "%s".table_info("%s")' %
(schema or 'main', table))
return [ColumnMetadata(r[1], r[2], not r[3], bool(r[5]), table, r[4])
for r in cursor.fetchall()]
def get_primary_keys(self, table, schema=None):
cursor = self.execute_sql('PRAGMA "%s".table_info("%s")' %
(schema or 'main', table))
return [row[1] for row in filter(lambda r: r[-1], cursor.fetchall())]
def get_foreign_keys(self, table, schema=None):
cursor = self.execute_sql('PRAGMA "%s".foreign_key_list("%s")' %
(schema or 'main', table))
return [ForeignKeyMetadata(row[3], row[2], row[4], table)
for row in cursor.fetchall()]
def get_binary_type(self):
return sqlite3.Binary
def conflict_statement(self, on_conflict, query):
action = on_conflict._action.lower() if on_conflict._action else ''
if action and action not in ('nothing', 'update'):
return SQL('INSERT OR %s' % on_conflict._action.upper())
def conflict_update(self, oc, query):
# Sqlite prior to 3.24.0 does not support Postgres-style upsert.
if self.server_version < (3, 24, 0) and \
any((oc._preserve, oc._update, oc._where, oc._conflict_target,
oc._conflict_constraint)):
raise ValueError('SQLite versions prior to 3.24.0 do not '
'support specifying which values to preserve or update.')
action = oc._action.lower() if oc._action else ''
if action and action not in ('nothing', 'update', ''):
return
if action == 'nothing':
return SQL('ON CONFLICT DO NOTHING')
elif not oc._update and not oc._preserve:
raise ValueError('If you are not performing any updates (or '
'preserving any INSERTed values), then the '
'conflict resolution action should be set to '
'"NOTHING".')
elif oc._conflict_constraint:
raise ValueError('SQLite does not support specifying named '
'constraints for conflict resolution.')
elif not oc._conflict_target:
raise ValueError('SQLite requires that a conflict target be '
'specified when doing an upsert.')
return self._build_on_conflict_update(oc, query)
def extract_date(self, date_part, date_field):
return fn.date_part(date_part, date_field, python_value=int)
def truncate_date(self, date_part, date_field):
return fn.date_trunc(date_part, date_field,
python_value=simple_date_time)
def to_timestamp(self, date_field):
return fn.strftime('%s', date_field).cast('integer')
def from_timestamp(self, date_field):
return fn.datetime(date_field, 'unixepoch')
class PostgresqlDatabase(Database):
field_types = {
'AUTO': 'SERIAL',
'BIGAUTO': 'BIGSERIAL',
'BLOB': 'BYTEA',
'BOOL': 'BOOLEAN',
'DATETIME': 'TIMESTAMP',
'DECIMAL': 'NUMERIC',
'DOUBLE': 'DOUBLE PRECISION',
'UUID': 'UUID',
'UUIDB': 'BYTEA'}
operations = {'REGEXP': '~', 'IREGEXP': '~*'}
param = '%s'
commit_select = True
compound_select_parentheses = CSQ_PARENTHESES_ALWAYS
for_update = True
nulls_ordering = True
returning_clause = True
safe_create_index = False
sequences = True
def init(self, database, register_unicode=True, encoding=None,
isolation_level=None, **kwargs):
self._register_unicode = register_unicode
self._encoding = encoding
self._isolation_level = isolation_level
super(PostgresqlDatabase, self).init(database, **kwargs)
def _connect(self):
if psycopg2 is None:
raise ImproperlyConfigured('Postgres driver not installed!')
# Handle connection-strings nicely, since psycopg2 will accept them,
# and they may be easier when lots of parameters are specified.
params = self.connect_params.copy()
if self.database.startswith('postgresql://'):
params.setdefault('dsn', self.database)
else:
params.setdefault('dbname', self.database)
conn = psycopg2.connect(**params)
if self._register_unicode:
pg_extensions.register_type(pg_extensions.UNICODE, conn)
pg_extensions.register_type(pg_extensions.UNICODEARRAY, conn)
if self._encoding:
conn.set_client_encoding(self._encoding)
if self._isolation_level:
conn.set_isolation_level(self._isolation_level)
return conn
def _set_server_version(self, conn):
self.server_version = conn.server_version
if self.server_version >= 90600:
self.safe_create_index = True
def is_connection_usable(self):
if self._state.closed:
return False
# Returns True if we are idle, running a command, or in an active
# connection. If the connection is in an error state or the connection
# is otherwise unusable, return False.
txn_status = self._state.conn.get_transaction_status()
return txn_status < pg_extensions.TRANSACTION_STATUS_INERROR
def last_insert_id(self, cursor, query_type=None):
try:
return cursor if query_type != Insert.SIMPLE else cursor[0][0]
except (IndexError, KeyError, TypeError):
pass
def rows_affected(self, cursor):
try:
return cursor.rowcount
except AttributeError:
return cursor.cursor.rowcount
def get_tables(self, schema=None):
query = ('SELECT tablename FROM pg_catalog.pg_tables '
'WHERE schemaname = %s ORDER BY tablename')
cursor = self.execute_sql(query, (schema or 'public',))
return [table for table, in cursor.fetchall()]
def get_views(self, schema=None):
query = ('SELECT viewname, definition FROM pg_catalog.pg_views '
'WHERE schemaname = %s ORDER BY viewname')
cursor = self.execute_sql(query, (schema or 'public',))
return [ViewMetadata(view_name, sql.strip(' \t;'))
for (view_name, sql) in cursor.fetchall()]
def get_indexes(self, table, schema=None):
query = """
SELECT
i.relname, idxs.indexdef, idx.indisunique,
array_to_string(ARRAY(
SELECT pg_get_indexdef(idx.indexrelid, k + 1, TRUE)
FROM generate_subscripts(idx.indkey, 1) AS k
ORDER BY k), ',')
FROM pg_catalog.pg_class AS t
INNER JOIN pg_catalog.pg_index AS idx ON t.oid = idx.indrelid
INNER JOIN pg_catalog.pg_class AS i ON idx.indexrelid = i.oid
INNER JOIN pg_catalog.pg_indexes AS idxs ON
(idxs.tablename = t.relname AND idxs.indexname = i.relname)
WHERE t.relname = %s AND t.relkind = %s AND idxs.schemaname = %s
ORDER BY idx.indisunique DESC, i.relname;"""
cursor = self.execute_sql(query, (table, 'r', schema or 'public'))
return [IndexMetadata(name, sql.rstrip(' ;'), columns.split(','),
is_unique, table)
for name, sql, is_unique, columns in cursor.fetchall()]
def get_columns(self, table, schema=None):
query = """
SELECT column_name, is_nullable, data_type, column_default
FROM information_schema.columns
WHERE table_name = %s AND table_schema = %s
ORDER BY ordinal_position"""
cursor = self.execute_sql(query, (table, schema or 'public'))
pks = set(self.get_primary_keys(table, schema))
return [ColumnMetadata(name, dt, null == 'YES', name in pks, table, df)
for name, null, dt, df in cursor.fetchall()]
def get_primary_keys(self, table, schema=None):
query = """
SELECT kc.column_name
FROM information_schema.table_constraints AS tc
INNER JOIN information_schema.key_column_usage AS kc ON (
tc.table_name = kc.table_name AND
tc.table_schema = kc.table_schema AND
tc.constraint_name = kc.constraint_name)
WHERE
tc.constraint_type = %s AND
tc.table_name = %s AND
tc.table_schema = %s"""
ctype = 'PRIMARY KEY'
cursor = self.execute_sql(query, (ctype, table, schema or 'public'))
return [pk for pk, in cursor.fetchall()]
def get_foreign_keys(self, table, schema=None):
sql = """
SELECT DISTINCT
kcu.column_name, ccu.table_name, ccu.column_name
FROM information_schema.table_constraints AS tc
JOIN information_schema.key_column_usage AS kcu
ON (tc.constraint_name = kcu.constraint_name AND
tc.constraint_schema = kcu.constraint_schema AND
tc.table_name = kcu.table_name AND
tc.table_schema = kcu.table_schema)
JOIN information_schema.constraint_column_usage AS ccu
ON (ccu.constraint_name = tc.constraint_name AND
ccu.constraint_schema = tc.constraint_schema)
WHERE
tc.constraint_type = 'FOREIGN KEY' AND
tc.table_name = %s AND
tc.table_schema = %s"""
cursor = self.execute_sql(sql, (table, schema or 'public'))
return [ForeignKeyMetadata(row[0], row[1], row[2], table)
for row in cursor.fetchall()]
def sequence_exists(self, sequence):
res = self.execute_sql("""
SELECT COUNT(*) FROM pg_class, pg_namespace
WHERE relkind='S'
AND pg_class.relnamespace = pg_namespace.oid
AND relname=%s""", (sequence,))
return bool(res.fetchone()[0])
def get_binary_type(self):
return psycopg2.Binary
def conflict_statement(self, on_conflict, query):
return
def conflict_update(self, oc, query):
action = oc._action.lower() if oc._action else ''
if action in ('ignore', 'nothing'):
parts = [SQL('ON CONFLICT')]
if oc._conflict_target:
parts.append(EnclosedNodeList([
Entity(col) if isinstance(col, basestring) else col
for col in oc._conflict_target]))
parts.append(SQL('DO NOTHING'))
return NodeList(parts)
elif action and action != 'update':
raise ValueError('The only supported actions for conflict '
'resolution with Postgresql are "ignore" or '
'"update".')
elif not oc._update and not oc._preserve:
raise ValueError('If you are not performing any updates (or '
'preserving any INSERTed values), then the '
'conflict resolution action should be set to '
'"IGNORE".')
elif not (oc._conflict_target or oc._conflict_constraint):
raise ValueError('Postgres requires that a conflict target be '
'specified when doing an upsert.')
return self._build_on_conflict_update(oc, query)
def extract_date(self, date_part, date_field):
return fn.EXTRACT(NodeList((date_part, SQL('FROM'), date_field)))
def truncate_date(self, date_part, date_field):
return fn.DATE_TRUNC(date_part, date_field)
def to_timestamp(self, date_field):
return self.extract_date('EPOCH', date_field)
def from_timestamp(self, date_field):
# Ironically, here, Postgres means "to the Postgresql timestamp type".
return fn.to_timestamp(date_field)
def get_noop_select(self, ctx):
return ctx.sql(Select().columns(SQL('0')).where(SQL('false')))
def set_time_zone(self, timezone):
self.execute_sql('set time zone "%s";' % timezone)
class MySQLDatabase(Database):
field_types = {
'AUTO': 'INTEGER AUTO_INCREMENT',
'BIGAUTO': 'BIGINT AUTO_INCREMENT',
'BOOL': 'BOOL',
'DECIMAL': 'NUMERIC',
'DOUBLE': 'DOUBLE PRECISION',
'FLOAT': 'FLOAT',
'UUID': 'VARCHAR(40)',
'UUIDB': 'VARBINARY(16)'}
operations = {
'LIKE': 'LIKE BINARY',
'ILIKE': 'LIKE',
'REGEXP': 'REGEXP BINARY',
'IREGEXP': 'REGEXP',
'XOR': 'XOR'}
param = '%s'
quote = '``'
commit_select = True
compound_select_parentheses = CSQ_PARENTHESES_UNNESTED
for_update = True
index_using_precedes_table = True
limit_max = 2 ** 64 - 1
safe_create_index = False
safe_drop_index = False
sql_mode = 'PIPES_AS_CONCAT'
def init(self, database, **kwargs):
params = {
'charset': 'utf8',
'sql_mode': self.sql_mode,
'use_unicode': True}
params.update(kwargs)
if 'password' in params and mysql_passwd:
params['passwd'] = params.pop('password')
super(MySQLDatabase, self).init(database, **params)
def _connect(self):
if mysql is None:
raise ImproperlyConfigured('MySQL driver not installed!')
conn = mysql.connect(db=self.database, **self.connect_params)
return conn
def _set_server_version(self, conn):
try:
version_raw = conn.server_version
except AttributeError:
version_raw = conn.get_server_info()
self.server_version = self._extract_server_version(version_raw)
def _extract_server_version(self, version):
version = version.lower()
if 'maria' in version:
match_obj = re.search(r'(1\d\.\d+\.\d+)', version)
else:
match_obj = re.search(r'(\d\.\d+\.\d+)', version)
if match_obj is not None:
return tuple(int(num) for num in match_obj.groups()[0].split('.'))
warnings.warn('Unable to determine MySQL version: "%s"' % version)
return (0, 0, 0) # Unable to determine version!
def is_connection_usable(self):
if self._state.closed:
return False
conn = self._state.conn
if hasattr(conn, 'ping'):
try:
conn.ping(False)
except Exception:
return False
return True
def default_values_insert(self, ctx):
return ctx.literal('() VALUES ()')
def get_tables(self, schema=None):
query = ('SELECT table_name FROM information_schema.tables '
'WHERE table_schema = DATABASE() AND table_type != %s '
'ORDER BY table_name')
return [table for table, in self.execute_sql(query, ('VIEW',))]
def get_views(self, schema=None):
query = ('SELECT table_name, view_definition '
'FROM information_schema.views '
'WHERE table_schema = DATABASE() ORDER BY table_name')
cursor = self.execute_sql(query)
return [ViewMetadata(*row) for row in cursor.fetchall()]
def get_indexes(self, table, schema=None):
cursor = self.execute_sql('SHOW INDEX FROM `%s`' % table)
unique = set()
indexes = {}
for row in cursor.fetchall():
if not row[1]:
unique.add(row[2])
indexes.setdefault(row[2], [])
indexes[row[2]].append(row[4])
return [IndexMetadata(name, None, indexes[name], name in unique, table)
for name in indexes]
def get_columns(self, table, schema=None):
sql = """
SELECT column_name, is_nullable, data_type, column_default
FROM information_schema.columns
WHERE table_name = %s AND table_schema = DATABASE()"""
cursor = self.execute_sql(sql, (table,))
pks = set(self.get_primary_keys(table))
return [ColumnMetadata(name, dt, null == 'YES', name in pks, table, df)
for name, null, dt, df in cursor.fetchall()]
def get_primary_keys(self, table, schema=None):
cursor = self.execute_sql('SHOW INDEX FROM `%s`' % table)
return [row[4] for row in
filter(lambda row: row[2] == 'PRIMARY', cursor.fetchall())]
def get_foreign_keys(self, table, schema=None):
query = """
SELECT column_name, referenced_table_name, referenced_column_name
FROM information_schema.key_column_usage
WHERE table_name = %s
AND table_schema = DATABASE()
AND referenced_table_name IS NOT NULL
AND referenced_column_name IS NOT NULL"""
cursor = self.execute_sql(query, (table,))
return [
ForeignKeyMetadata(column, dest_table, dest_column, table)
for column, dest_table, dest_column in cursor.fetchall()]
def get_binary_type(self):
return mysql.Binary
def conflict_statement(self, on_conflict, query):
if not on_conflict._action: return
action = on_conflict._action.lower()
if action == 'replace':
return SQL('REPLACE')
elif action == 'ignore':
return SQL('INSERT IGNORE')
elif action != 'update':
raise ValueError('Unsupported action for conflict resolution. '
'MySQL supports REPLACE, IGNORE and UPDATE.')
def conflict_update(self, on_conflict, query):
if on_conflict._where or on_conflict._conflict_target or \
on_conflict._conflict_constraint:
raise ValueError('MySQL does not support the specification of '
'where clauses or conflict targets for conflict '
'resolution.')
updates = []
if on_conflict._preserve:
# Here we need to determine which function to use, which varies
# depending on the MySQL server version. MySQL and MariaDB prior to
# 10.3.3 use "VALUES", while MariaDB 10.3.3+ use "VALUE".
version = self.server_version or (0,)
if version[0] == 10 and version >= (10, 3, 3):
VALUE_FN = fn.VALUE
else:
VALUE_FN = fn.VALUES
for column in on_conflict._preserve:
entity = ensure_entity(column)
expression = NodeList((
ensure_entity(column),
SQL('='),
VALUE_FN(entity)))
updates.append(expression)
if on_conflict._update:
for k, v in on_conflict._update.items():
if not isinstance(v, Node):
# Attempt to resolve string field-names to their respective
# field object, to apply data-type conversions.
if isinstance(k, basestring):
k = getattr(query.table, k)
if isinstance(k, Field):
v = k.to_value(v)
else:
v = Value(v, unpack=False)
updates.append(NodeList((ensure_entity(k), SQL('='), v)))
if updates:
return NodeList((SQL('ON DUPLICATE KEY UPDATE'),
CommaNodeList(updates)))
def extract_date(self, date_part, date_field):
return fn.EXTRACT(NodeList((SQL(date_part), SQL('FROM'), date_field)))
def truncate_date(self, date_part, date_field):
return fn.DATE_FORMAT(date_field, __mysql_date_trunc__[date_part],
python_value=simple_date_time)
def to_timestamp(self, date_field):
return fn.UNIX_TIMESTAMP(date_field)
def from_timestamp(self, date_field):
return fn.FROM_UNIXTIME(date_field)
def random(self):
return fn.rand()
def get_noop_select(self, ctx):
return ctx.literal('DO 0')
# TRANSACTION CONTROL.
class _manual(_callable_context_manager):
def __init__(self, db):
self.db = db
def __enter__(self):
top = self.db.top_transaction()
if top is not None and not isinstance(top, _manual):
raise ValueError('Cannot enter manual commit block while a '
'transaction is active.')
self.db.push_transaction(self)
def __exit__(self, exc_type, exc_val, exc_tb):
if self.db.pop_transaction() is not self:
raise ValueError('Transaction stack corrupted while exiting '
'manual commit block.')
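# Usage sketch for manual-commit mode, assuming a configured `db` Database
# (save_rows is a hypothetical helper):
#
#     with db.manual_commit():
#         db.begin()
#         try:
#             save_rows(db)
#             db.commit()
#         except BaseException:
#             db.rollback()
#             raise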
class _atomic(_callable_context_manager):
def __init__(self, db, *args, **kwargs):
self.db = db
self._transaction_args = (args, kwargs)
def __enter__(self):
if self.db.transaction_depth() == 0:
args, kwargs = self._transaction_args
self._helper = self.db.transaction(*args, **kwargs)
elif isinstance(self.db.top_transaction(), _manual):
raise ValueError('Cannot enter atomic commit block while in '
'manual commit mode.')
else:
self._helper = self.db.savepoint()
return self._helper.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
return self._helper.__exit__(exc_type, exc_val, exc_tb)
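# Usage sketch: db.atomic() begins a transaction at depth zero and a
# savepoint when nested, so the same code composes at any depth
# (hypothetical User model):
#
#     with db.atomic():           # BEGIN
#         User.create(username='a')
#         with db.atomic():       # SAVEPOINT
#             User.create(username='b')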
class _transaction(_callable_context_manager):
def __init__(self, db, *args, **kwargs):
self.db = db
self._begin_args = (args, kwargs)
def _begin(self):
args, kwargs = self._begin_args
self.db.begin(*args, **kwargs)
def commit(self, begin=True):
self.db.commit()
if begin:
self._begin()
def rollback(self, begin=True):
self.db.rollback()
if begin:
self._begin()
def __enter__(self):
if self.db.transaction_depth() == 0:
self._begin()
self.db.push_transaction(self)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
try:
if exc_type:
self.rollback(False)
elif self.db.transaction_depth() == 1:
try:
self.commit(False)
except:
self.rollback(False)
raise
finally:
self.db.pop_transaction()
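# Usage sketch: inside db.transaction(), an explicit commit() or rollback()
# ends the current transaction and, by default, immediately begins a new
# one (do_work is a hypothetical helper):
#
#     with db.transaction() as txn:
#         do_work(db)
#         txn.commit()    # COMMIT, then BEGIN again.
#         do_work(db)     # Rolled back automatically on error.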
class _savepoint(_callable_context_manager):
def __init__(self, db, sid=None):
self.db = db
self.sid = sid or 's' + uuid.uuid4().hex
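        # db.quote is a two-character string of quote characters (e.g. '""'
        # or '``'), so str.join() wraps the savepoint id in those quotes.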
self.quoted_sid = self.sid.join(self.db.quote)
def _begin(self):
self.db.execute_sql('SAVEPOINT %s;' % self.quoted_sid)
def commit(self, begin=True):
self.db.execute_sql('RELEASE SAVEPOINT %s;' % self.quoted_sid)
if begin: self._begin()
def rollback(self):
self.db.execute_sql('ROLLBACK TO SAVEPOINT %s;' % self.quoted_sid)
def __enter__(self):
self._begin()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type:
self.rollback()
else:
try:
self.commit(begin=False)
except:
self.rollback()
raise
# CURSOR REPRESENTATIONS.
class CursorWrapper(object):
def __init__(self, cursor):
self.cursor = cursor
self.count = 0
self.index = 0
self.initialized = False
self.populated = False
self.row_cache = []
def __iter__(self):
if self.populated:
return iter(self.row_cache)
return ResultIterator(self)
def __getitem__(self, item):
if isinstance(item, slice):
stop = item.stop
if stop is None or stop < 0:
self.fill_cache()
else:
self.fill_cache(stop)
return self.row_cache[item]
elif isinstance(item, int):
            # Cache (item + 1) rows so index "item" is present; negative
            # indexes require the full result-set.
            self.fill_cache(item + 1 if item >= 0 else 0)
return self.row_cache[item]
else:
raise ValueError('CursorWrapper only supports integer and slice '
'indexes.')
def __len__(self):
self.fill_cache()
return self.count
def initialize(self):
pass
def iterate(self, cache=True):
row = self.cursor.fetchone()
if row is None:
self.populated = True
self.cursor.close()
raise StopIteration
elif not self.initialized:
self.initialize() # Lazy initialization.
self.initialized = True
self.count += 1
result = self.process_row(row)
if cache:
self.row_cache.append(result)
return result
def process_row(self, row):
return row
def iterator(self):
"""Efficient one-pass iteration over the result set."""
while True:
try:
yield self.iterate(False)
except StopIteration:
return
def fill_cache(self, n=0):
n = n or float('Inf')
if n < 0:
raise ValueError('Negative values are not supported.')
iterator = ResultIterator(self)
iterator.index = self.count
while not self.populated and (n > self.count):
try:
iterator.next()
except StopIteration:
break
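# Usage sketch: rows are cached as they are fetched from the cursor, so
# indexing and repeated iteration do not re-execute the query
# (hypothetical model; execute() returns a CursorWrapper subclass):
#
#     rows = SomeModel.select().execute()
#     first = rows[0]          # Fetches and caches a single row.
#     everything = list(rows)  # Fetches the remainder; cached thereafter.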
class DictCursorWrapper(CursorWrapper):
def _initialize_columns(self):
description = self.cursor.description
self.columns = [t[0][t[0].rfind('.') + 1:].strip('()"`')
for t in description]
self.ncols = len(description)
initialize = _initialize_columns
def _row_to_dict(self, row):
result = {}
for i in range(self.ncols):
result.setdefault(self.columns[i], row[i]) # Do not overwrite.
return result
process_row = _row_to_dict
class NamedTupleCursorWrapper(CursorWrapper):
def initialize(self):
description = self.cursor.description
self.tuple_class = collections.namedtuple('Row', [
t[0][t[0].rfind('.') + 1:].strip('()"`') for t in description])
def process_row(self, row):
return self.tuple_class(*row)
class ObjectCursorWrapper(DictCursorWrapper):
def __init__(self, cursor, constructor):
super(ObjectCursorWrapper, self).__init__(cursor)
self.constructor = constructor
def process_row(self, row):
row_dict = self._row_to_dict(row)
return self.constructor(**row_dict)
class ResultIterator(object):
def __init__(self, cursor_wrapper):
self.cursor_wrapper = cursor_wrapper
self.index = 0
def __iter__(self):
return self
def next(self):
if self.index < self.cursor_wrapper.count:
obj = self.cursor_wrapper.row_cache[self.index]
elif not self.cursor_wrapper.populated:
self.cursor_wrapper.iterate()
obj = self.cursor_wrapper.row_cache[self.index]
else:
raise StopIteration
self.index += 1
return obj
__next__ = next
# FIELDS
class FieldAccessor(object):
def __init__(self, model, field, name):
self.model = model
self.field = field
self.name = name
def __get__(self, instance, instance_type=None):
if instance is not None:
return instance.__data__.get(self.name)
return self.field
def __set__(self, instance, value):
instance.__data__[self.name] = value
instance._dirty.add(self.name)
class ForeignKeyAccessor(FieldAccessor):
def __init__(self, model, field, name):
super(ForeignKeyAccessor, self).__init__(model, field, name)
self.rel_model = field.rel_model
def get_rel_instance(self, instance):
value = instance.__data__.get(self.name)
if value is not None or self.name in instance.__rel__:
if self.name not in instance.__rel__ and self.field.lazy_load:
obj = self.rel_model.get(self.field.rel_field == value)
instance.__rel__[self.name] = obj
return instance.__rel__.get(self.name, value)
elif not self.field.null and self.field.lazy_load:
raise self.rel_model.DoesNotExist
return value
def __get__(self, instance, instance_type=None):
if instance is not None:
return self.get_rel_instance(instance)
return self.field
def __set__(self, instance, obj):
if isinstance(obj, self.rel_model):
instance.__data__[self.name] = getattr(obj, self.field.rel_field.name)
instance.__rel__[self.name] = obj
else:
fk_value = instance.__data__.get(self.name)
instance.__data__[self.name] = obj
if (obj != fk_value or obj is None) and \
self.name in instance.__rel__:
del instance.__rel__[self.name]
instance._dirty.add(self.name)
class BackrefAccessor(object):
def __init__(self, field):
self.field = field
self.model = field.rel_model
self.rel_model = field.model
def __get__(self, instance, instance_type=None):
if instance is not None:
dest = self.field.rel_field.name
return (self.rel_model
.select()
.where(self.field == getattr(instance, dest)))
return self
class ObjectIdAccessor(object):
"""Gives direct access to the underlying id"""
def __init__(self, field):
self.field = field
def __get__(self, instance, instance_type=None):
if instance is not None:
value = instance.__data__.get(self.field.name)
# Pull the object-id from the related object if it is not set.
if value is None and self.field.name in instance.__rel__:
rel_obj = instance.__rel__[self.field.name]
value = getattr(rel_obj, self.field.rel_field.name)
return value
return self.field
def __set__(self, instance, value):
setattr(instance, self.field.name, value)
class Field(ColumnBase):
_field_counter = 0
_order = 0
accessor_class = FieldAccessor
auto_increment = False
default_index_type = None
field_type = 'DEFAULT'
unpack = True
def __init__(self, null=False, index=False, unique=False, column_name=None,
default=None, primary_key=False, constraints=None,
sequence=None, collation=None, unindexed=False, choices=None,
help_text=None, verbose_name=None, index_type=None,
db_column=None, _hidden=False):
if db_column is not None:
__deprecated__('"db_column" has been deprecated in favor of '
'"column_name" for Field objects.')
column_name = db_column
self.null = null
self.index = index
self.unique = unique
self.column_name = column_name
self.default = default
self.primary_key = primary_key
self.constraints = constraints # List of column constraints.
self.sequence = sequence # Name of sequence, e.g. foo_id_seq.
self.collation = collation
self.unindexed = unindexed
self.choices = choices
self.help_text = help_text
self.verbose_name = verbose_name
self.index_type = index_type or self.default_index_type
self._hidden = _hidden
# Used internally for recovering the order in which Fields were defined
# on the Model class.
Field._field_counter += 1
self._order = Field._field_counter
self._sort_key = (self.primary_key and 1 or 2), self._order
def __hash__(self):
return hash(self.name + '.' + self.model.__name__)
def __repr__(self):
if hasattr(self, 'model') and getattr(self, 'name', None):
return '<%s: %s.%s>' % (type(self).__name__,
self.model.__name__,
self.name)
return '<%s: (unbound)>' % type(self).__name__
def bind(self, model, name, set_attribute=True):
self.model = model
self.name = self.safe_name = name
self.column_name = self.column_name or name
if set_attribute:
setattr(model, name, self.accessor_class(model, self, name))
@property
def column(self):
return Column(self.model._meta.table, self.column_name)
def adapt(self, value):
return value
def db_value(self, value):
return value if value is None else self.adapt(value)
def python_value(self, value):
return value if value is None else self.adapt(value)
def to_value(self, value):
return Value(value, self.db_value, unpack=False)
def get_sort_key(self, ctx):
return self._sort_key
def __sql__(self, ctx):
return ctx.sql(self.column)
def get_modifiers(self):
pass
def ddl_datatype(self, ctx):
if ctx and ctx.state.field_types:
column_type = ctx.state.field_types.get(self.field_type,
self.field_type)
else:
column_type = self.field_type
modifiers = self.get_modifiers()
if column_type and modifiers:
modifier_literal = ', '.join([str(m) for m in modifiers])
return SQL('%s(%s)' % (column_type, modifier_literal))
else:
return SQL(column_type)
def ddl(self, ctx):
accum = [Entity(self.column_name)]
data_type = self.ddl_datatype(ctx)
if data_type:
accum.append(data_type)
if self.unindexed:
accum.append(SQL('UNINDEXED'))
if not self.null:
accum.append(SQL('NOT NULL'))
if self.primary_key:
accum.append(SQL('PRIMARY KEY'))
if self.sequence:
accum.append(SQL("DEFAULT NEXTVAL('%s')" % self.sequence))
if self.constraints:
accum.extend(self.constraints)
if self.collation:
accum.append(SQL('COLLATE %s' % self.collation))
return NodeList(accum)
class AnyField(Field):
field_type = 'ANY'
class IntegerField(Field):
field_type = 'INT'
def adapt(self, value):
try:
return int(value)
except ValueError:
return value
class BigIntegerField(IntegerField):
field_type = 'BIGINT'
class SmallIntegerField(IntegerField):
field_type = 'SMALLINT'
class AutoField(IntegerField):
auto_increment = True
field_type = 'AUTO'
def __init__(self, *args, **kwargs):
if kwargs.get('primary_key') is False:
raise ValueError('%s must always be a primary key.' % type(self))
kwargs['primary_key'] = True
super(AutoField, self).__init__(*args, **kwargs)
class BigAutoField(AutoField):
field_type = 'BIGAUTO'
class IdentityField(AutoField):
field_type = 'INT GENERATED BY DEFAULT AS IDENTITY'
def __init__(self, generate_always=False, **kwargs):
if generate_always:
self.field_type = 'INT GENERATED ALWAYS AS IDENTITY'
super(IdentityField, self).__init__(**kwargs)
class PrimaryKeyField(AutoField):
def __init__(self, *args, **kwargs):
__deprecated__('"PrimaryKeyField" has been renamed to "AutoField". '
'Please update your code accordingly as this will be '
'completely removed in a subsequent release.')
super(PrimaryKeyField, self).__init__(*args, **kwargs)
class FloatField(Field):
field_type = 'FLOAT'
def adapt(self, value):
try:
return float(value)
except ValueError:
return value
class DoubleField(FloatField):
field_type = 'DOUBLE'
class DecimalField(Field):
field_type = 'DECIMAL'
def __init__(self, max_digits=10, decimal_places=5, auto_round=False,
rounding=None, *args, **kwargs):
self.max_digits = max_digits
self.decimal_places = decimal_places
self.auto_round = auto_round
self.rounding = rounding or decimal.DefaultContext.rounding
self._exp = decimal.Decimal(10) ** (-self.decimal_places)
super(DecimalField, self).__init__(*args, **kwargs)
def get_modifiers(self):
return [self.max_digits, self.decimal_places]
def db_value(self, value):
D = decimal.Decimal
if not value:
return value if value is None else D(0)
if self.auto_round:
decimal_value = D(text_type(value))
return decimal_value.quantize(self._exp, rounding=self.rounding)
return value
def python_value(self, value):
if value is not None:
if isinstance(value, decimal.Decimal):
return value
return decimal.Decimal(text_type(value))
class _StringField(Field):
"""
    Base class for string fields. Values are coerced to text; bytes are
    decoded as UTF-8.
"""
def adapt(self, value):
if isinstance(value, text_type):
return value
elif isinstance(value, bytes_type):
return value.decode('utf-8')
return text_type(value)
def __add__(self, other):
return StringExpression(self, OP.CONCAT, other)
def __radd__(self, other):
return StringExpression(other, OP.CONCAT, self)
class CharField(_StringField):
"""
    Field corresponding to the VARCHAR type in MySQL, with a configurable
    max_length.
"""
field_type = 'VARCHAR'
def __init__(self, max_length=255, *args, **kwargs):
self.max_length = max_length
super(CharField, self).__init__(*args, **kwargs)
def get_modifiers(self):
return self.max_length and [self.max_length] or None
class FixedCharField(CharField):
field_type = 'CHAR'
def python_value(self, value):
value = super(FixedCharField, self).python_value(value)
if value:
value = value.strip()
return value
class TextField(_StringField):
field_type = 'TEXT'
class BlobField(Field):
field_type = 'BLOB'
def _db_hook(self, database):
if database is None:
self._constructor = bytearray
else:
self._constructor = database.get_binary_type()
def bind(self, model, name, set_attribute=True):
self._constructor = bytearray
if model._meta.database:
if isinstance(model._meta.database, Proxy):
model._meta.database.attach_callback(self._db_hook)
else:
self._db_hook(model._meta.database)
# Attach a hook to the model metadata; in the event the database is
# changed or set at run-time, we will be sure to apply our callback and
# use the proper data-type for our database driver.
model._meta._db_hooks.append(self._db_hook)
return super(BlobField, self).bind(model, name, set_attribute)
def db_value(self, value):
if isinstance(value, text_type):
value = value.encode('raw_unicode_escape')
if isinstance(value, bytes_type):
return self._constructor(value)
return value
class BitField(BitwiseMixin, BigIntegerField):
def __init__(self, *args, **kwargs):
kwargs.setdefault('default', 0)
super(BitField, self).__init__(*args, **kwargs)
self.__current_flag = 1
def flag(self, value=None):
if value is None:
value = self.__current_flag
self.__current_flag <<= 1
else:
self.__current_flag = value << 1
class FlagDescriptor(ColumnBase):
def __init__(self, field, value):
self._field = field
self._value = value
super(FlagDescriptor, self).__init__()
def clear(self):
return self._field.bin_and(~self._value)
def set(self):
return self._field.bin_or(self._value)
def __get__(self, instance, instance_type=None):
if instance is None:
return self
value = getattr(instance, self._field.name) or 0
return (value & self._value) != 0
def __set__(self, instance, is_set):
if is_set not in (True, False):
raise ValueError('Value must be either True or False')
value = getattr(instance, self._field.name) or 0
if is_set:
value |= self._value
else:
value &= ~self._value
setattr(instance, self._field.name, value)
def __sql__(self, ctx):
return ctx.sql(self._field.bin_and(self._value) != 0)
return FlagDescriptor(self, value)
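# Usage sketch: BitField.flag() returns a descriptor for a single bit of
# the underlying integer column (hypothetical Post model):
#
#     class Post(Model):
#         flags = BitField()
#         is_sticky = flags.flag(1)
#         is_favorite = flags.flag(2)
#
#     post.is_sticky = True                # Sets the bit on post.flags.
#     Post.select().where(Post.is_sticky)  # Renders a bitwise AND test.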
class BigBitFieldData(object):
def __init__(self, instance, name):
self.instance = instance
self.name = name
value = self.instance.__data__.get(self.name)
if not value:
value = bytearray()
elif not isinstance(value, bytearray):
value = bytearray(value)
self._buffer = self.instance.__data__[self.name] = value
def _ensure_length(self, idx):
byte_num, byte_offset = divmod(idx, 8)
cur_size = len(self._buffer)
if cur_size <= byte_num:
self._buffer.extend(b'\x00' * ((byte_num + 1) - cur_size))
return byte_num, byte_offset
def set_bit(self, idx):
byte_num, byte_offset = self._ensure_length(idx)
self._buffer[byte_num] |= (1 << byte_offset)
def clear_bit(self, idx):
byte_num, byte_offset = self._ensure_length(idx)
self._buffer[byte_num] &= ~(1 << byte_offset)
def toggle_bit(self, idx):
byte_num, byte_offset = self._ensure_length(idx)
self._buffer[byte_num] ^= (1 << byte_offset)
return bool(self._buffer[byte_num] & (1 << byte_offset))
def is_set(self, idx):
byte_num, byte_offset = self._ensure_length(idx)
return bool(self._buffer[byte_num] & (1 << byte_offset))
def __repr__(self):
return repr(self._buffer)
class BigBitFieldAccessor(FieldAccessor):
def __get__(self, instance, instance_type=None):
if instance is None:
return self.field
return BigBitFieldData(instance, self.name)
def __set__(self, instance, value):
if isinstance(value, memoryview):
value = value.tobytes()
elif isinstance(value, buffer_type):
value = bytes(value)
elif isinstance(value, bytearray):
value = bytes_type(value)
elif isinstance(value, BigBitFieldData):
value = bytes_type(value._buffer)
elif isinstance(value, text_type):
value = value.encode('utf-8')
elif not isinstance(value, bytes_type):
            raise ValueError('Value must be a bytes, bytearray, memoryview, '
                             'str or BigBitFieldData instance.')
super(BigBitFieldAccessor, self).__set__(instance, value)
class BigBitField(BlobField):
accessor_class = BigBitFieldAccessor
def __init__(self, *args, **kwargs):
kwargs.setdefault('default', bytes_type)
super(BigBitField, self).__init__(*args, **kwargs)
def db_value(self, value):
return bytes_type(value) if value is not None else value
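# Usage sketch: BigBitField stores an arbitrarily long bitmap in a BLOB,
# manipulated through BigBitFieldData (hypothetical Bitmap model):
#
#     class Bitmap(Model):
#         data = BigBitField()
#
#     bitmap = Bitmap()
#     bitmap.data.set_bit(62)    # Buffer grows to 8 bytes as needed.
#     assert bitmap.data.is_set(62)
#     bitmap.data.clear_bit(62)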
class UUIDField(Field):
field_type = 'UUID'
def db_value(self, value):
if isinstance(value, basestring) and len(value) == 32:
# Hex string. No transformation is necessary.
return value
elif isinstance(value, bytes) and len(value) == 16:
# Allow raw binary representation.
value = uuid.UUID(bytes=value)
if isinstance(value, uuid.UUID):
return value.hex
try:
return uuid.UUID(value).hex
except:
return value
def python_value(self, value):
if isinstance(value, uuid.UUID):
return value
return uuid.UUID(value) if value is not None else None
class BinaryUUIDField(BlobField):
field_type = 'UUIDB'
def db_value(self, value):
if isinstance(value, bytes) and len(value) == 16:
# Raw binary value. No transformation is necessary.
return self._constructor(value)
elif isinstance(value, basestring) and len(value) == 32:
# Allow hex string representation.
value = uuid.UUID(hex=value)
if isinstance(value, uuid.UUID):
return self._constructor(value.bytes)
elif value is not None:
raise ValueError('value for binary UUID field must be UUID(), '
'a hexadecimal string, or a bytes object.')
def python_value(self, value):
if isinstance(value, uuid.UUID):
return value
elif isinstance(value, memoryview):
value = value.tobytes()
elif value and not isinstance(value, bytes):
value = bytes(value)
return uuid.UUID(bytes=value) if value is not None else None
def _date_part(date_part):
def dec(self):
return self.model._meta.database.extract_date(date_part, self)
return dec
def format_date_time(value, formats, post_process=None):
post_process = post_process or (lambda x: x)
for fmt in formats:
try:
return post_process(datetime.datetime.strptime(value, fmt))
except ValueError:
pass
return value
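# For example, format_date_time('2024-01-02', ['%Y-%m-%d']) returns
# datetime.datetime(2024, 1, 2, 0, 0); unparseable input is returned as-is.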
def simple_date_time(value):
try:
return datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
except (TypeError, ValueError):
return value
class _BaseFormattedField(Field):
formats = None
def __init__(self, formats=None, *args, **kwargs):
if formats is not None:
self.formats = formats
super(_BaseFormattedField, self).__init__(*args, **kwargs)
class DateTimeField(_BaseFormattedField):
field_type = 'DATETIME'
formats = [
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d',
]
def adapt(self, value):
if value and isinstance(value, basestring):
return format_date_time(value, self.formats)
return value
def to_timestamp(self):
return self.model._meta.database.to_timestamp(self)
def truncate(self, part):
return self.model._meta.database.truncate_date(part, self)
year = property(_date_part('year'))
month = property(_date_part('month'))
day = property(_date_part('day'))
hour = property(_date_part('hour'))
minute = property(_date_part('minute'))
second = property(_date_part('second'))
class DateField(_BaseFormattedField):
field_type = 'DATE'
formats = [
'%Y-%m-%d',
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S.%f',
]
def adapt(self, value):
if value and isinstance(value, basestring):
pp = lambda x: x.date()
return format_date_time(value, self.formats, pp)
elif value and isinstance(value, datetime.datetime):
return value.date()
return value
def to_timestamp(self):
return self.model._meta.database.to_timestamp(self)
def truncate(self, part):
return self.model._meta.database.truncate_date(part, self)
year = property(_date_part('year'))
month = property(_date_part('month'))
day = property(_date_part('day'))
class TimeField(_BaseFormattedField):
field_type = 'TIME'
formats = [
'%H:%M:%S.%f',
'%H:%M:%S',
'%H:%M',
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d %H:%M:%S',
]
def adapt(self, value):
if value:
if isinstance(value, basestring):
pp = lambda x: x.time()
return format_date_time(value, self.formats, pp)
elif isinstance(value, datetime.datetime):
return value.time()
if value is not None and isinstance(value, datetime.timedelta):
return (datetime.datetime.min + value).time()
return value
hour = property(_date_part('hour'))
minute = property(_date_part('minute'))
second = property(_date_part('second'))
def _timestamp_date_part(date_part):
def dec(self):
db = self.model._meta.database
expr = ((self / Value(self.resolution, converter=False))
if self.resolution > 1 else self)
return db.extract_date(date_part, db.from_timestamp(expr))
return dec
class TimestampField(BigIntegerField):
# Support second -> microsecond resolution.
valid_resolutions = [10 ** i for i in range(7)]
def __init__(self, *args, **kwargs):
self.resolution = kwargs.pop('resolution', None)
if not self.resolution:
self.resolution = 1
elif self.resolution in range(2, 7):
self.resolution = 10 ** self.resolution
elif self.resolution not in self.valid_resolutions:
raise ValueError('TimestampField resolution must be one of: %s' %
', '.join(str(i) for i in self.valid_resolutions))
self.ticks_to_microsecond = 1000000 // self.resolution
self.utc = kwargs.pop('utc', False) or False
dflt = datetime.datetime.utcnow if self.utc else datetime.datetime.now
kwargs.setdefault('default', dflt)
super(TimestampField, self).__init__(*args, **kwargs)
def local_to_utc(self, dt):
# Convert naive local datetime into naive UTC, e.g.:
# 2019-03-01T12:00:00 (local=US/Central) -> 2019-03-01T18:00:00.
# 2019-05-01T12:00:00 (local=US/Central) -> 2019-05-01T17:00:00.
# 2019-03-01T12:00:00 (local=UTC) -> 2019-03-01T12:00:00.
return datetime.datetime(*time.gmtime(time.mktime(dt.timetuple()))[:6])
def utc_to_local(self, dt):
# Convert a naive UTC datetime into local time, e.g.:
# 2019-03-01T18:00:00 (local=US/Central) -> 2019-03-01T12:00:00.
# 2019-05-01T17:00:00 (local=US/Central) -> 2019-05-01T12:00:00.
# 2019-03-01T12:00:00 (local=UTC) -> 2019-03-01T12:00:00.
ts = calendar.timegm(dt.utctimetuple())
return datetime.datetime.fromtimestamp(ts)
def get_timestamp(self, value):
if self.utc:
# If utc-mode is on, then we assume all naive datetimes are in UTC.
return calendar.timegm(value.utctimetuple())
else:
return time.mktime(value.timetuple())
def db_value(self, value):
if value is None:
return
if isinstance(value, datetime.datetime):
pass
elif isinstance(value, datetime.date):
value = datetime.datetime(value.year, value.month, value.day)
else:
return int(round(value * self.resolution))
timestamp = self.get_timestamp(value)
if self.resolution > 1:
timestamp += (value.microsecond * .000001)
timestamp *= self.resolution
return int(round(timestamp))
def python_value(self, value):
if value is not None and isinstance(value, (int, float, long)):
if self.resolution > 1:
value, ticks = divmod(value, self.resolution)
microseconds = int(ticks * self.ticks_to_microsecond)
else:
microseconds = 0
if self.utc:
value = datetime.datetime.utcfromtimestamp(value)
else:
value = datetime.datetime.fromtimestamp(value)
if microseconds:
value = value.replace(microsecond=microseconds)
return value
def from_timestamp(self):
expr = ((self / Value(self.resolution, converter=False))
if self.resolution > 1 else self)
return self.model._meta.database.from_timestamp(expr)
year = property(_timestamp_date_part('year'))
month = property(_timestamp_date_part('month'))
day = property(_timestamp_date_part('day'))
hour = property(_timestamp_date_part('hour'))
minute = property(_timestamp_date_part('minute'))
second = property(_timestamp_date_part('second'))
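# Usage sketch: resolution controls the tick size of the stored integer.
# With resolution=1000 (equivalently resolution=3), datetimes are stored
# as milliseconds-since-epoch (hypothetical Event model):
#
#     class Event(Model):
#         created = TimestampField(resolution=1000, utc=True)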
class IPField(BigIntegerField):
def db_value(self, val):
if val is not None:
return struct.unpack('!I', socket.inet_aton(val))[0]
def python_value(self, val):
if val is not None:
return socket.inet_ntoa(struct.pack('!I', val))
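# For example, IPField stores '127.0.0.1' as the integer 2130706433 and
# converts it back to dotted-quad text on read.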
class BooleanField(Field):
"""
    Field storing boolean values.
"""
field_type = 'BOOL'
adapt = bool
class BareField(Field):
def __init__(self, adapt=None, *args, **kwargs):
super(BareField, self).__init__(*args, **kwargs)
if adapt is not None:
self.adapt = adapt
def ddl_datatype(self, ctx):
return
class ForeignKeyField(Field):
"""
    Foreign-key field referencing another model.
"""
accessor_class = ForeignKeyAccessor
backref_accessor_class = BackrefAccessor
def __init__(self, model, field=None, backref=None, on_delete=None,
on_update=None, deferrable=None, _deferred=None,
rel_model=None, to_field=None, object_id_name=None,
lazy_load=True, constraint_name=None, related_name=None,
*args, **kwargs):
kwargs.setdefault('index', True)
super(ForeignKeyField, self).__init__(*args, **kwargs)
if rel_model is not None:
__deprecated__('"rel_model" has been deprecated in favor of '
'"model" for ForeignKeyField objects.')
model = rel_model
if to_field is not None:
__deprecated__('"to_field" has been deprecated in favor of '
'"field" for ForeignKeyField objects.')
field = to_field
if related_name is not None:
__deprecated__('"related_name" has been deprecated in favor of '
'"backref" for Field objects.')
backref = related_name
self._is_self_reference = model == 'self'
self.rel_model = model
self.rel_field = field
self.declared_backref = backref
self.backref = None
self.on_delete = on_delete
self.on_update = on_update
self.deferrable = deferrable
self.deferred = _deferred
self.object_id_name = object_id_name
self.lazy_load = lazy_load
self.constraint_name = constraint_name
@property
def field_type(self):
if not isinstance(self.rel_field, AutoField):
return self.rel_field.field_type
elif isinstance(self.rel_field, BigAutoField):
return BigIntegerField.field_type
return IntegerField.field_type
def get_modifiers(self):
if not isinstance(self.rel_field, AutoField):
return self.rel_field.get_modifiers()
return super(ForeignKeyField, self).get_modifiers()
def adapt(self, value):
return self.rel_field.adapt(value)
def db_value(self, value):
if isinstance(value, self.rel_model):
value = getattr(value, self.rel_field.name)
return self.rel_field.db_value(value)
def python_value(self, value):
if isinstance(value, self.rel_model):
return value
return self.rel_field.python_value(value)
def bind(self, model, name, set_attribute=True):
if not self.column_name:
self.column_name = name if name.endswith('_id') else name + '_id'
if not self.object_id_name:
self.object_id_name = self.column_name
if self.object_id_name == name:
self.object_id_name += '_id'
elif self.object_id_name == name:
raise ValueError('ForeignKeyField "%s"."%s" specifies an '
'object_id_name that conflicts with its field '
'name.' % (model._meta.name, name))
if self._is_self_reference:
self.rel_model = model
if isinstance(self.rel_field, basestring):
self.rel_field = getattr(self.rel_model, self.rel_field)
elif self.rel_field is None:
self.rel_field = self.rel_model._meta.primary_key
# Bind field before assigning backref, so field is bound when
# calling declared_backref() (if callable).
super(ForeignKeyField, self).bind(model, name, set_attribute)
self.safe_name = self.object_id_name
if callable_(self.declared_backref):
self.backref = self.declared_backref(self)
else:
self.backref, self.declared_backref = self.declared_backref, None
if not self.backref:
self.backref = '%s_set' % model._meta.name
if set_attribute:
setattr(model, self.object_id_name, ObjectIdAccessor(self))
if self.backref not in '!+':
setattr(self.rel_model, self.backref,
self.backref_accessor_class(self))
def foreign_key_constraint(self):
parts = []
if self.constraint_name:
parts.extend((SQL('CONSTRAINT'), Entity(self.constraint_name)))
parts.extend([
SQL('FOREIGN KEY'),
EnclosedNodeList((self,)),
SQL('REFERENCES'),
self.rel_model,
EnclosedNodeList((self.rel_field,))])
if self.on_delete:
parts.append(SQL('ON DELETE %s' % self.on_delete))
if self.on_update:
parts.append(SQL('ON UPDATE %s' % self.on_update))
if self.deferrable:
parts.append(SQL('DEFERRABLE %s' % self.deferrable))
return NodeList(parts)
def __getattr__(self, attr):
if attr.startswith('__'):
# Prevent recursion error when deep-copying.
            raise AttributeError('Cannot look up non-existent "__" methods.')
if attr in self.rel_model._meta.fields:
return self.rel_model._meta.fields[attr]
raise AttributeError('Foreign-key has no attribute %s, nor is it a '
'valid field on the related model.' % attr)
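# Usage sketch (hypothetical models): a ForeignKeyField creates the column,
# a lazily-loaded instance attribute, a raw "<name>_id" accessor and a
# back-reference query on the related model:
#
#     class User(Model):
#         username = CharField()
#
#     class Tweet(Model):
#         user = ForeignKeyField(User, backref='tweets')
#
#     tweet.user      # Related User instance (lazily loaded).
#     tweet.user_id   # Underlying column value; no query issued.
#     user.tweets     # ModelSelect of the user's tweets.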
class DeferredForeignKey(Field):
_unresolved = set()
def __init__(self, rel_model_name, **kwargs):
self.field_kwargs = kwargs
self.rel_model_name = rel_model_name.lower()
DeferredForeignKey._unresolved.add(self)
super(DeferredForeignKey, self).__init__(
column_name=kwargs.get('column_name'),
null=kwargs.get('null'),
primary_key=kwargs.get('primary_key'))
__hash__ = object.__hash__
def __deepcopy__(self, memo=None):
return DeferredForeignKey(self.rel_model_name, **self.field_kwargs)
def set_model(self, rel_model):
field = ForeignKeyField(rel_model, _deferred=True, **self.field_kwargs)
if field.primary_key:
# NOTE: this calls add_field() under-the-hood.
self.model._meta.set_primary_key(self.name, field)
else:
self.model._meta.add_field(self.name, field)
@staticmethod
def resolve(model_cls):
unresolved = sorted(DeferredForeignKey._unresolved,
key=operator.attrgetter('_order'))
for dr in unresolved:
if dr.rel_model_name == model_cls.__name__.lower():
dr.set_model(model_cls)
DeferredForeignKey._unresolved.discard(dr)
class DeferredThroughModel(object):
def __init__(self):
self._refs = []
def set_field(self, model, field, name):
self._refs.append((model, field, name))
def set_model(self, through_model):
for src_model, m2mfield, name in self._refs:
m2mfield.through_model = through_model
src_model._meta.add_field(name, m2mfield)
class MetaField(Field):
column_name = default = model = name = None
primary_key = False
class ManyToManyFieldAccessor(FieldAccessor):
def __init__(self, model, field, name):
super(ManyToManyFieldAccessor, self).__init__(model, field, name)
self.model = field.model
self.rel_model = field.rel_model
self.through_model = field.through_model
src_fks = self.through_model._meta.model_refs[self.model]
dest_fks = self.through_model._meta.model_refs[self.rel_model]
if not src_fks:
raise ValueError('Cannot find foreign-key to "%s" on "%s" model.' %
(self.model, self.through_model))
elif not dest_fks:
raise ValueError('Cannot find foreign-key to "%s" on "%s" model.' %
(self.rel_model, self.through_model))
self.src_fk = src_fks[0]
self.dest_fk = dest_fks[0]
def __get__(self, instance, instance_type=None, force_query=False):
if instance is not None:
if not force_query and self.src_fk.backref != '+':
backref = getattr(instance, self.src_fk.backref)
if isinstance(backref, list):
return [getattr(obj, self.dest_fk.name) for obj in backref]
src_id = getattr(instance, self.src_fk.rel_field.name)
return (ManyToManyQuery(instance, self, self.rel_model)
.join(self.through_model)
.join(self.model)
.where(self.src_fk == src_id))
return self.field
def __set__(self, instance, value):
query = self.__get__(instance, force_query=True)
query.add(value, clear_existing=True)
class ManyToManyField(MetaField):
accessor_class = ManyToManyFieldAccessor
def __init__(self, model, backref=None, through_model=None, on_delete=None,
on_update=None, _is_backref=False):
if through_model is not None:
if not (isinstance(through_model, DeferredThroughModel) or
is_model(through_model)):
raise TypeError('Unexpected value for through_model. Expected '
'Model or DeferredThroughModel.')
if not _is_backref and (on_delete is not None or on_update is not None):
raise ValueError('Cannot specify on_delete or on_update when '
'through_model is specified.')
self.rel_model = model
self.backref = backref
self._through_model = through_model
self._on_delete = on_delete
self._on_update = on_update
self._is_backref = _is_backref
def _get_descriptor(self):
return ManyToManyFieldAccessor(self)
def bind(self, model, name, set_attribute=True):
if isinstance(self._through_model, DeferredThroughModel):
self._through_model.set_field(model, self, name)
return
super(ManyToManyField, self).bind(model, name, set_attribute)
if not self._is_backref:
many_to_many_field = ManyToManyField(
self.model,
backref=name,
through_model=self.through_model,
on_delete=self._on_delete,
on_update=self._on_update,
_is_backref=True)
self.backref = self.backref or model._meta.name + 's'
self.rel_model._meta.add_field(self.backref, many_to_many_field)
def get_models(self):
return [model for _, model in sorted((
(self._is_backref, self.model),
(not self._is_backref, self.rel_model)))]
@property
def through_model(self):
if self._through_model is None:
self._through_model = self._create_through_model()
return self._through_model
@through_model.setter
def through_model(self, value):
self._through_model = value
def _create_through_model(self):
lhs, rhs = self.get_models()
tables = [model._meta.table_name for model in (lhs, rhs)]
class Meta:
database = self.model._meta.database
schema = self.model._meta.schema
table_name = '%s_%s_through' % tuple(tables)
indexes = (
((lhs._meta.name, rhs._meta.name),
True),)
params = {'on_delete': self._on_delete, 'on_update': self._on_update}
attrs = {
lhs._meta.name: ForeignKeyField(lhs, **params),
rhs._meta.name: ForeignKeyField(rhs, **params),
'Meta': Meta}
klass_name = '%s%sThrough' % (lhs.__name__, rhs.__name__)
return type(klass_name, (Model,), attrs)
def get_through_model(self):
# XXX: Deprecated. Just use the "through_model" property.
return self.through_model
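# Usage sketch (hypothetical models): when no through_model is given, a
# "<lhs>_<rhs>_through" model is generated automatically:
#
#     class Student(Model):
#         name = CharField()
#
#     class Course(Model):
#         name = CharField()
#         students = ManyToManyField(Student, backref='courses')
#
#     course.students.add(student)   # Inserts a row into the through table.
#     student.courses                # ManyToManyQuery of the student's courses.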
class VirtualField(MetaField):
field_class = None
def __init__(self, field_class=None, *args, **kwargs):
Field = field_class if field_class is not None else self.field_class
self.field_instance = Field() if Field is not None else None
super(VirtualField, self).__init__(*args, **kwargs)
def db_value(self, value):
if self.field_instance is not None:
return self.field_instance.db_value(value)
return value
def python_value(self, value):
if self.field_instance is not None:
return self.field_instance.python_value(value)
return value
def bind(self, model, name, set_attribute=True):
self.model = model
self.column_name = self.name = self.safe_name = name
setattr(model, name, self.accessor_class(model, self, name))
class CompositeKey(MetaField):
sequence = None
def __init__(self, *field_names):
self.field_names = field_names
self._safe_field_names = None
@property
def safe_field_names(self):
if self._safe_field_names is None:
if self.model is None:
return self.field_names
self._safe_field_names = [self.model._meta.fields[f].safe_name
for f in self.field_names]
return self._safe_field_names
def __get__(self, instance, instance_type=None):
if instance is not None:
return tuple([getattr(instance, f) for f in self.safe_field_names])
return self
def __set__(self, instance, value):
if not isinstance(value, (list, tuple)):
raise TypeError('A list or tuple must be used to set the value of '
'a composite primary key.')
if len(value) != len(self.field_names):
raise ValueError('The length of the value must equal the number '
'of columns of the composite primary key.')
for idx, field_value in enumerate(value):
setattr(instance, self.field_names[idx], field_value)
def __eq__(self, other):
expressions = [(self.model._meta.fields[field] == value)
for field, value in zip(self.field_names, other)]
return reduce(operator.and_, expressions)
def __ne__(self, other):
return ~(self == other)
def __hash__(self):
return hash((self.model.__name__, self.field_names))
def __sql__(self, ctx):
# If the composite PK is being selected, do not use parens. Elsewhere,
# such as in an expression, we want to use parentheses and treat it as
# a row value.
parens = ctx.scope != SCOPE_SOURCE
return ctx.sql(NodeList([self.model._meta.fields[field]
for field in self.field_names], ', ', parens))
def bind(self, model, name, set_attribute=True):
self.model = model
self.column_name = self.name = self.safe_name = name
setattr(model, self.name, self)
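# Usage sketch (hypothetical models): a composite primary key is declared
# via Meta.primary_key:
#
#     class UserCourse(Model):
#         user = ForeignKeyField(User)
#         course = ForeignKeyField(Course)
#
#         class Meta:
#             primary_key = CompositeKey('user', 'course')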
class _SortedFieldList(object):
__slots__ = ('_keys', '_items')
def __init__(self):
self._keys = []
self._items = []
def __getitem__(self, i):
return self._items[i]
def __iter__(self):
return iter(self._items)
def __contains__(self, item):
k = item._sort_key
i = bisect_left(self._keys, k)
j = bisect_right(self._keys, k)
return item in self._items[i:j]
def index(self, field):
return self._keys.index(field._sort_key)
def insert(self, item):
k = item._sort_key
i = bisect_left(self._keys, k)
self._keys.insert(i, k)
self._items.insert(i, item)
def remove(self, item):
idx = self.index(item)
del self._items[idx]
del self._keys[idx]
# MODELS
class SchemaManager(object):
def __init__(self, model, database=None, **context_options):
self.model = model
self._database = database
context_options.setdefault('scope', SCOPE_VALUES)
self.context_options = context_options
@property
def database(self):
db = self._database or self.model._meta.database
if db is None:
raise ImproperlyConfigured('database attribute does not appear to '
'be set on the model: %s' % self.model)
return db
@database.setter
def database(self, value):
self._database = value
def _create_context(self):
return self.database.get_sql_context(**self.context_options)
def _create_table(self, safe=True, **options):
is_temp = options.pop('temporary', False)
ctx = self._create_context()
ctx.literal('CREATE TEMPORARY TABLE ' if is_temp else 'CREATE TABLE ')
if safe:
ctx.literal('IF NOT EXISTS ')
ctx.sql(self.model).literal(' ')
columns = []
constraints = []
meta = self.model._meta
if meta.composite_key:
pk_columns = [meta.fields[field_name].column
for field_name in meta.primary_key.field_names]
constraints.append(NodeList((SQL('PRIMARY KEY'),
EnclosedNodeList(pk_columns))))
for field in meta.sorted_fields:
columns.append(field.ddl(ctx))
if isinstance(field, ForeignKeyField) and not field.deferred:
constraints.append(field.foreign_key_constraint())
if meta.constraints:
constraints.extend(meta.constraints)
constraints.extend(self._create_table_option_sql(options))
ctx.sql(EnclosedNodeList(columns + constraints))
if meta.table_settings is not None:
table_settings = ensure_tuple(meta.table_settings)
for setting in table_settings:
if not isinstance(setting, basestring):
raise ValueError('table_settings must be strings')
ctx.literal(' ').literal(setting)
extra_opts = []
if meta.strict_tables: extra_opts.append('STRICT')
if meta.without_rowid: extra_opts.append('WITHOUT ROWID')
if extra_opts:
ctx.literal(' %s' % ', '.join(extra_opts))
return ctx
def _create_table_option_sql(self, options):
accum = []
options = merge_dict(self.model._meta.options or {}, options)
if not options:
return accum
for key, value in sorted(options.items()):
if not isinstance(value, Node):
if is_model(value):
value = value._meta.table
else:
value = SQL(str(value))
accum.append(NodeList((SQL(key), value), glue='='))
return accum
def create_table(self, safe=True, **options):
self.database.execute(self._create_table(safe=safe, **options))
def _create_table_as(self, table_name, query, safe=True, **meta):
ctx = (self._create_context()
.literal('CREATE TEMPORARY TABLE '
if meta.get('temporary') else 'CREATE TABLE '))
if safe:
ctx.literal('IF NOT EXISTS ')
return (ctx
.sql(Entity(*ensure_tuple(table_name)))
.literal(' AS ')
.sql(query))
def create_table_as(self, table_name, query, safe=True, **meta):
ctx = self._create_table_as(table_name, query, safe=safe, **meta)
self.database.execute(ctx)
def _drop_table(self, safe=True, **options):
ctx = (self._create_context()
.literal('DROP TABLE IF EXISTS ' if safe else 'DROP TABLE ')
.sql(self.model))
if options.get('cascade'):
ctx = ctx.literal(' CASCADE')
elif options.get('restrict'):
ctx = ctx.literal(' RESTRICT')
return ctx
def drop_table(self, safe=True, **options):
self.database.execute(self._drop_table(safe=safe, **options))
def _truncate_table(self, restart_identity=False, cascade=False):
db = self.database
if not db.truncate_table:
return (self._create_context()
.literal('DELETE FROM ').sql(self.model))
ctx = self._create_context().literal('TRUNCATE TABLE ').sql(self.model)
if restart_identity:
ctx = ctx.literal(' RESTART IDENTITY')
if cascade:
ctx = ctx.literal(' CASCADE')
return ctx
def truncate_table(self, restart_identity=False, cascade=False):
self.database.execute(self._truncate_table(restart_identity, cascade))
def _create_indexes(self, safe=True):
return [self._create_index(index, safe)
for index in self.model._meta.fields_to_index()]
def _create_index(self, index, safe=True):
if isinstance(index, Index):
if not self.database.safe_create_index:
index = index.safe(False)
elif index._safe != safe:
index = index.safe(safe)
if isinstance(self._database, SqliteDatabase):
# Ensure we do not use value placeholders with Sqlite, as they
# are not supported.
index = ValueLiterals(index)
return self._create_context().sql(index)
def create_indexes(self, safe=True):
for query in self._create_indexes(safe=safe):
self.database.execute(query)
def _drop_indexes(self, safe=True):
return [self._drop_index(index, safe)
for index in self.model._meta.fields_to_index()
if isinstance(index, Index)]
def _drop_index(self, index, safe):
statement = 'DROP INDEX '
if safe and self.database.safe_drop_index:
statement += 'IF EXISTS '
if isinstance(index._table, Table) and index._table._schema:
index_name = Entity(index._table._schema, index._name)
else:
index_name = Entity(index._name)
return (self
._create_context()
.literal(statement)
.sql(index_name))
def drop_indexes(self, safe=True):
for query in self._drop_indexes(safe=safe):
self.database.execute(query)
def _check_sequences(self, field):
if not field.sequence or not self.database.sequences:
raise ValueError('Sequences are either not supported, or are not '
'defined for "%s".' % field.name)
def _sequence_for_field(self, field):
if field.model._meta.schema:
return Entity(field.model._meta.schema, field.sequence)
else:
return Entity(field.sequence)
def _create_sequence(self, field):
self._check_sequences(field)
if not self.database.sequence_exists(field.sequence):
return (self
._create_context()
.literal('CREATE SEQUENCE ')
.sql(self._sequence_for_field(field)))
def create_sequence(self, field):
seq_ctx = self._create_sequence(field)
if seq_ctx is not None:
self.database.execute(seq_ctx)
def _drop_sequence(self, field):
self._check_sequences(field)
if self.database.sequence_exists(field.sequence):
return (self
._create_context()
.literal('DROP SEQUENCE ')
.sql(self._sequence_for_field(field)))
def drop_sequence(self, field):
seq_ctx = self._drop_sequence(field)
if seq_ctx is not None:
self.database.execute(seq_ctx)
def _create_foreign_key(self, field):
name = 'fk_%s_%s_refs_%s' % (field.model._meta.table_name,
field.column_name,
field.rel_model._meta.table_name)
return (self
._create_context()
.literal('ALTER TABLE ')
.sql(field.model)
.literal(' ADD CONSTRAINT ')
.sql(Entity(_truncate_constraint_name(name)))
.literal(' ')
.sql(field.foreign_key_constraint()))
def create_foreign_key(self, field):
self.database.execute(self._create_foreign_key(field))
def create_sequences(self):
if self.database.sequences:
for field in self.model._meta.sorted_fields:
if field.sequence:
self.create_sequence(field)
def create_all(self, safe=True, **table_options):
self.create_sequences()
self.create_table(safe, **table_options)
self.create_indexes(safe=safe)
def drop_sequences(self):
if self.database.sequences:
for field in self.model._meta.sorted_fields:
if field.sequence:
self.drop_sequence(field)
def drop_all(self, safe=True, drop_sequences=True, **options):
self.drop_table(safe, **options)
if drop_sequences:
self.drop_sequences()
class Metadata(object):
def __init__(self, model, database=None, table_name=None, indexes=None,
primary_key=None, constraints=None, schema=None,
only_save_dirty=False, depends_on=None, options=None,
db_table=None, table_function=None, table_settings=None,
without_rowid=False, temporary=False, strict_tables=None,
legacy_table_names=True, **kwargs):
if db_table is not None:
__deprecated__('"db_table" has been deprecated in favor of '
'"table_name" for Models.')
table_name = db_table
self.model = model
self.database = database
self.fields = {}
self.columns = {}
self.combined = {}
self._sorted_field_list = _SortedFieldList()
self.sorted_fields = []
self.sorted_field_names = []
self.defaults = {}
self._default_by_name = {}
self._default_dict = {}
self._default_callables = {}
self._default_callable_list = []
self.name = model.__name__.lower()
self.table_function = table_function
self.legacy_table_names = legacy_table_names
if not table_name:
table_name = (self.table_function(model)
if self.table_function
else self.make_table_name())
self.table_name = table_name
self._table = None
self.indexes = list(indexes) if indexes else []
self.constraints = constraints
self._schema = schema
self.primary_key = primary_key
self.composite_key = self.auto_increment = None
self.only_save_dirty = only_save_dirty
self.depends_on = depends_on
self.table_settings = table_settings
self.without_rowid = without_rowid
self.strict_tables = strict_tables
self.temporary = temporary
self.refs = {}
self.backrefs = {}
self.model_refs = collections.defaultdict(list)
self.model_backrefs = collections.defaultdict(list)
self.manytomany = {}
self.options = options or {}
for key, value in kwargs.items():
setattr(self, key, value)
self._additional_keys = set(kwargs.keys())
# Allow objects to register hooks that are called if the model is bound
# to a different database. For example, BlobField uses a different
# Python data-type depending on the db driver / python version. When
# the database changes, we need to update any BlobField so they can use
# the appropriate data-type.
self._db_hooks = []
def make_table_name(self):
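        # E.g. for a model class named "FooBar": legacy naming yields
        # "foobar" (the lowercased class name), while snake-case naming
        # yields "foo_bar".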
if self.legacy_table_names:
return re.sub(r'[^\w]+', '_', self.name)
return make_snake_case(self.model.__name__)
def model_graph(self, refs=True, backrefs=True, depth_first=True):
if not refs and not backrefs:
raise ValueError('One of `refs` or `backrefs` must be True.')
accum = [(None, self.model, None)]
seen = set()
queue = collections.deque((self,))
method = queue.pop if depth_first else queue.popleft
while queue:
curr = method()
if curr in seen: continue
seen.add(curr)
if refs:
for fk, model in curr.refs.items():
accum.append((fk, model, False))
queue.append(model._meta)
if backrefs:
for fk, model in curr.backrefs.items():
accum.append((fk, model, True))
queue.append(model._meta)
return accum
def add_ref(self, field):
rel = field.rel_model
self.refs[field] = rel
self.model_refs[rel].append(field)
rel._meta.backrefs[field] = self.model
rel._meta.model_backrefs[self.model].append(field)
def remove_ref(self, field):
rel = field.rel_model
del self.refs[field]
self.model_refs[rel].remove(field)
del rel._meta.backrefs[field]
rel._meta.model_backrefs[self.model].remove(field)
def add_manytomany(self, field):
self.manytomany[field.name] = field
def remove_manytomany(self, field):
del self.manytomany[field.name]
@property
def table(self):
if self._table is None:
self._table = Table(
self.table_name,
[field.column_name for field in self.sorted_fields],
schema=self.schema,
_model=self.model,
_database=self.database)
return self._table
@table.setter
def table(self, value):
raise AttributeError('Cannot set the "table".')
@table.deleter
def table(self):
self._table = None
@property
def schema(self):
return self._schema
@schema.setter
def schema(self, value):
self._schema = value
del self.table
@property
def entity(self):
if self._schema:
return Entity(self._schema, self.table_name)
else:
return Entity(self.table_name)
def _update_sorted_fields(self):
self.sorted_fields = list(self._sorted_field_list)
self.sorted_field_names = [f.name for f in self.sorted_fields]
def get_rel_for_model(self, model):
if isinstance(model, ModelAlias):
model = model.model
forwardrefs = self.model_refs.get(model, [])
backrefs = self.model_backrefs.get(model, [])
return (forwardrefs, backrefs)
def add_field(self, field_name, field, set_attribute=True):
if field_name in self.fields:
self.remove_field(field_name)
elif field_name in self.manytomany:
self.remove_manytomany(self.manytomany[field_name])
if not isinstance(field, MetaField):
del self.table
field.bind(self.model, field_name, set_attribute)
self.fields[field.name] = field
self.columns[field.column_name] = field
self.combined[field.name] = field
self.combined[field.column_name] = field
self._sorted_field_list.insert(field)
self._update_sorted_fields()
if field.default is not None:
# This optimization helps speed up model instance construction.
self.defaults[field] = field.default
if callable_(field.default):
self._default_callables[field] = field.default
self._default_callable_list.append((field.name,
field.default))
else:
self._default_dict[field] = field.default
self._default_by_name[field.name] = field.default
else:
field.bind(self.model, field_name, set_attribute)
if isinstance(field, ForeignKeyField):
self.add_ref(field)
elif isinstance(field, ManyToManyField) and field.name:
self.add_manytomany(field)
def remove_field(self, field_name):
if field_name not in self.fields:
return
del self.table
original = self.fields.pop(field_name)
del self.columns[original.column_name]
del self.combined[field_name]
try:
del self.combined[original.column_name]
except KeyError:
pass
self._sorted_field_list.remove(original)
self._update_sorted_fields()
if original.default is not None:
del self.defaults[original]
if self._default_callables.pop(original, None):
for i, (name, _) in enumerate(self._default_callable_list):
if name == field_name:
self._default_callable_list.pop(i)
break
else:
self._default_dict.pop(original, None)
self._default_by_name.pop(original.name, None)
if isinstance(original, ForeignKeyField):
self.remove_ref(original)
def set_primary_key(self, name, field):
self.composite_key = isinstance(field, CompositeKey)
self.add_field(name, field)
self.primary_key = field
self.auto_increment = (
field.auto_increment or
bool(field.sequence))
def get_primary_keys(self):
if self.composite_key:
return tuple([self.fields[field_name]
for field_name in self.primary_key.field_names])
else:
return (self.primary_key,) if self.primary_key is not False else ()
def get_default_dict(self):
dd = self._default_by_name.copy()
for field_name, default in self._default_callable_list:
dd[field_name] = default()
return dd
def fields_to_index(self):
indexes = []
for f in self.sorted_fields:
if f.primary_key:
continue
if f.index or f.unique:
indexes.append(ModelIndex(self.model, (f,), unique=f.unique,
using=f.index_type))
for index_obj in self.indexes:
if isinstance(index_obj, Node):
indexes.append(index_obj)
elif isinstance(index_obj, (list, tuple)):
index_parts, unique = index_obj
fields = []
for part in index_parts:
if isinstance(part, basestring):
fields.append(self.combined[part])
elif isinstance(part, Node):
fields.append(part)
else:
raise ValueError('Expected either a field name or a '
'subclass of Node. Got: %s' % part)
indexes.append(ModelIndex(self.model, fields, unique=unique))
return indexes
def set_database(self, database):
self.database = database
self.model._schema._database = database
del self.table
# Apply any hooks that have been registered. If we have an
# uninitialized proxy object, we will treat that as `None`.
if isinstance(database, Proxy) and database.obj is None:
database = None
for hook in self._db_hooks:
hook(database)
def set_table_name(self, table_name):
self.table_name = table_name
del self.table
class SubclassAwareMetadata(Metadata):
models = []
def __init__(self, model, *args, **kwargs):
super(SubclassAwareMetadata, self).__init__(model, *args, **kwargs)
self.models.append(model)
def map_models(self, fn):
for model in self.models:
fn(model)
class DoesNotExist(Exception): pass
class ModelBase(type):
inheritable = set(['constraints', 'database', 'indexes', 'primary_key',
'options', 'schema', 'table_function', 'temporary',
'only_save_dirty', 'legacy_table_names',
'table_settings', 'strict_tables'])
def __new__(cls, name, bases, attrs):
if name == MODEL_BASE or bases[0].__name__ == MODEL_BASE:
return super(ModelBase, cls).__new__(cls, name, bases, attrs)
meta_options = {}
meta = attrs.pop('Meta', None)
if meta:
for k, v in meta.__dict__.items():
if not k.startswith('_'):
meta_options[k] = v
pk = getattr(meta, 'primary_key', None)
pk_name = parent_pk = None
# Inherit any field descriptors by deep copying the underlying field
# into the attrs of the new model, additionally see if the bases define
# inheritable model options and swipe them.
for b in bases:
if not hasattr(b, '_meta'):
continue
base_meta = b._meta
if parent_pk is None:
parent_pk = deepcopy(base_meta.primary_key)
all_inheritable = cls.inheritable | base_meta._additional_keys
for k in base_meta.__dict__:
if k in all_inheritable and k not in meta_options:
meta_options[k] = base_meta.__dict__[k]
meta_options.setdefault('database', base_meta.database)
meta_options.setdefault('schema', base_meta.schema)
for (k, v) in b.__dict__.items():
if k in attrs: continue
if isinstance(v, FieldAccessor) and not v.field.primary_key:
attrs[k] = deepcopy(v.field)
sopts = meta_options.pop('schema_options', None) or {}
Meta = meta_options.get('model_metadata_class', Metadata)
Schema = meta_options.get('schema_manager_class', SchemaManager)
# Construct the new class.
cls = super(ModelBase, cls).__new__(cls, name, bases, attrs)
cls.__data__ = cls.__rel__ = None
cls._meta = Meta(cls, **meta_options)
cls._schema = Schema(cls, **sopts)
fields = []
for key, value in cls.__dict__.items():
if isinstance(value, Field):
if value.primary_key and pk:
raise ValueError('over-determined primary key %s.' % name)
elif value.primary_key:
pk, pk_name = value, key
else:
fields.append((key, value))
if pk is None:
if parent_pk is not False:
pk, pk_name = ((parent_pk, parent_pk.name)
if parent_pk is not None else
(AutoField(), 'id'))
else:
pk = False
elif isinstance(pk, CompositeKey):
pk_name = '__composite_key__'
cls._meta.composite_key = True
if pk is not False:
cls._meta.set_primary_key(pk_name, pk)
for name, field in fields:
cls._meta.add_field(name, field)
# Create a repr and error class before finalizing.
if hasattr(cls, '__str__') and '__repr__' not in attrs:
setattr(cls, '__repr__', lambda self: '<%s: %s>' % (
cls.__name__, self.__str__()))
exc_name = '%sDoesNotExist' % cls.__name__
exc_attrs = {'__module__': cls.__module__}
exception_class = type(exc_name, (DoesNotExist,), exc_attrs)
cls.DoesNotExist = exception_class
# Call validation hook, allowing additional model validation.
cls.validate_model()
DeferredForeignKey.resolve(cls)
return cls
def __repr__(self):
return '<Model: %s>' % self.__name__
def __iter__(self):
return iter(self.select())
def __getitem__(self, key):
return self.get_by_id(key)
def __setitem__(self, key, value):
self.set_by_id(key, value)
def __delitem__(self, key):
self.delete_by_id(key)
def __contains__(self, key):
try:
self.get_by_id(key)
except self.DoesNotExist:
return False
else:
return True
def __len__(self):
return self.select().count()
def __bool__(self):
return True
__nonzero__ = __bool__ # Python 2.
def __sql__(self, ctx):
return ctx.sql(self._meta.table)
class _BoundModelsContext(_callable_context_manager):
def __init__(self, models, database, bind_refs, bind_backrefs):
self.models = models
self.database = database
self.bind_refs = bind_refs
self.bind_backrefs = bind_backrefs
def __enter__(self):
self._orig_database = []
for model in self.models:
self._orig_database.append(model._meta.database)
model.bind(self.database, self.bind_refs, self.bind_backrefs,
_exclude=set(self.models))
return self.models
def __exit__(self, exc_type, exc_val, exc_tb):
for model, db in zip(self.models, self._orig_database):
model.bind(db, self.bind_refs, self.bind_backrefs,
_exclude=set(self.models))
class Model(with_metaclass(ModelBase, Node)):
def __init__(self, *args, **kwargs):
if kwargs.pop('__no_default__', None):
self.__data__ = {}
else:
self.__data__ = self._meta.get_default_dict()
self._dirty = set(self.__data__)
self.__rel__ = {}
for k in kwargs:
setattr(self, k, kwargs[k])
def __str__(self):
return str(self._pk) if self._meta.primary_key is not False else 'n/a'
@classmethod
def validate_model(cls):
pass
@classmethod
def alias(cls, alias=None):
return ModelAlias(cls, alias)
@classmethod
def select(cls, *fields):
is_default = not fields
if not fields:
fields = cls._meta.sorted_fields
return ModelSelect(cls, fields, is_default=is_default)
@classmethod
def _normalize_data(cls, data, kwargs):
normalized = {}
if data:
if not isinstance(data, dict):
if kwargs:
raise ValueError('Data cannot be mixed with keyword '
'arguments: %s' % data)
return data
for key in data:
try:
field = (key if isinstance(key, Field)
else cls._meta.combined[key])
except KeyError:
if not isinstance(key, Node):
raise ValueError('Unrecognized field name: "%s" in %s.'
% (key, data))
field = key
normalized[field] = data[key]
if kwargs:
for key in kwargs:
try:
normalized[cls._meta.combined[key]] = kwargs[key]
except KeyError:
normalized[getattr(cls, key)] = kwargs[key]
return normalized
@classmethod
def update(cls, __data=None, **update):
return ModelUpdate(cls, cls._normalize_data(__data, update))
@classmethod
def insert(cls, __data=None, **insert):
return ModelInsert(cls, cls._normalize_data(__data, insert))
@classmethod
def insert_many(cls, rows, fields=None):
return ModelInsert(cls, insert=rows, columns=fields)
@classmethod
def insert_from(cls, query, fields):
columns = [getattr(cls, field) if isinstance(field, basestring)
else field for field in fields]
return ModelInsert(cls, insert=query, columns=columns)
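    # Example (illustrative sketch, not from the library docs; "Tweet" and
    # "TweetArchive" are hypothetical models with matching columns):
    #
    #   query = Tweet.select(Tweet.user, Tweet.message)
    #   (TweetArchive
    #    .insert_from(query, fields=[TweetArchive.user, TweetArchive.message])
    #    .execute())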
@classmethod
def replace(cls, __data=None, **insert):
return cls.insert(__data, **insert).on_conflict('REPLACE')
@classmethod
def replace_many(cls, rows, fields=None):
return (cls
.insert_many(rows=rows, fields=fields)
.on_conflict('REPLACE'))
@classmethod
def raw(cls, sql, *params):
return ModelRaw(cls, sql, params)
@classmethod
def delete(cls):
return ModelDelete(cls)
@classmethod
def create(cls, **query):
inst = cls(**query)
inst.save(force_insert=True)
return inst
@classmethod
def bulk_create(cls, model_list, batch_size=None):
if batch_size is not None:
batches = chunked(model_list, batch_size)
else:
batches = [model_list]
field_names = list(cls._meta.sorted_field_names)
if cls._meta.auto_increment:
pk_name = cls._meta.primary_key.name
field_names.remove(pk_name)
if cls._meta.database.returning_clause and \
cls._meta.primary_key is not False:
pk_fields = cls._meta.get_primary_keys()
else:
pk_fields = None
fields = [cls._meta.fields[field_name] for field_name in field_names]
attrs = []
for field in fields:
if isinstance(field, ForeignKeyField):
attrs.append(field.object_id_name)
else:
attrs.append(field.name)
for batch in batches:
accum = ([getattr(model, f) for f in attrs]
for model in batch)
res = cls.insert_many(accum, fields=fields).execute()
if pk_fields and res is not None:
for row, model in zip(res, batch):
for (pk_field, obj_id) in zip(pk_fields, row):
setattr(model, pk_field.name, obj_id)
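    # Example (illustrative sketch; "User" is a hypothetical model and
    # "database" its bound Database). Rows are written in batches, and on
    # databases with a RETURNING clause the generated primary keys are
    # copied back onto the instances:
    #
    #   users = [User(username='u%d' % i) for i in range(1000)]
    #   with database.atomic():
    #       User.bulk_create(users, batch_size=100)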
@classmethod
def bulk_update(cls, model_list, fields, batch_size=None):
if isinstance(cls._meta.primary_key, CompositeKey):
raise ValueError('bulk_update() is not supported for models with '
'a composite primary key.')
# First normalize list of fields so all are field instances.
fields = [cls._meta.fields[f] if isinstance(f, basestring) else f
for f in fields]
# Now collect list of attribute names to use for values.
attrs = [field.object_id_name if isinstance(field, ForeignKeyField)
else field.name for field in fields]
if batch_size is not None:
batches = chunked(model_list, batch_size)
else:
batches = [model_list]
n = 0
pk = cls._meta.primary_key
for batch in batches:
id_list = [model._pk for model in batch]
update = {}
for field, attr in zip(fields, attrs):
accum = []
for model in batch:
value = getattr(model, attr)
if not isinstance(value, Node):
value = field.to_value(value)
accum.append((pk.to_value(model._pk), value))
case = Case(pk, accum)
update[field] = case
n += (cls.update(update)
.where(cls._meta.primary_key.in_(id_list))
.execute())
return n
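    # Example (illustrative sketch; "User" is a hypothetical model). Each
    # listed field is rewritten with one UPDATE per batch, using a CASE
    # expression keyed on the primary key as constructed above:
    #
    #   u1, u2 = User.get_by_id(1), User.get_by_id(2)
    #   u1.username, u2.username = 'huey', 'mickey'
    #   User.bulk_update([u1, u2], fields=[User.username], batch_size=50)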
@classmethod
def noop(cls):
return NoopModelSelect(cls, ())
@classmethod
def get(cls, *query, **filters):
sq = cls.select()
if query:
# Handle simple lookup using just the primary key.
if len(query) == 1 and isinstance(query[0], int):
sq = sq.where(cls._meta.primary_key == query[0])
else:
sq = sq.where(*query)
if filters:
sq = sq.filter(**filters)
return sq.get()
@classmethod
def get_or_none(cls, *query, **filters):
try:
return cls.get(*query, **filters)
except DoesNotExist:
pass
@classmethod
def get_by_id(cls, pk):
return cls.get(cls._meta.primary_key == pk)
@classmethod
def set_by_id(cls, key, value):
if key is None:
return cls.insert(value).execute()
else:
return (cls.update(value)
.where(cls._meta.primary_key == key).execute())
@classmethod
def delete_by_id(cls, pk):
return cls.delete().where(cls._meta.primary_key == pk).execute()
@classmethod
def get_or_create(cls, **kwargs):
defaults = kwargs.pop('defaults', {})
query = cls.select()
for field, value in kwargs.items():
query = query.where(getattr(cls, field) == value)
try:
return query.get(), False
except cls.DoesNotExist:
try:
if defaults:
kwargs.update(defaults)
with cls._meta.database.atomic():
return cls.create(**kwargs), True
except IntegrityError as exc:
try:
return query.get(), False
except cls.DoesNotExist:
raise exc
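    # Example (illustrative sketch; "Person" is a hypothetical model).
    # Values in "defaults" are applied only when a new row is created, and
    # the second element of the returned tuple reports whether it was:
    #
    #   person, created = Person.get_or_create(
    #       name='Huey', defaults={'favorite_food': 'kibble'})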
@classmethod
def filter(cls, *dq_nodes, **filters):
return cls.select().filter(*dq_nodes, **filters)
def get_id(self):
# Using getattr(self, pk-name) could accidentally trigger a query if
# the primary-key is a foreign-key. So we use the safe_name attribute,
# which defaults to the field-name, but will be the object_id_name for
# foreign-key fields.
if self._meta.primary_key is not False:
return getattr(self, self._meta.primary_key.safe_name)
_pk = property(get_id)
@_pk.setter
def _pk(self, value):
setattr(self, self._meta.primary_key.name, value)
def _pk_expr(self):
return self._meta.primary_key == self._pk
def _prune_fields(self, field_dict, only):
new_data = {}
for field in only:
if isinstance(field, basestring):
field = self._meta.combined[field]
if field.name in field_dict:
new_data[field.name] = field_dict[field.name]
return new_data
def _populate_unsaved_relations(self, field_dict):
for foreign_key_field in self._meta.refs:
foreign_key = foreign_key_field.name
conditions = (
foreign_key in field_dict and
field_dict[foreign_key] is None and
self.__rel__.get(foreign_key) is not None)
if conditions:
setattr(self, foreign_key, getattr(self, foreign_key))
field_dict[foreign_key] = self.__data__[foreign_key]
def save(self, force_insert=False, only=None):
field_dict = self.__data__.copy()
if self._meta.primary_key is not False:
pk_field = self._meta.primary_key
pk_value = self._pk
else:
pk_field = pk_value = None
if only is not None:
field_dict = self._prune_fields(field_dict, only)
elif self._meta.only_save_dirty and not force_insert:
field_dict = self._prune_fields(field_dict, self.dirty_fields)
if not field_dict:
self._dirty.clear()
return False
self._populate_unsaved_relations(field_dict)
rows = 1
if self._meta.auto_increment and pk_value is None:
field_dict.pop(pk_field.name, None)
if pk_value is not None and not force_insert:
if self._meta.composite_key:
for pk_part_name in pk_field.field_names:
field_dict.pop(pk_part_name, None)
else:
field_dict.pop(pk_field.name, None)
if not field_dict:
raise ValueError('no data to save!')
rows = self.update(**field_dict).where(self._pk_expr()).execute()
elif pk_field is not None:
pk = self.insert(**field_dict).execute()
if pk is not None and (self._meta.auto_increment or
pk_value is None):
self._pk = pk
# Although we set the primary-key, do not mark it as dirty.
self._dirty.discard(pk_field.name)
else:
self.insert(**field_dict).execute()
self._dirty -= set(field_dict) # Remove any fields we saved.
return rows
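    # Example (illustrative sketch; "User" is a hypothetical model).
    # save() issues an INSERT when no primary-key value is set (or when
    # force_insert=True), otherwise an UPDATE keyed on the primary key;
    # "only" restricts which columns are written:
    #
    #   user = User(username='huey')
    #   user.save()                      # INSERT; assigns user.id
    #   user.username = 'huey-nugget'
    #   user.save(only=[User.username])  # UPDATE of a single column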
def is_dirty(self):
return bool(self._dirty)
@property
def dirty_fields(self):
return [f for f in self._meta.sorted_fields if f.name in self._dirty]
def dependencies(self, search_nullable=False):
model_class = type(self)
stack = [(type(self), None)]
seen = set()
while stack:
klass, query = stack.pop()
if klass in seen:
continue
seen.add(klass)
for fk, rel_model in klass._meta.backrefs.items():
if rel_model is model_class or query is None:
node = (fk == self.__data__[fk.rel_field.name])
else:
node = fk << query
subquery = (rel_model.select(rel_model._meta.primary_key)
.where(node))
if not fk.null or search_nullable:
stack.append((rel_model, subquery))
yield (node, fk)
def delete_instance(self, recursive=False, delete_nullable=False):
if recursive:
dependencies = self.dependencies(delete_nullable)
for query, fk in reversed(list(dependencies)):
model = fk.model
if fk.null and not delete_nullable:
model.update(**{fk.name: None}).where(query).execute()
else:
model.delete().where(query).execute()
return type(self).delete().where(self._pk_expr()).execute()
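    # Example (illustrative sketch; assumes a hypothetical "User" model
    # referenced by other models via foreign keys). With recursive=True,
    # dependent rows are deleted first -- or merely nulled-out when the
    # foreign key is nullable and delete_nullable is False:
    #
    #   user = User.get(User.username == 'huey')
    #   user.delete_instance(recursive=True)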
def __hash__(self):
return hash((self.__class__, self._pk))
def __eq__(self, other):
return (
other.__class__ == self.__class__ and
self._pk is not None and
self._pk == other._pk)
def __ne__(self, other):
return not self == other
def __sql__(self, ctx):
# NOTE: when comparing a foreign-key field whose related-field is not a
# primary-key, then doing an equality test for the foreign-key with a
# model instance will return the wrong value; since we would return
# the primary key for a given model instance.
#
# This checks to see if we have a converter in the scope, and that we
# are converting a foreign-key expression. If so, we hand the model
# instance to the converter rather than blindly grabbing the primary-
# key. In the event the provided converter fails to handle the model
# instance, then we will return the primary-key.
if ctx.state.converter is not None and ctx.state.is_fk_expr:
try:
return ctx.sql(Value(self, converter=ctx.state.converter))
except (TypeError, ValueError):
pass
return ctx.sql(Value(getattr(self, self._meta.primary_key.name),
converter=self._meta.primary_key.db_value))
@classmethod
def bind(cls, database, bind_refs=True, bind_backrefs=True, _exclude=None):
is_different = cls._meta.database is not database
cls._meta.set_database(database)
if bind_refs or bind_backrefs:
if _exclude is None:
_exclude = set()
G = cls._meta.model_graph(refs=bind_refs, backrefs=bind_backrefs)
for _, model, is_backref in G:
if model not in _exclude:
model._meta.set_database(database)
_exclude.add(model)
return is_different
@classmethod
def bind_ctx(cls, database, bind_refs=True, bind_backrefs=True):
return _BoundModelsContext((cls,), database, bind_refs, bind_backrefs)
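    # Example (illustrative sketch): temporarily binding a model graph to
    # another database, e.g. an in-memory SQLite database for tests
    # (assumes this module's SqliteDatabase class):
    #
    #   test_db = SqliteDatabase(':memory:')
    #   with User.bind_ctx(test_db):
    #       User.create_table()
    #       User.create(username='huey')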
@classmethod
def table_exists(cls):
M = cls._meta
return cls._schema.database.table_exists(M.table.__name__, M.schema)
@classmethod
def create_table(cls, safe=True, **options):
if 'fail_silently' in options:
__deprecated__('"fail_silently" has been deprecated in favor of '
'"safe" for the create_table() method.')
safe = options.pop('fail_silently')
if safe and not cls._schema.database.safe_create_index \
and cls.table_exists():
return
if cls._meta.temporary:
options.setdefault('temporary', cls._meta.temporary)
cls._schema.create_all(safe, **options)
@classmethod
def drop_table(cls, safe=True, drop_sequences=True, **options):
if safe and not cls._schema.database.safe_drop_index \
and not cls.table_exists():
return
if cls._meta.temporary:
options.setdefault('temporary', cls._meta.temporary)
cls._schema.drop_all(safe, drop_sequences, **options)
@classmethod
def truncate_table(cls, **options):
cls._schema.truncate_table(**options)
@classmethod
def index(cls, *fields, **kwargs):
return ModelIndex(cls, fields, **kwargs)
@classmethod
def add_index(cls, *fields, **kwargs):
if len(fields) == 1 and isinstance(fields[0], (SQL, Index)):
cls._meta.indexes.append(fields[0])
else:
cls._meta.indexes.append(ModelIndex(cls, fields, **kwargs))
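    # Example (illustrative sketch; "Article" is a hypothetical model):
    #
    #   idx = Article.index(Article.name, Article.timestamp, unique=True)
    #   Article.add_index(idx)
    #   # A raw SQL index declaration is accepted as well:
    #   Article.add_index(SQL('CREATE INDEX "article_name" ON "article" ("name")'))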
class ModelAlias(Node):
"""Provide a separate reference to a model in a query."""
def __init__(self, model, alias=None):
self.__dict__['model'] = model
self.__dict__['alias'] = alias
def __getattr__(self, attr):
        # Hack to work around the fact that properties or other objects
        # implementing the descriptor protocol (on the model being aliased)
        # will not work correctly when we use getattr(). So we explicitly
        # pass the model alias to the descriptor's getter.
try:
obj = self.model.__dict__[attr]
except KeyError:
pass
else:
if isinstance(obj, ModelDescriptor):
return obj.__get__(None, self)
model_attr = getattr(self.model, attr)
if isinstance(model_attr, Field):
self.__dict__[attr] = FieldAlias.create(self, model_attr)
return self.__dict__[attr]
return model_attr
def __setattr__(self, attr, value):
raise AttributeError('Cannot set attributes on model aliases.')
def get_field_aliases(self):
return [getattr(self, n) for n in self.model._meta.sorted_field_names]
def select(self, *selection):
if not selection:
selection = self.get_field_aliases()
return ModelSelect(self, selection)
def __call__(self, **kwargs):
return self.model(**kwargs)
def __sql__(self, ctx):
if ctx.scope == SCOPE_VALUES:
# Return the quoted table name.
return ctx.sql(self.model)
if self.alias:
ctx.alias_manager[self] = self.alias
if ctx.scope == SCOPE_SOURCE:
# Define the table and its alias.
return (ctx
.sql(self.model._meta.entity)
.literal(' AS ')
.sql(Entity(ctx.alias_manager[self])))
else:
# Refer to the table using the alias.
return ctx.sql(Entity(ctx.alias_manager[self]))
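# Example (illustrative sketch; "Category" is a hypothetical model with a
# self-referential "parent" foreign key). A ModelAlias lets the same table
# appear more than once in a single query, e.g. for a self-join:
#
#   Parent = Category.alias()
#   query = (Category
#            .select(Category, Parent)
#            .join(Parent, on=(Category.parent == Parent.id)))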
class FieldAlias(Field):
def __init__(self, source, field):
self.source = source
self.model = source.model
self.field = field
@classmethod
def create(cls, source, field):
class _FieldAlias(cls, type(field)):
pass
return _FieldAlias(source, field)
def clone(self):
return FieldAlias(self.source, self.field)
def adapt(self, value): return self.field.adapt(value)
def python_value(self, value): return self.field.python_value(value)
def db_value(self, value): return self.field.db_value(value)
def __getattr__(self, attr):
return self.source if attr == 'model' else getattr(self.field, attr)
def __sql__(self, ctx):
return ctx.sql(Column(self.source, self.field.column_name))
def sort_models(models):
models = set(models)
seen = set()
ordering = []
def dfs(model):
if model in models and model not in seen:
seen.add(model)
for foreign_key, rel_model in model._meta.refs.items():
# Do not depth-first search deferred foreign-keys as this can
# cause tables to be created in the incorrect order.
if not foreign_key.deferred:
dfs(rel_model)
if model._meta.depends_on:
for dependency in model._meta.depends_on:
dfs(dependency)
ordering.append(model)
names = lambda m: (m._meta.name, m._meta.table_name)
for m in sorted(models, key=names):
dfs(m)
return ordering
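# Example (illustrative sketch): the depth-first traversal above orders
# models so that referenced tables precede the models referencing them --
# the safe order for creation; reverse it when dropping:
#
#   for model in sort_models([Tweet, User, Favorite]):
#       model.create_table()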
class _ModelQueryHelper(object):
default_row_type = ROW.MODEL
def __init__(self, *args, **kwargs):
super(_ModelQueryHelper, self).__init__(*args, **kwargs)
if not self._database:
self._database = self.model._meta.database
@Node.copy
def objects(self, constructor=None):
self._row_type = ROW.CONSTRUCTOR
self._constructor = self.model if constructor is None else constructor
def _get_cursor_wrapper(self, cursor):
row_type = self._row_type or self.default_row_type
if row_type == ROW.MODEL:
return self._get_model_cursor_wrapper(cursor)
elif row_type == ROW.DICT:
return ModelDictCursorWrapper(cursor, self.model, self._returning)
elif row_type == ROW.TUPLE:
return ModelTupleCursorWrapper(cursor, self.model, self._returning)
elif row_type == ROW.NAMED_TUPLE:
return ModelNamedTupleCursorWrapper(cursor, self.model,
self._returning)
elif row_type == ROW.CONSTRUCTOR:
return ModelObjectCursorWrapper(cursor, self.model,
self._returning, self._constructor)
else:
raise ValueError('Unrecognized row type: "%s".' % row_type)
def _get_model_cursor_wrapper(self, cursor):
return ModelObjectCursorWrapper(cursor, self.model, [], self.model)
class ModelRaw(_ModelQueryHelper, RawQuery):
def __init__(self, model, sql, params, **kwargs):
self.model = model
self._returning = ()
super(ModelRaw, self).__init__(sql=sql, params=params, **kwargs)
def get(self):
try:
return self.execute()[0]
except IndexError:
sql, params = self.sql()
raise self.model.DoesNotExist('%s instance matching query does '
'not exist:\nSQL: %s\nParams: %s' %
(self.model, sql, params))
class BaseModelSelect(_ModelQueryHelper):
def union_all(self, rhs):
return ModelCompoundSelectQuery(self.model, self, 'UNION ALL', rhs)
__add__ = union_all
def union(self, rhs):
return ModelCompoundSelectQuery(self.model, self, 'UNION', rhs)
__or__ = union
def intersect(self, rhs):
return ModelCompoundSelectQuery(self.model, self, 'INTERSECT', rhs)
__and__ = intersect
def except_(self, rhs):
return ModelCompoundSelectQuery(self.model, self, 'EXCEPT', rhs)
__sub__ = except_
def __iter__(self):
if not self._cursor_wrapper:
self.execute()
return iter(self._cursor_wrapper)
def prefetch(self, *subqueries):
return prefetch(self, *subqueries)
def get(self, database=None):
clone = self.paginate(1, 1)
clone._cursor_wrapper = None
try:
return clone.execute(database)[0]
except IndexError:
sql, params = clone.sql()
raise self.model.DoesNotExist('%s instance matching query does '
'not exist:\nSQL: %s\nParams: %s' %
(clone.model, sql, params))
def get_or_none(self, database=None):
try:
return self.get(database=database)
except self.model.DoesNotExist:
pass
@Node.copy
def group_by(self, *columns):
grouping = []
for column in columns:
if is_model(column):
grouping.extend(column._meta.sorted_fields)
elif isinstance(column, Table):
if not column._columns:
raise ValueError('Cannot pass a table to group_by() that '
'does not have columns explicitly '
'declared.')
grouping.extend([getattr(column, col_name)
for col_name in column._columns])
else:
grouping.append(column)
self._group_by = grouping
class ModelCompoundSelectQuery(BaseModelSelect, CompoundSelectQuery):
def __init__(self, model, *args, **kwargs):
self.model = model
super(ModelCompoundSelectQuery, self).__init__(*args, **kwargs)
def _get_model_cursor_wrapper(self, cursor):
return self.lhs._get_model_cursor_wrapper(cursor)
def _normalize_model_select(fields_or_models):
fields = []
for fm in fields_or_models:
if is_model(fm):
fields.extend(fm._meta.sorted_fields)
elif isinstance(fm, ModelAlias):
fields.extend(fm.get_field_aliases())
elif isinstance(fm, Table) and fm._columns:
fields.extend([getattr(fm, col) for col in fm._columns])
else:
fields.append(fm)
return fields
class ModelSelect(BaseModelSelect, Select):
def __init__(self, model, fields_or_models, is_default=False):
self.model = self._join_ctx = model
self._joins = {}
self._is_default = is_default
fields = _normalize_model_select(fields_or_models)
super(ModelSelect, self).__init__([model], fields)
def clone(self):
clone = super(ModelSelect, self).clone()
if clone._joins:
clone._joins = dict(clone._joins)
return clone
def select(self, *fields_or_models):
if fields_or_models or not self._is_default:
self._is_default = False
fields = _normalize_model_select(fields_or_models)
return super(ModelSelect, self).select(*fields)
return self
def select_extend(self, *columns):
self._is_default = False
fields = _normalize_model_select(columns)
return super(ModelSelect, self).select_extend(*fields)
def switch(self, ctx=None):
self._join_ctx = self.model if ctx is None else ctx
return self
def _get_model(self, src):
if is_model(src):
return src, True
elif isinstance(src, Table) and src._model:
return src._model, False
elif isinstance(src, ModelAlias):
return src.model, False
elif isinstance(src, ModelSelect):
return src.model, False
return None, False
def _normalize_join(self, src, dest, on, attr):
# Allow "on" expression to have an alias that determines the
# destination attribute for the joined data.
on_alias = isinstance(on, Alias)
if on_alias:
attr = attr or on._alias
on = on.alias()
# Obtain references to the source and destination models being joined.
src_model, src_is_model = self._get_model(src)
dest_model, dest_is_model = self._get_model(dest)
if src_model and dest_model:
self._join_ctx = dest
constructor = dest_model
# In the case where the "on" clause is a Column or Field, we will
# convert that field into the appropriate predicate expression.
if not (src_is_model and dest_is_model) and isinstance(on, Column):
if on.source is src:
to_field = src_model._meta.columns[on.name]
elif on.source is dest:
to_field = dest_model._meta.columns[on.name]
else:
raise AttributeError('"on" clause Column %s does not '
'belong to %s or %s.' %
(on, src_model, dest_model))
on = None
elif isinstance(on, Field):
to_field = on
on = None
else:
to_field = None
fk_field, is_backref = self._generate_on_clause(
src_model, dest_model, to_field, on)
if on is None:
src_attr = 'name' if src_is_model else 'column_name'
dest_attr = 'name' if dest_is_model else 'column_name'
if is_backref:
lhs = getattr(dest, getattr(fk_field, dest_attr))
rhs = getattr(src, getattr(fk_field.rel_field, src_attr))
else:
lhs = getattr(src, getattr(fk_field, src_attr))
rhs = getattr(dest, getattr(fk_field.rel_field, dest_attr))
on = (lhs == rhs)
if not attr:
if fk_field is not None and not is_backref:
attr = fk_field.name
else:
attr = dest_model._meta.name
elif on_alias and fk_field is not None and \
attr == fk_field.object_id_name and not is_backref:
raise ValueError('Cannot assign join alias to "%s", as this '
'attribute is the object_id_name for the '
'foreign-key field "%s"' % (attr, fk_field))
elif isinstance(dest, Source):
constructor = dict
attr = attr or dest._alias
if not attr and isinstance(dest, Table):
attr = attr or dest.__name__
return (on, attr, constructor)
def _generate_on_clause(self, src, dest, to_field=None, on=None):
meta = src._meta
is_backref = fk_fields = False
# Get all the foreign keys between source and dest, and determine if
# the join is via a back-reference.
if dest in meta.model_refs:
fk_fields = meta.model_refs[dest]
elif dest in meta.model_backrefs:
fk_fields = meta.model_backrefs[dest]
is_backref = True
if not fk_fields:
if on is not None:
return None, False
raise ValueError('Unable to find foreign key between %s and %s. '
'Please specify an explicit join condition.' %
(src, dest))
elif to_field is not None:
# If the foreign-key field was specified explicitly, remove all
# other foreign-key fields from the list.
target = (to_field.field if isinstance(to_field, FieldAlias)
else to_field)
fk_fields = [f for f in fk_fields if (
(f is target) or
(is_backref and f.rel_field is to_field))]
if len(fk_fields) == 1:
return fk_fields[0], is_backref
if on is None:
# If multiple foreign-keys exist, try using the FK whose name
# matches that of the related model. If not, raise an error as this
# is ambiguous.
for fk in fk_fields:
if fk.name == dest._meta.name:
return fk, is_backref
raise ValueError('More than one foreign key between %s and %s.'
' Please specify which you are joining on.' %
(src, dest))
# If there are multiple foreign-keys to choose from and the join
# predicate is an expression, we'll try to figure out which
# foreign-key field we're joining on so that we can assign to the
# correct attribute when resolving the model graph.
to_field = None
if isinstance(on, Expression):
lhs, rhs = on.lhs, on.rhs
# Coerce to set() so that we force Python to compare using the
# object's hash rather than equality test, which returns a
# false-positive due to overriding __eq__.
fk_set = set(fk_fields)
if isinstance(lhs, Field):
lhs_f = lhs.field if isinstance(lhs, FieldAlias) else lhs
if lhs_f in fk_set:
to_field = lhs_f
elif isinstance(rhs, Field):
rhs_f = rhs.field if isinstance(rhs, FieldAlias) else rhs
if rhs_f in fk_set:
to_field = rhs_f
return to_field, False
@Node.copy
def join(self, dest, join_type=JOIN.INNER, on=None, src=None, attr=None):
src = self._join_ctx if src is None else src
if join_type == JOIN.LATERAL or join_type == JOIN.LEFT_LATERAL:
on = True
elif join_type != JOIN.CROSS:
on, attr, constructor = self._normalize_join(src, dest, on, attr)
if attr:
self._joins.setdefault(src, [])
self._joins[src].append((dest, attr, constructor, join_type))
elif on is not None:
raise ValueError('Cannot specify on clause with cross join.')
if not self._from_list:
raise ValueError('No sources to join on.')
item = self._from_list.pop()
self._from_list.append(Join(item, dest, join_type, on))
def left_outer_join(self, dest, on=None, src=None, attr=None):
return self.join(dest, JOIN.LEFT_OUTER, on, src, attr)
def join_from(self, src, dest, join_type=JOIN.INNER, on=None, attr=None):
return self.join(dest, join_type, on, src, attr)
def _get_model_cursor_wrapper(self, cursor):
if len(self._from_list) == 1 and not self._joins:
return ModelObjectCursorWrapper(cursor, self.model,
self._returning, self.model)
return ModelCursorWrapper(cursor, self.model, self._returning,
self._from_list, self._joins)
def ensure_join(self, lm, rm, on=None, **join_kwargs):
join_ctx = self._join_ctx
for dest, _, constructor, _ in self._joins.get(lm, []):
if dest == rm:
return self
return self.switch(lm).join(rm, on=on, **join_kwargs).switch(join_ctx)
def convert_dict_to_node(self, qdict):
accum = []
joins = []
fks = (ForeignKeyField, BackrefAccessor)
for key, value in sorted(qdict.items()):
curr = self.model
if '__' in key and key.rsplit('__', 1)[1] in DJANGO_MAP:
key, op = key.rsplit('__', 1)
op = DJANGO_MAP[op]
elif value is None:
op = DJANGO_MAP['is']
else:
op = DJANGO_MAP['eq']
if '__' not in key:
# Handle simplest case. This avoids joining over-eagerly when a
# direct FK lookup is all that is required.
model_attr = getattr(curr, key)
else:
for piece in key.split('__'):
for dest, attr, _, _ in self._joins.get(curr, ()):
try:
model_attr = getattr(curr, piece, None)
                        except Exception:
pass
if attr == piece or (isinstance(dest, ModelAlias) and
dest.alias == piece):
curr = dest
break
else:
model_attr = getattr(curr, piece)
if value is not None and isinstance(model_attr, fks):
curr = model_attr.rel_model
joins.append(model_attr)
accum.append(op(model_attr, value))
return accum, joins
def filter(self, *args, **kwargs):
# normalize args and kwargs into a new expression
if args and kwargs:
dq_node = (reduce(operator.and_, [a.clone() for a in args]) &
DQ(**kwargs))
elif args:
dq_node = (reduce(operator.and_, [a.clone() for a in args]) &
ColumnBase())
elif kwargs:
dq_node = DQ(**kwargs) & ColumnBase()
else:
return self.clone()
# dq_node should now be an Expression, lhs = Node(), rhs = ...
q = collections.deque([dq_node])
dq_joins = []
seen_joins = set()
while q:
curr = q.popleft()
if not isinstance(curr, Expression):
continue
for side, piece in (('lhs', curr.lhs), ('rhs', curr.rhs)):
if isinstance(piece, DQ):
query, joins = self.convert_dict_to_node(piece.query)
for join in joins:
if join not in seen_joins:
dq_joins.append(join)
seen_joins.add(join)
expression = reduce(operator.and_, query)
# Apply values from the DQ object.
if piece._negated:
expression = Negated(expression)
# expression._alias = piece._alias
setattr(curr, side, expression)
else:
q.append(piece)
if not args or not kwargs:
dq_node = dq_node.lhs
query = self.clone()
for field in dq_joins:
if isinstance(field, ForeignKeyField):
lm, rm = field.model, field.rel_model
field_obj = field
elif isinstance(field, BackrefAccessor):
lm, rm = field.model, field.rel_model
field_obj = field.field
query = query.ensure_join(lm, rm, field_obj)
return query.where(dq_node)
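    # Example (illustrative sketch; "Tweet" and "User" are hypothetical
    # models). Django-style double-underscore lookups traverse foreign
    # keys, with the required joins added automatically as above:
    #
    #   q = Tweet.filter(user__username='huey', message__ilike='%meow%')
    #   q = Tweet.filter(DQ(user__username='huey') |
    #                    DQ(user__username='zaizee'))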
def create_table(self, name, safe=True, **meta):
return self.model._schema.create_table_as(name, self, safe, **meta)
def __sql_selection__(self, ctx, is_subquery=False):
if self._is_default and is_subquery and len(self._returning) > 1 and \
self.model._meta.primary_key is not False:
return ctx.sql(self.model._meta.primary_key)
return ctx.sql(CommaNodeList(self._returning))
class NoopModelSelect(ModelSelect):
def __sql__(self, ctx):
return self.model._meta.database.get_noop_select(ctx)
def _get_cursor_wrapper(self, cursor):
return CursorWrapper(cursor)
class _ModelWriteQueryHelper(_ModelQueryHelper):
def __init__(self, model, *args, **kwargs):
self.model = model
super(_ModelWriteQueryHelper, self).__init__(model, *args, **kwargs)
def returning(self, *returning):
accum = []
for item in returning:
if is_model(item):
accum.extend(item._meta.sorted_fields)
else:
accum.append(item)
return super(_ModelWriteQueryHelper, self).returning(*accum)
def _set_table_alias(self, ctx):
table = self.model._meta.table
ctx.alias_manager[table] = table.__name__
class ModelUpdate(_ModelWriteQueryHelper, Update):
pass
class ModelInsert(_ModelWriteQueryHelper, Insert):
default_row_type = ROW.TUPLE
def __init__(self, *args, **kwargs):
super(ModelInsert, self).__init__(*args, **kwargs)
if self._returning is None and self.model._meta.database is not None:
if self.model._meta.database.returning_clause:
self._returning = self.model._meta.get_primary_keys()
def returning(self, *returning):
# By default ModelInsert will yield a `tuple` containing the
# primary-key of the newly inserted row. But if we are explicitly
# specifying a returning clause and have not set a row type, we will
# default to returning model instances instead.
if returning and self._row_type is None:
self._row_type = ROW.MODEL
return super(ModelInsert, self).returning(*returning)
def get_default_data(self):
return self.model._meta.defaults
def get_default_columns(self):
fields = self.model._meta.sorted_fields
return fields[1:] if self.model._meta.auto_increment else fields
class ModelDelete(_ModelWriteQueryHelper, Delete):
pass
class ManyToManyQuery(ModelSelect):
def __init__(self, instance, accessor, rel, *args, **kwargs):
self._instance = instance
self._accessor = accessor
self._src_attr = accessor.src_fk.rel_field.name
self._dest_attr = accessor.dest_fk.rel_field.name
super(ManyToManyQuery, self).__init__(rel, (rel,), *args, **kwargs)
def _id_list(self, model_or_id_list):
if isinstance(model_or_id_list[0], Model):
return [getattr(obj, self._dest_attr) for obj in model_or_id_list]
return model_or_id_list
def add(self, value, clear_existing=False):
if clear_existing:
self.clear()
accessor = self._accessor
src_id = getattr(self._instance, self._src_attr)
if isinstance(value, SelectQuery):
query = value.columns(
Value(src_id),
accessor.dest_fk.rel_field)
accessor.through_model.insert_from(
fields=[accessor.src_fk, accessor.dest_fk],
query=query).execute()
else:
value = ensure_tuple(value)
if not value: return
inserts = [{
accessor.src_fk.name: src_id,
accessor.dest_fk.name: rel_id}
for rel_id in self._id_list(value)]
accessor.through_model.insert_many(inserts).execute()
def remove(self, value):
src_id = getattr(self._instance, self._src_attr)
if isinstance(value, SelectQuery):
column = getattr(value.model, self._dest_attr)
subquery = value.columns(column)
return (self._accessor.through_model
.delete()
.where(
(self._accessor.dest_fk << subquery) &
(self._accessor.src_fk == src_id))
.execute())
else:
value = ensure_tuple(value)
if not value:
return
return (self._accessor.through_model
.delete()
.where(
(self._accessor.dest_fk << self._id_list(value)) &
(self._accessor.src_fk == src_id))
.execute())
def clear(self):
src_id = getattr(self._instance, self._src_attr)
return (self._accessor.through_model
.delete()
.where(self._accessor.src_fk == src_id)
.execute())
def safe_python_value(conv_func):
def validate(value):
try:
return conv_func(value)
except (TypeError, ValueError):
return value
return validate
class BaseModelCursorWrapper(DictCursorWrapper):
def __init__(self, cursor, model, columns):
super(BaseModelCursorWrapper, self).__init__(cursor)
self.model = model
self.select = columns or []
def _initialize_columns(self):
combined = self.model._meta.combined
table = self.model._meta.table
description = self.cursor.description
self.ncols = len(self.cursor.description)
self.columns = []
self.converters = converters = [None] * self.ncols
self.fields = fields = [None] * self.ncols
for idx, description_item in enumerate(description):
column = orig_column = description_item[0]
# Try to clean-up messy column descriptions when people do not
# provide an alias. The idea is that we take something like:
# SUM("t1"."price") -> "price") -> price
dot_index = column.rfind('.')
if dot_index != -1:
column = column[dot_index + 1:]
column = column.strip('()"`')
self.columns.append(column)
# Now we'll see what they selected and see if we can improve the
# column-name being returned - e.g. by mapping it to the selected
# field's name.
try:
raw_node = self.select[idx]
except IndexError:
if column in combined:
raw_node = node = combined[column]
else:
continue
else:
node = raw_node.unwrap()
# If this column was given an alias, then we will use whatever
# alias was returned by the cursor.
is_alias = raw_node.is_alias()
if is_alias:
self.columns[idx] = orig_column
# Heuristics used to attempt to get the field associated with a
# given SELECT column, so that we can accurately convert the value
# returned by the database-cursor into a Python object.
if isinstance(node, Field):
if raw_node._coerce:
converters[idx] = node.python_value
fields[idx] = node
if not is_alias:
self.columns[idx] = node.name
elif isinstance(node, ColumnBase) and raw_node._converter:
converters[idx] = raw_node._converter
elif isinstance(node, Function) and node._coerce:
if node._python_value is not None:
converters[idx] = node._python_value
elif node.arguments and isinstance(node.arguments[0], Node):
# If the first argument is a field or references a column
# on a Model, try using that field's conversion function.
# This usually works, but we use "safe_python_value()" so
# that if a TypeError or ValueError occurs during
# conversion we can just fall-back to the raw cursor value.
first = node.arguments[0].unwrap()
if isinstance(first, Entity):
path = first._path[-1] # Try to look-up by name.
first = combined.get(path)
if isinstance(first, Field):
converters[idx] = safe_python_value(first.python_value)
elif column in combined:
if node._coerce:
converters[idx] = combined[column].python_value
if isinstance(node, Column) and node.source == table:
fields[idx] = combined[column]
initialize = _initialize_columns
def process_row(self, row):
raise NotImplementedError
class ModelDictCursorWrapper(BaseModelCursorWrapper):
def process_row(self, row):
result = {}
columns, converters = self.columns, self.converters
fields = self.fields
for i in range(self.ncols):
attr = columns[i]
if attr in result: continue # Don't overwrite if we have dupes.
if converters[i] is not None:
result[attr] = converters[i](row[i])
else:
result[attr] = row[i]
return result
class ModelTupleCursorWrapper(ModelDictCursorWrapper):
constructor = tuple
def process_row(self, row):
columns, converters = self.columns, self.converters
return self.constructor([
(converters[i](row[i]) if converters[i] is not None else row[i])
for i in range(self.ncols)])
class ModelNamedTupleCursorWrapper(ModelTupleCursorWrapper):
def initialize(self):
self._initialize_columns()
attributes = []
for i in range(self.ncols):
attributes.append(self.columns[i])
self.tuple_class = collections.namedtuple('Row', attributes)
self.constructor = lambda row: self.tuple_class(*row)
class ModelObjectCursorWrapper(ModelDictCursorWrapper):
def __init__(self, cursor, model, select, constructor):
self.constructor = constructor
self.is_model = is_model(constructor)
super(ModelObjectCursorWrapper, self).__init__(cursor, model, select)
def process_row(self, row):
data = super(ModelObjectCursorWrapper, self).process_row(row)
if self.is_model:
# Clear out any dirty fields before returning to the user.
obj = self.constructor(__no_default__=1, **data)
obj._dirty.clear()
return obj
else:
return self.constructor(**data)
class ModelCursorWrapper(BaseModelCursorWrapper):
def __init__(self, cursor, model, select, from_list, joins):
super(ModelCursorWrapper, self).__init__(cursor, model, select)
self.from_list = from_list
self.joins = joins
def initialize(self):
self._initialize_columns()
selected_src = set([field.model for field in self.fields
if field is not None])
select, columns = self.select, self.columns
self.key_to_constructor = {self.model: self.model}
self.src_is_dest = {}
self.src_to_dest = []
accum = collections.deque(self.from_list)
dests = set()
while accum:
curr = accum.popleft()
if isinstance(curr, Join):
accum.append(curr.lhs)
accum.append(curr.rhs)
continue
if curr not in self.joins:
continue
is_dict = isinstance(curr, dict)
for key, attr, constructor, join_type in self.joins[curr]:
if key not in self.key_to_constructor:
self.key_to_constructor[key] = constructor
# (src, attr, dest, is_dict, join_type).
self.src_to_dest.append((curr, attr, key, is_dict,
join_type))
dests.add(key)
accum.append(key)
# Ensure that we accommodate everything selected.
for src in selected_src:
if src not in self.key_to_constructor:
if is_model(src):
self.key_to_constructor[src] = src
elif isinstance(src, ModelAlias):
self.key_to_constructor[src] = src.model
# Indicate which sources are also dests.
for src, _, dest, _, _ in self.src_to_dest:
self.src_is_dest[src] = src in dests and (dest in selected_src
or src in selected_src)
self.column_keys = []
for idx, node in enumerate(select):
key = self.model
field = self.fields[idx]
if field is not None:
if isinstance(field, FieldAlias):
key = field.source
else:
key = field.model
else:
if isinstance(node, Node):
node = node.unwrap()
if isinstance(node, Column):
key = node.source
self.column_keys.append(key)
def process_row(self, row):
objects = {}
object_list = []
for key, constructor in self.key_to_constructor.items():
objects[key] = constructor(__no_default__=True)
object_list.append(objects[key])
default_instance = objects[self.model]
set_keys = set()
for idx, key in enumerate(self.column_keys):
# Get the instance corresponding to the selected column/value,
# falling back to the "root" model instance.
instance = objects.get(key, default_instance)
column = self.columns[idx]
value = row[idx]
if value is not None:
set_keys.add(key)
if self.converters[idx]:
value = self.converters[idx](value)
if isinstance(instance, dict):
instance[column] = value
else:
setattr(instance, column, value)
        # Assign each joined instance to its parent, using the join
        # metadata assembled in initialize().
for (src, attr, dest, is_dict, join_type) in self.src_to_dest:
instance = objects[src]
try:
joined_instance = objects[dest]
except KeyError:
continue
# If no fields were set on the destination instance then do not
# assign an "empty" instance.
if instance is None or dest is None or \
(dest not in set_keys and not self.src_is_dest.get(dest)):
continue
# If no fields were set on either the source or the destination,
# then we have nothing to do here.
            if src not in set_keys and dest not in set_keys \
and join_type.endswith('OUTER JOIN'):
continue
if is_dict:
instance[attr] = joined_instance
else:
setattr(instance, attr, joined_instance)
# When instantiating models from a cursor, we clear the dirty fields.
for instance in object_list:
if isinstance(instance, Model):
instance._dirty.clear()
return objects[self.model]
class PrefetchQuery(collections.namedtuple('_PrefetchQuery', (
'query', 'fields', 'is_backref', 'rel_models', 'field_to_name', 'model'))):
def __new__(cls, query, fields=None, is_backref=None, rel_models=None,
field_to_name=None, model=None):
if fields:
if is_backref:
if rel_models is None:
rel_models = [field.model for field in fields]
foreign_key_attrs = [field.rel_field.name for field in fields]
else:
if rel_models is None:
rel_models = [field.rel_model for field in fields]
foreign_key_attrs = [field.name for field in fields]
field_to_name = list(zip(fields, foreign_key_attrs))
model = query.model
return super(PrefetchQuery, cls).__new__(
cls, query, fields, is_backref, rel_models, field_to_name, model)
def populate_instance(self, instance, id_map):
if self.is_backref:
for field in self.fields:
identifier = instance.__data__[field.name]
key = (field, identifier)
if key in id_map:
setattr(instance, field.name, id_map[key])
else:
for field, attname in self.field_to_name:
identifier = instance.__data__[field.rel_field.name]
key = (field, identifier)
rel_instances = id_map.get(key, [])
for inst in rel_instances:
setattr(inst, attname, instance)
inst._dirty.clear()
setattr(instance, field.backref, rel_instances)
def store_instance(self, instance, id_map):
for field, attname in self.field_to_name:
identity = field.rel_field.python_value(instance.__data__[attname])
key = (field, identity)
if self.is_backref:
id_map[key] = instance
else:
id_map.setdefault(key, [])
id_map[key].append(instance)
def prefetch_add_subquery(sq, subqueries):
fixed_queries = [PrefetchQuery(sq)]
for i, subquery in enumerate(subqueries):
if isinstance(subquery, tuple):
subquery, target_model = subquery
else:
target_model = None
        if not isinstance(subquery, Query) and \
           (is_model(subquery) or isinstance(subquery, ModelAlias)):
subquery = subquery.select()
subquery_model = subquery.model
fks = backrefs = None
for j in reversed(range(i + 1)):
fixed = fixed_queries[j]
last_query = fixed.query
last_model = last_obj = fixed.model
if isinstance(last_model, ModelAlias):
last_model = last_model.model
rels = subquery_model._meta.model_refs.get(last_model, [])
if rels:
fks = [getattr(subquery_model, fk.name) for fk in rels]
pks = [getattr(last_obj, fk.rel_field.name) for fk in rels]
else:
backrefs = subquery_model._meta.model_backrefs.get(last_model)
if (fks or backrefs) and ((target_model is last_obj) or
(target_model is None)):
break
if not fks and not backrefs:
tgt_err = ' using %s' % target_model if target_model else ''
raise AttributeError('Error: unable to find foreign key for '
'query: %s%s' % (subquery, tgt_err))
dest = (target_model,) if target_model else None
if fks:
expr = reduce(operator.or_, [
(fk << last_query.select(pk))
for (fk, pk) in zip(fks, pks)])
subquery = subquery.where(expr)
fixed_queries.append(PrefetchQuery(subquery, fks, False, dest))
elif backrefs:
expressions = []
for backref in backrefs:
rel_field = getattr(subquery_model, backref.rel_field.name)
fk_field = getattr(last_obj, backref.name)
expressions.append(rel_field << last_query.select(fk_field))
subquery = subquery.where(reduce(operator.or_, expressions))
fixed_queries.append(PrefetchQuery(subquery, backrefs, True, dest))
return fixed_queries
def prefetch(sq, *subqueries):
if not subqueries:
return sq
fixed_queries = prefetch_add_subquery(sq, subqueries)
deps = {}
rel_map = {}
for pq in reversed(fixed_queries):
query_model = pq.model
if pq.fields:
for rel_model in pq.rel_models:
rel_map.setdefault(rel_model, [])
rel_map[rel_model].append(pq)
deps.setdefault(query_model, {})
id_map = deps[query_model]
has_relations = bool(rel_map.get(query_model))
for instance in pq.query:
if pq.fields:
pq.store_instance(instance, id_map)
if has_relations:
for rel in rel_map[query_model]:
rel.populate_instance(instance, deps[rel.model])
    return list(pq.query)
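# Example (illustrative sketch; "User" and "Tweet" are hypothetical models
# where Tweet.user is a foreign key declared with backref='tweets').
# prefetch() runs one query per model and stitches the related rows onto
# their parents in Python, avoiding a follow-up query per row:
#
#   for user in prefetch(User.select(), Tweet.select()):
#       for tweet in user.tweets:  # no additional query issued here
#           print(user.username, tweet.message)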
#=============================================================================
# imports
#=============================================================================
from __future__ import with_statement
# core
import re
import logging; log = logging.getLogger(__name__)
import threading
import time
from warnings import warn
# site
# pkg
from zdppy_password_hash import exc
from zdppy_password_hash.exc import ExpectedStringError, ExpectedTypeError, PasslibConfigWarning
from zdppy_password_hash.registry import get_crypt_handler, _validate_handler_name
from zdppy_password_hash.utils import (handlers as uh, to_bytes,
to_unicode, splitcomma,
as_bool, timer, rng, getrandstr,
)
from zdppy_password_hash.utils.binary import BASE64_CHARS
from zdppy_password_hash.utils.compat import (iteritems, num_types, irange,
PY2, PY3, unicode, SafeConfigParser,
NativeStringIO, BytesIO,
unicode_or_bytes_types, native_string_types,
)
from zdppy_password_hash.utils.decor import deprecated_method, memoized_property
# local
__all__ = [
'CryptContext',
'LazyCryptContext',
'CryptPolicy',
]
#=============================================================================
# support
#=============================================================================
# private object to detect unset params
_UNSET = object()
def _coerce_vary_rounds(value):
"""parse vary_rounds string to percent as [0,1) float, or integer"""
if value.endswith("%"):
# XXX: deprecate this in favor of raw float?
return float(value.rstrip("%"))*.01
try:
return int(value)
except ValueError:
return float(value)
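# Illustrative behavior of the helper above:
#
#   _coerce_vary_rounds("10%")  # -> 0.1 (percent string -> float fraction)
#   _coerce_vary_rounds("3")    # -> 3   (integer string)
#   _coerce_vary_rounds("0.5")  # -> 0.5 (float fallback)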
# set of options which aren't allowed to be set via policy
_forbidden_scheme_options = set(["salt"])
# 'salt' - not allowed since a fixed salt would defeat the purpose.
# dict containing funcs used to coerce strings to correct type for scheme option keys.
# NOTE: this isn't really needed any longer, since Handler.using() handles the actual parsing.
# keeping this around for now, though, since it makes context.to_dict() output cleaner.
_coerce_scheme_options = dict(
min_rounds=int,
max_rounds=int,
default_rounds=int,
vary_rounds=_coerce_vary_rounds,
salt_size=int,
)
def _is_handler_registered(handler):
"""detect if handler is registered or a custom handler"""
return get_crypt_handler(handler.name, None) is handler
@staticmethod
def _always_needs_update(hash, secret=None):
"""
dummy function patched into handler.needs_update() by _CryptConfig
when hash alg has been deprecated for context.
"""
return True
#: list of keys allowed under wildcard "all" scheme w/o a security warning.
_global_settings = set(["truncate_error", "vary_rounds"])
#=============================================================================
# crypt policy
#=============================================================================
_preamble = ("The CryptPolicy class has been deprecated as of "
"Passlib 1.6, and will be removed in Passlib 1.8. ")
class CryptPolicy(object):
"""
.. deprecated:: 1.6
This class has been deprecated, and will be removed in Passlib 1.8.
All of its functionality has been rolled into :class:`CryptContext`.
This class previously stored the configuration options for the
CryptContext class. In the interest of interface simplification,
all of this class' functionality has been rolled into the CryptContext
class itself.
The documentation for this class is now focused on documenting how to
migrate to the new api. Additionally, where possible, the deprecation
warnings issued by the CryptPolicy methods will list the replacement call
that should be used.
Constructors
============
CryptPolicy objects can be constructed directly using any of
the keywords accepted by :class:`CryptContext`. Direct uses of the
:class:`!CryptPolicy` constructor should either pass the keywords
directly into the CryptContext constructor, or to :meth:`CryptContext.update`
if the policy object was being used to update an existing context object.
In addition to passing in keywords directly,
CryptPolicy objects can be constructed by the following methods:
.. automethod:: from_path
.. automethod:: from_string
.. automethod:: from_source
.. automethod:: from_sources
.. automethod:: replace
Introspection
=============
All of the informational methods provided by this class have been deprecated
by identical or similar methods in the :class:`CryptContext` class:
.. automethod:: has_schemes
.. automethod:: schemes
.. automethod:: iter_handlers
.. automethod:: get_handler
.. automethod:: get_options
.. automethod:: handler_is_deprecated
.. automethod:: get_min_verify_time
Exporting
=========
.. automethod:: iter_config
.. automethod:: to_dict
.. automethod:: to_file
.. automethod:: to_string
.. note::
        CryptPolicy instances are immutable.
        Use the :meth:`replace` method to create modified copies.
.. deprecated:: 1.6
"""
#===================================================================
# class methods
#===================================================================
@classmethod
def from_path(cls, path, section="zdppy_password_hash", encoding="utf-8"):
"""create a CryptPolicy instance from a local file.
.. deprecated:: 1.6
Creating a new CryptContext from a file, which was previously done via
``CryptContext(policy=CryptPolicy.from_path(path))``, can now be
done via ``CryptContext.from_path(path)``.
See :meth:`CryptContext.from_path` for details.
Updating an existing CryptContext from a file, which was previously done
``context.policy = CryptPolicy.from_path(path)``, can now be
done via ``context.load_path(path)``.
See :meth:`CryptContext.load_path` for details.
"""
warn(_preamble +
"Instead of ``CryptPolicy.from_path(path)``, "
"use ``CryptContext.from_path(path)`` "
" or ``context.load_path(path)`` for an existing CryptContext.",
DeprecationWarning, stacklevel=2)
return cls(_internal_context=CryptContext.from_path(path, section,
encoding))
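    # Migration example (illustrative; "policy.cfg" is a hypothetical file):
    #
    #   old: ctx = CryptContext(policy=CryptPolicy.from_path("policy.cfg"))
    #   new: ctx = CryptContext.from_path("policy.cfg")
    #   or, to reconfigure an existing context in place:
    #        ctx.load_path("policy.cfg")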
@classmethod
def from_string(cls, source, section="zdppy_password_hash", encoding="utf-8"):
"""create a CryptPolicy instance from a string.
.. deprecated:: 1.6
Creating a new CryptContext from a string, which was previously done via
``CryptContext(policy=CryptPolicy.from_string(data))``, can now be
done via ``CryptContext.from_string(data)``.
See :meth:`CryptContext.from_string` for details.
Updating an existing CryptContext from a string, which was previously done
``context.policy = CryptPolicy.from_string(data)``, can now be
done via ``context.load(data)``.
See :meth:`CryptContext.load` for details.
"""
warn(_preamble +
"Instead of ``CryptPolicy.from_string(source)``, "
"use ``CryptContext.from_string(source)`` or "
"``context.load(source)`` for an existing CryptContext.",
DeprecationWarning, stacklevel=2)
return cls(_internal_context=CryptContext.from_string(source, section,
encoding))
@classmethod
def from_source(cls, source, _warn=True):
"""create a CryptPolicy instance from some source.
this method autodetects the source type, and invokes
the appropriate constructor automatically. it attempts
to detect whether the source is a configuration string, a filepath,
a dictionary, or an existing CryptPolicy instance.
.. deprecated:: 1.6
            Creating a new CryptContext, which could previously be done via
``CryptContext(policy=CryptPolicy.from_source(source))``, should
now be done using an explicit method: the :class:`CryptContext`
constructor itself, :meth:`CryptContext.from_path`,
or :meth:`CryptContext.from_string`.
Updating an existing CryptContext, which could previously be done via
``context.policy = CryptPolicy.from_source(source)``, should
now be done using an explicit method: :meth:`CryptContext.update`,
or :meth:`CryptContext.load`.
"""
if _warn:
warn(_preamble +
"Instead of ``CryptPolicy.from_source()``, "
"use ``CryptContext.from_string(path)`` "
" or ``CryptContext.from_path(source)``, as appropriate.",
DeprecationWarning, stacklevel=2)
if isinstance(source, CryptPolicy):
return source
elif isinstance(source, dict):
return cls(_internal_context=CryptContext(**source))
elif not isinstance(source, (bytes,unicode)):
raise TypeError("source must be CryptPolicy, dict, config string, "
"or file path: %r" % (type(source),))
elif any(c in source for c in "\n\r\t") or not source.strip(" \t./;:"):
return cls(_internal_context=CryptContext.from_string(source))
else:
return cls(_internal_context=CryptContext.from_path(source))
@classmethod
def from_sources(cls, sources, _warn=True):
"""create a CryptPolicy instance by merging multiple sources.
each source is interpreted as by :meth:`from_source`,
and the results are merged together.
.. deprecated:: 1.6
Instead of using this method to merge multiple policies together,
a :class:`CryptContext` instance should be created, and then
the multiple sources merged together via :meth:`CryptContext.load`.
"""
if _warn:
warn(_preamble +
"Instead of ``CryptPolicy.from_sources()``, "
"use the various CryptContext constructors "
" followed by ``context.update()``.",
DeprecationWarning, stacklevel=2)
if len(sources) == 0:
raise ValueError("no sources specified")
if len(sources) == 1:
return cls.from_source(sources[0], _warn=False)
kwds = {}
for source in sources:
kwds.update(cls.from_source(source, _warn=False)._context.to_dict(resolve=True))
return cls(_internal_context=CryptContext(**kwds))
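    # Migration example (illustrative; the sources shown are hypothetical):
    #
    #   old: policy = CryptPolicy.from_sources(["defaults.cfg",
    #                                           {"schemes": ["bcrypt"]}])
    #   new: ctx = CryptContext.from_path("defaults.cfg")
    #        ctx.update(schemes=["bcrypt"])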
def replace(self, *args, **kwds):
"""create a new CryptPolicy, optionally updating parts of the
existing configuration.
.. deprecated:: 1.6
            Callers of this method should use :meth:`CryptContext.update` or
            :meth:`CryptContext.copy` instead.
"""
if self._stub_policy:
warn(_preamble + # pragma: no cover -- deprecated & unused
"Instead of ``context.policy.replace()``, "
"use ``context.update()`` or ``context.copy()``.",
DeprecationWarning, stacklevel=2)
else:
warn(_preamble +
"Instead of ``CryptPolicy().replace()``, "
"create a CryptContext instance and "
"use ``context.update()`` or ``context.copy()``.",
DeprecationWarning, stacklevel=2)
sources = [ self ]
if args:
sources.extend(args)
if kwds:
sources.append(kwds)
return CryptPolicy.from_sources(sources, _warn=False)
#===================================================================
# instance attrs
#===================================================================
# internal CryptContext we're wrapping to handle everything
# until this class is removed.
_context = None
# flag indicating this is wrapper generated by the CryptContext.policy
    # attribute, rather than one created independently by the application.
_stub_policy = False
#===================================================================
# init
#===================================================================
def __init__(self, *args, **kwds):
context = kwds.pop("_internal_context", None)
if context:
assert isinstance(context, CryptContext)
self._context = context
self._stub_policy = kwds.pop("_stub_policy", False)
assert not (args or kwds), "unexpected args: %r %r" % (args,kwds)
else:
if args:
if len(args) != 1:
raise TypeError("only one positional argument accepted")
if kwds:
raise TypeError("cannot specify positional arg and kwds")
kwds = args[0]
warn(_preamble +
"Instead of constructing a CryptPolicy instance, "
"create a CryptContext directly, or use ``context.update()`` "
"and ``context.load()`` to reconfigure existing CryptContext "
"instances.",
DeprecationWarning, stacklevel=2)
self._context = CryptContext(**kwds)
#===================================================================
# public interface for examining options
#===================================================================
def has_schemes(self):
"""return True if policy defines *any* schemes for use.
.. deprecated:: 1.6
applications should use ``bool(context.schemes())`` instead.
see :meth:`CryptContext.schemes`.
"""
if self._stub_policy:
warn(_preamble + # pragma: no cover -- deprecated & unused
"Instead of ``context.policy.has_schemes()``, "
"use ``bool(context.schemes())``.",
DeprecationWarning, stacklevel=2)
else:
warn(_preamble +
"Instead of ``CryptPolicy().has_schemes()``, "
"create a CryptContext instance and "
"use ``bool(context.schemes())``.",
DeprecationWarning, stacklevel=2)
return bool(self._context.schemes())
def iter_handlers(self):
"""return iterator over handlers defined in policy.
.. deprecated:: 1.6
applications should use ``context.schemes(resolve=True))`` instead.
see :meth:`CryptContext.schemes`.
"""
if self._stub_policy:
warn(_preamble +
"Instead of ``context.policy.iter_handlers()``, "
"use ``context.schemes(resolve=True)``.",
DeprecationWarning, stacklevel=2)
else:
warn(_preamble +
"Instead of ``CryptPolicy().iter_handlers()``, "
"create a CryptContext instance and "
"use ``context.schemes(resolve=True)``.",
DeprecationWarning, stacklevel=2)
return self._context.schemes(resolve=True, unconfigured=True)
def schemes(self, resolve=False):
"""return list of schemes defined in policy.
.. deprecated:: 1.6
applications should use :meth:`CryptContext.schemes` instead.
"""
if self._stub_policy:
warn(_preamble + # pragma: no cover -- deprecated & unused
"Instead of ``context.policy.schemes()``, "
"use ``context.schemes()``.",
DeprecationWarning, stacklevel=2)
else:
warn(_preamble +
"Instead of ``CryptPolicy().schemes()``, "
"create a CryptContext instance and "
"use ``context.schemes()``.",
DeprecationWarning, stacklevel=2)
return list(self._context.schemes(resolve=resolve, unconfigured=True))
def get_handler(self, name=None, category=None, required=False):
"""return handler as specified by name, or default handler.
.. deprecated:: 1.6
applications should use :meth:`CryptContext.handler` instead,
though note that the ``required`` keyword has been removed,
and the new method will always act as if ``required=True``.
"""
if self._stub_policy:
warn(_preamble +
"Instead of ``context.policy.get_handler()``, "
"use ``context.handler()``.",
DeprecationWarning, stacklevel=2)
else:
warn(_preamble +
"Instead of ``CryptPolicy().get_handler()``, "
"create a CryptContext instance and "
"use ``context.handler()``.",
DeprecationWarning, stacklevel=2)
# CryptContext.handler() doesn't support required=False,
# so wrapping it in try/except
try:
return self._context.handler(name, category, unconfigured=True)
except KeyError:
if required:
raise
else:
return None
def get_min_verify_time(self, category=None):
"""get min_verify_time setting for policy.
.. deprecated:: 1.6
min_verify_time option will be removed entirely in zdppy_password_hash 1.8
.. versionchanged:: 1.7
this method now always returns the value automatically
calculated by :meth:`CryptContext.min_verify_time`,
any value specified by policy is ignored.
"""
        warn("get_min_verify_time() and the min_verify_time option are "
             "deprecated and ignored, and will be removed in Passlib 1.8",
             DeprecationWarning, stacklevel=2)
return 0
def get_options(self, name, category=None):
"""return dictionary of options specific to a given handler.
.. deprecated:: 1.6
this method has no direct replacement in the 1.6 api, as there
is not a clearly defined use-case. however, examining the output of
:meth:`CryptContext.to_dict` should serve as the closest alternative.
"""
# XXX: might make a public replacement, but need more study of the use cases.
if self._stub_policy:
warn(_preamble + # pragma: no cover -- deprecated & unused
"``context.policy.get_options()`` will no longer be available.",
DeprecationWarning, stacklevel=2)
else:
warn(_preamble +
"``CryptPolicy().get_options()`` will no longer be available.",
DeprecationWarning, stacklevel=2)
if hasattr(name, "name"):
name = name.name
return self._context._config._get_record_options_with_flag(name, category)[0]
def handler_is_deprecated(self, name, category=None):
"""check if handler has been deprecated by policy.
.. deprecated:: 1.6
this method has no direct replacement in the 1.6 api, as there
is not a clearly defined use-case. however, examining the output of
:meth:`CryptContext.to_dict` should serve as the closest alternative.
"""
# XXX: might make a public replacement, but need more study of the use cases.
if self._stub_policy:
warn(_preamble +
"``context.policy.handler_is_deprecated()`` will no longer be available.",
DeprecationWarning, stacklevel=2)
else:
warn(_preamble +
"``CryptPolicy().handler_is_deprecated()`` will no longer be available.",
DeprecationWarning, stacklevel=2)
if hasattr(name, "name"):
name = name.name
return self._context.handler(name, category).deprecated
#===================================================================
# serialization
#===================================================================
def iter_config(self, ini=False, resolve=False):
"""iterate over key/value pairs representing the policy object.
.. deprecated:: 1.6
applications should use :meth:`CryptContext.to_dict` instead.
"""
if self._stub_policy:
warn(_preamble + # pragma: no cover -- deprecated & unused
"Instead of ``context.policy.iter_config()``, "
"use ``context.to_dict().items()``.",
DeprecationWarning, stacklevel=2)
else:
warn(_preamble +
"Instead of ``CryptPolicy().iter_config()``, "
"create a CryptContext instance and "
"use ``context.to_dict().items()``.",
DeprecationWarning, stacklevel=2)
        # hacked code that renders keys & values in a manner that approximates
        # the old behavior. context.to_dict() is much cleaner.
context = self._context
if ini:
def render_key(key):
return context._render_config_key(key).replace("__", ".")
def render_value(value):
                if isinstance(value, (list, tuple)):
value = ", ".join(value)
return value
resolve = False
else:
render_key = context._render_config_key
render_value = lambda value: value
return (
(render_key(key), render_value(value))
for key, value in context._config.iter_config(resolve)
)
def to_dict(self, resolve=False):
"""export policy object as dictionary of options.
.. deprecated:: 1.6
applications should use :meth:`CryptContext.to_dict` instead.
"""
if self._stub_policy:
warn(_preamble +
"Instead of ``context.policy.to_dict()``, "
"use ``context.to_dict()``.",
DeprecationWarning, stacklevel=2)
else:
warn(_preamble +
"Instead of ``CryptPolicy().to_dict()``, "
"create a CryptContext instance and "
"use ``context.to_dict()``.",
DeprecationWarning, stacklevel=2)
return self._context.to_dict(resolve)
def to_file(self, stream, section="zdppy_password_hash"): # pragma: no cover -- deprecated & unused
"""export policy to file.
.. deprecated:: 1.6
applications should use :meth:`CryptContext.to_string` instead,
and then write the output to a file as desired.
"""
if self._stub_policy:
warn(_preamble +
"Instead of ``context.policy.to_file(stream)``, "
"use ``stream.write(context.to_string())``.",
DeprecationWarning, stacklevel=2)
else:
warn(_preamble +
"Instead of ``CryptPolicy().to_file(stream)``, "
"create a CryptContext instance and "
"use ``stream.write(context.to_string())``.",
DeprecationWarning, stacklevel=2)
out = self._context.to_string(section=section)
if PY2:
out = out.encode("utf-8")
stream.write(out)
    def to_string(self, section="zdppy_password_hash", encoding=None):
        """export policy to a unicode string.
.. deprecated:: 1.6
applications should use :meth:`CryptContext.to_string` instead.
"""
if self._stub_policy:
warn(_preamble + # pragma: no cover -- deprecated & unused
"Instead of ``context.policy.to_string()``, "
"use ``context.to_string()``.",
DeprecationWarning, stacklevel=2)
else:
warn(_preamble +
"Instead of ``CryptPolicy().to_string()``, "
"create a CryptContext instance and "
"use ``context.to_string()``.",
DeprecationWarning, stacklevel=2)
out = self._context.to_string(section=section)
if encoding:
out = out.encode(encoding)
return out
#===================================================================
# eoc
#===================================================================
#=============================================================================
# _CryptConfig helper class
#=============================================================================
class _CryptConfig(object):
"""parses, validates, and stores CryptContext config
this is a helper used internally by CryptContext to handle
parsing, validation, and serialization of its config options.
    split out from the main class, but not made public since
    that would just complicate the interface too much (cf. CryptPolicy)
:arg source: config as dict mapping ``(cat,scheme,option) -> value``
"""
#===================================================================
# instance attrs
#===================================================================
# triple-nested dict which maps scheme -> category -> key -> value,
# storing all hash-specific options
_scheme_options = None
# double-nested dict which maps key -> category -> value
# storing all CryptContext options
_context_options = None
# tuple of handler objects
handlers = None
# tuple of scheme objects in same order as handlers
schemes = None
# tuple of categories in alphabetical order (not including None)
categories = None
# set of all context keywords used by active schemes
context_kwds = None
# dict mapping category -> default scheme
_default_schemes = None
# dict mapping (scheme, category) -> custom handler
_records = None
# dict mapping category -> list of custom handler instances for that category,
# in order of schemes(). populated on demand by _get_record_list()
_record_lists = None
#===================================================================
# constructor
#===================================================================
def __init__(self, source):
self._init_scheme_list(source.get((None,None,"schemes")))
self._init_options(source)
self._init_default_schemes()
self._init_records()
def _init_scheme_list(self, data):
"""initialize .handlers and .schemes attributes"""
handlers = []
schemes = []
if isinstance(data, native_string_types):
data = splitcomma(data)
for elem in data or ():
# resolve elem -> handler & scheme
if hasattr(elem, "name"):
handler = elem
scheme = handler.name
_validate_handler_name(scheme)
elif isinstance(elem, native_string_types):
handler = get_crypt_handler(elem)
scheme = handler.name
else:
                raise TypeError("scheme must be a name or CryptHandler, "
                                "not %r" % type(elem))
# check scheme name isn't already in use
if scheme in schemes:
raise KeyError("multiple handlers with same name: %r" %
(scheme,))
# add to handler list
handlers.append(handler)
schemes.append(scheme)
self.handlers = tuple(handlers)
self.schemes = tuple(schemes)
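    # Illustrative sketch (not from the original source) of the forms the
    # "schemes" value may take -- a comma-separated string, or a list mixing
    # names and handler objects:
    ##  _CryptConfig({(None, None, "schemes"): "sha256_crypt, md5_crypt"})
    ##  _CryptConfig({(None, None, "schemes"): ["sha256_crypt", "md5_crypt"]})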
#===================================================================
# lowlevel options
#===================================================================
#---------------------------------------------------------------
# init lowlevel option storage
#---------------------------------------------------------------
def _init_options(self, source):
"""load config dict into internal representation,
and init .categories attr
"""
# prepare dicts & locals
norm_scheme_option = self._norm_scheme_option
norm_context_option = self._norm_context_option
self._scheme_options = scheme_options = {}
self._context_options = context_options = {}
categories = set()
# load source config into internal storage
for (cat, scheme, key), value in iteritems(source):
categories.add(cat)
explicit_scheme = scheme
if not cat and not scheme and key in _global_settings:
# going forward, not using "<cat>__all__<key>" format. instead...
# whitelisting set of keys which should be passed to (all) schemes,
# rather than passed to the CryptContext itself
scheme = "all"
if scheme:
# normalize scheme option
key, value = norm_scheme_option(key, value)
# e.g. things like "min_rounds" should never be set cross-scheme
# this will be fatal under 2.0.
if scheme == "all" and key not in _global_settings:
warn("The '%s' option should be configured per-algorithm, and not set "
"globally in the context; This will be an error in Passlib 2.0" %
(key,), PasslibConfigWarning)
# this scheme is going away in 2.0;
# but most keys deserve an extra warning since it impacts security.
if explicit_scheme == "all":
warn("The 'all' scheme is deprecated as of Passlib 1.7, "
"and will be removed in Passlib 2.0; Please configure "
"options on a per-algorithm basis.", DeprecationWarning)
# store in scheme_options
# map structure: scheme_options[scheme][category][key] = value
try:
category_map = scheme_options[scheme]
except KeyError:
scheme_options[scheme] = {cat: {key: value}}
else:
try:
option_map = category_map[cat]
except KeyError:
category_map[cat] = {key: value}
else:
option_map[key] = value
else:
# normalize context option
if cat and key == "schemes":
raise KeyError("'schemes' context option is not allowed "
"per category")
key, value = norm_context_option(cat, key, value)
if key == "min_verify_time": # ignored in 1.7, to be removed in 1.8
continue
# store in context_options
# map structure: context_options[key][category] = value
try:
category_map = context_options[key]
except KeyError:
context_options[key] = {cat: value}
else:
category_map[cat] = value
# store list of configured categories
categories.discard(None)
self.categories = tuple(sorted(categories))
def _norm_scheme_option(self, key, value):
# check for invalid options
if key in _forbidden_scheme_options:
raise KeyError("%r option not allowed in CryptContext "
"configuration" % (key,))
# coerce strings for certain fields (e.g. min_rounds uses ints)
if isinstance(value, native_string_types):
func = _coerce_scheme_options.get(key)
if func:
value = func(value)
return key, value
def _norm_context_option(self, cat, key, value):
schemes = self.schemes
if key == "default":
if hasattr(value, "name"):
value = value.name
elif not isinstance(value, native_string_types):
raise ExpectedTypeError(value, "str", "default")
if schemes and value not in schemes:
raise KeyError("default scheme not found in policy")
elif key == "deprecated":
if isinstance(value, native_string_types):
value = splitcomma(value)
elif not isinstance(value, (list,tuple)):
raise ExpectedTypeError(value, "str or seq", "deprecated")
if 'auto' in value:
# XXX: have any statements been made about when this is default?
# should do it in 1.8 at latest.
if len(value) > 1:
raise ValueError("cannot list other schemes if "
"``deprecated=['auto']`` is used")
elif schemes:
# make sure list of deprecated schemes is subset of configured schemes
for scheme in value:
if not isinstance(scheme, native_string_types):
raise ExpectedTypeError(value, "str", "deprecated element")
if scheme not in schemes:
raise KeyError("deprecated scheme not found "
"in policy: %r" % (scheme,))
elif key == "min_verify_time":
warn("'min_verify_time' was deprecated in Passlib 1.6, is "
"ignored in 1.7, and will be removed in 1.8",
DeprecationWarning)
elif key == "harden_verify":
            warn("'harden_verify' is deprecated & ignored as of Passlib 1.7.1, "
                 "and will be removed in 1.8",
DeprecationWarning)
elif key != "schemes":
raise KeyError("unknown CryptContext keyword: %r" % (key,))
return key, value
#---------------------------------------------------------------
# reading context options
#---------------------------------------------------------------
def get_context_optionmap(self, key, _default={}):
"""return dict mapping category->value for specific context option.
.. warning:: treat return value as readonly!
"""
return self._context_options.get(key, _default)
def get_context_option_with_flag(self, category, key):
"""return value of specific option, handling category inheritance.
also returns flag indicating whether value is category-specific.
"""
try:
category_map = self._context_options[key]
except KeyError:
return None, False
value = category_map.get(None)
if category:
try:
alt = category_map[category]
except KeyError:
pass
else:
if value is None or alt != value:
return alt, True
return value, False
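    # Hedged sketch of the inheritance semantics, assuming a config that sets
    # default=sha256_crypt globally and overrides it with md5_crypt for a
    # hypothetical "admin" category:
    ##  get_context_option_with_flag(None, "default")    -> ("sha256_crypt", False)
    ##  get_context_option_with_flag("admin", "default") -> ("md5_crypt", True)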
#---------------------------------------------------------------
# reading scheme options
#---------------------------------------------------------------
def _get_scheme_optionmap(self, scheme, category, default={}):
"""return all options for (scheme,category) combination
.. warning:: treat return value as readonly!
"""
try:
return self._scheme_options[scheme][category]
except KeyError:
return default
def get_base_handler(self, scheme):
return self.handlers[self.schemes.index(scheme)]
@staticmethod
def expand_settings(handler):
setting_kwds = handler.setting_kwds
if 'rounds' in handler.setting_kwds:
            # XXX: historically these extras weren't listed in setting_kwds
setting_kwds += uh.HasRounds.using_rounds_kwds
return setting_kwds
# NOTE: this is only used by _get_record_options_with_flag()...
def get_scheme_options_with_flag(self, scheme, category):
"""return composite dict of all options set for scheme.
includes options inherited from 'all' and from default category.
result can be modified.
returns (kwds, has_cat_specific_options)
"""
# start out with copy of global options
get_optionmap = self._get_scheme_optionmap
kwds = get_optionmap("all", None).copy()
has_cat_options = False
# add in category-specific global options
if category:
defkwds = kwds.copy() # <-- used to detect category-specific options
kwds.update(get_optionmap("all", category))
# filter out global settings not supported by handler
allowed_settings = self.expand_settings(self.get_base_handler(scheme))
for key in set(kwds).difference(allowed_settings):
kwds.pop(key)
if category:
for key in set(defkwds).difference(allowed_settings):
defkwds.pop(key)
# add in default options for scheme
other = get_optionmap(scheme, None)
kwds.update(other)
# load category-specific options for scheme
if category:
defkwds.update(other)
kwds.update(get_optionmap(scheme, category))
# compare default category options to see if there's anything
# category-specific
if kwds != defkwds:
has_cat_options = True
return kwds, has_cat_options
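    # Rough sketch of the composition order used above (later sources win):
    ##  all/None -> all/<category> -> <scheme>/None -> <scheme>/<category>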
#===================================================================
# deprecated & default schemes
#===================================================================
def _init_default_schemes(self):
"""initialize maps containing default scheme for each category.
have to do this after _init_options(), since the default scheme
is affected by the list of deprecated schemes.
"""
# init maps & locals
get_optionmap = self.get_context_optionmap
default_map = self._default_schemes = get_optionmap("default").copy()
dep_map = get_optionmap("deprecated")
schemes = self.schemes
if not schemes:
return
# figure out default scheme
deps = dep_map.get(None) or ()
default = default_map.get(None)
if not default:
for scheme in schemes:
if scheme not in deps:
default_map[None] = scheme
break
else:
raise ValueError("must have at least one non-deprecated scheme")
elif default in deps:
raise ValueError("default scheme cannot be deprecated")
        # figure out per-category default schemes
for cat in self.categories:
cdeps = dep_map.get(cat, deps)
cdefault = default_map.get(cat, default)
if not cdefault:
for scheme in schemes:
if scheme not in cdeps:
default_map[cat] = scheme
break
else:
raise ValueError("must have at least one non-deprecated "
"scheme for %r category" % cat)
elif cdefault in cdeps:
raise ValueError("default scheme for %r category "
"cannot be deprecated" % cat)
def default_scheme(self, category):
"""return default scheme for specific category"""
defaults = self._default_schemes
try:
return defaults[category]
except KeyError:
pass
if not self.schemes:
raise KeyError("no hash schemes configured for this "
"CryptContext instance")
return defaults[None]
def is_deprecated_with_flag(self, scheme, category):
"""is scheme deprecated under particular category?"""
depmap = self.get_context_optionmap("deprecated")
def test(cat):
source = depmap.get(cat, depmap.get(None))
if source is None:
return None
elif 'auto' in source:
return scheme != self.default_scheme(cat)
else:
return scheme in source
value = test(None) or False
if category:
alt = test(category)
if alt is not None and value != alt:
return alt, True
return value, False
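    # Illustrative sketch, assuming deprecated=["md5_crypt"] globally and a
    # hypothetical "admin" category overriding it with deprecated=["des_crypt"]:
    ##  is_deprecated_with_flag("md5_crypt", None)    -> (True, False)
    ##  is_deprecated_with_flag("md5_crypt", "admin") -> (False, True)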
#===================================================================
# CryptRecord objects
#===================================================================
def _init_records(self):
# NOTE: this step handles final validation of settings,
# checking for violations against handler's internal invariants.
# this is why we create all the records now,
# so CryptContext throws error immediately rather than later.
self._record_lists = {}
records = self._records = {}
all_context_kwds = self.context_kwds = set()
get_options = self._get_record_options_with_flag
categories = (None,) + self.categories
for handler in self.handlers:
scheme = handler.name
all_context_kwds.update(handler.context_kwds)
for cat in categories:
kwds, has_cat_options = get_options(scheme, cat)
if cat is None or has_cat_options:
records[scheme, cat] = self._create_record(handler, cat, **kwds)
# NOTE: if handler has no category-specific opts, get_record()
# will automatically use the default category's record.
# NOTE: default records for specific category stored under the
# key (None,category); these are populated on-demand by get_record().
@staticmethod
def _create_record(handler, category=None, deprecated=False, **settings):
# create custom handler if needed.
try:
# XXX: relaxed=True is mostly here to retain backwards-compat behavior.
# could make this optional flag in future.
subcls = handler.using(relaxed=True, **settings)
except TypeError as err:
m = re.match(r".* unexpected keyword argument '(.*)'$", str(err))
if m and m.group(1) in settings:
# translate into KeyError, for backwards compat.
# XXX: push this down to GenericHandler.using() implementation?
key = m.group(1)
raise KeyError("keyword not supported by %s handler: %r" %
(handler.name, key))
raise
# using private attrs to store some extra metadata in custom handler
assert subcls is not handler, "expected unique variant of handler"
##subcls._Context__category = category
subcls._Context__orig_handler = handler
subcls.deprecated = deprecated # attr reserved for this purpose
return subcls
def _get_record_options_with_flag(self, scheme, category):
"""return composite dict of options for given scheme + category.
this is currently a private method, though some variant
of its output may eventually be made public.
given a scheme & category, it returns two things:
        a dict of all the keyword options to pass to :meth:`_create_record`,
and a bool flag indicating whether any of these options
were specific to the named category. if this flag is false,
the options are identical to the options for the default category.
the options dict includes all the scheme-specific settings,
        as well as the optional *deprecated* keyword.
"""
# get scheme options
kwds, has_cat_options = self.get_scheme_options_with_flag(scheme, category)
# throw in deprecated flag
value, not_inherited = self.is_deprecated_with_flag(scheme, category)
if value:
kwds['deprecated'] = True
if not_inherited:
has_cat_options = True
return kwds, has_cat_options
def get_record(self, scheme, category):
"""return record for specific scheme & category (cached)"""
# NOTE: this is part of the critical path shared by
# all of CryptContext's PasswordHash methods,
# hence all the caching and error checking.
# quick lookup in cache
try:
return self._records[scheme, category]
except KeyError:
pass
# type check
if category is not None and not isinstance(category, native_string_types):
if PY2 and isinstance(category, unicode):
# for compatibility with unicode-centric py2 apps
return self.get_record(scheme, category.encode("utf-8"))
raise ExpectedTypeError(category, "str or None", "category")
if scheme is not None and not isinstance(scheme, native_string_types):
raise ExpectedTypeError(scheme, "str or None", "scheme")
# if scheme=None,
# use record for category's default scheme, and cache result.
if not scheme:
default = self.default_scheme(category)
assert default
record = self._records[None, category] = self.get_record(default,
category)
return record
# if no record for (scheme, category),
# use record for (scheme, None), and cache result.
if category:
try:
cache = self._records
record = cache[scheme, category] = cache[scheme, None]
return record
except KeyError:
pass
# scheme not found in configuration for default category
raise KeyError("crypt algorithm not found in policy: %r" % (scheme,))
def _get_record_list(self, category=None):
"""return list of records for category (cached)
this is an internal helper used only by identify_record()
"""
# type check of category - handled by _get_record()
# quick lookup in cache
try:
return self._record_lists[category]
except KeyError:
pass
# cache miss - build list from scratch
value = self._record_lists[category] = [
self.get_record(scheme, category)
for scheme in self.schemes
]
return value
def identify_record(self, hash, category, required=True):
"""internal helper to identify appropriate custom handler for hash"""
# NOTE: this is part of the critical path shared by
# all of CryptContext's PasswordHash methods,
# hence all the caching and error checking.
# FIXME: if multiple hashes could match (e.g. lmhash vs nthash)
# this will only return first match. might want to do something
# about this in future, but for now only hashes with
# unique identifiers will work properly in a CryptContext.
# XXX: if all handlers have a unique prefix (e.g. all are MCF / LDAP),
# could use dict-lookup to speed up this search.
if not isinstance(hash, unicode_or_bytes_types):
raise ExpectedStringError(hash, "hash")
# type check of category - handled by _get_record_list()
for record in self._get_record_list(category):
if record.identify(hash):
return record
if not required:
return None
elif not self.schemes:
raise KeyError("no crypt algorithms supported")
else:
raise exc.UnknownHashError("hash could not be identified")
@memoized_property
def disabled_record(self):
for record in self._get_record_list(None):
if record.is_disabled:
return record
raise RuntimeError("no disabled hasher present "
"(perhaps add 'unix_disabled' to list of schemes?)")
#===================================================================
# serialization
#===================================================================
def iter_config(self, resolve=False):
"""regenerate original config.
this is an iterator which yields ``(cat,scheme,option),value`` items,
in the order they generally appear inside an INI file.
if interpreted as a dictionary, it should match the original
        keywords passed to the CryptContext (aside from any canonicalization).
it's mainly used as the internal backend for most of the public
serialization methods.
"""
# grab various bits of data
scheme_options = self._scheme_options
context_options = self._context_options
scheme_keys = sorted(scheme_options)
context_keys = sorted(context_options)
# write loaded schemes (may differ from 'schemes' local var)
if 'schemes' in context_keys:
context_keys.remove("schemes")
value = self.handlers if resolve else self.schemes
if value:
yield (None, None, "schemes"), list(value)
# then run through config for each user category
for cat in (None,) + self.categories:
# write context options
for key in context_keys:
try:
value = context_options[key][cat]
except KeyError:
pass
else:
if isinstance(value, list):
value = list(value)
yield (cat, None, key), value
# write per-scheme options for all schemes.
for scheme in scheme_keys:
try:
kwds = scheme_options[scheme][cat]
except KeyError:
pass
else:
for key in sorted(kwds):
yield (cat, scheme, key), kwds[key]
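    # Hedged sketch of the items this yields for a minimal config (the
    # rounds value is hypothetical):
    ##  ((None, None, "schemes"), ["sha256_crypt"])
    ##  ((None, "sha256_crypt", "default_rounds"), 80000)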
#===================================================================
# eoc
#===================================================================
#=============================================================================
# main CryptContext class
#=============================================================================
class CryptContext(object):
"""Helper for hashing & verifying passwords using multiple algorithms.
Instances of this class allow applications to choose a specific
set of hash algorithms which they wish to support, set limits and defaults
for the rounds and salt sizes those algorithms should use, flag
which algorithms should be deprecated, and automatically handle
migrating users to stronger hashes when they log in.
Basic usage::
>>> ctx = CryptContext(schemes=[...])
See the Passlib online documentation for details and full documentation.
"""
# FIXME: altering the configuration of this object isn't threadsafe,
# but is generally only done during application init, so not a major
# issue (just yet).
# XXX: would like some way to restrict the categories that are allowed,
# to restrict what the app OR the config can use.
# XXX: add wrap/unwrap callback hooks so app can mutate hash format?
# XXX: add method for detecting and warning user about schemes
# which don't have any good distinguishing marks?
# or greedy ones (unix_disabled, plaintext) which are not listed at the end?
#===================================================================
# instance attrs
#===================================================================
# _CryptConfig instance holding current parsed config
_config = None
# copy of _config methods, stored in CryptContext instance for speed.
_get_record = None
_identify_record = None
#===================================================================
# secondary constructors
#===================================================================
@classmethod
def _norm_source(cls, source):
"""internal helper - accepts string, dict, or context"""
if isinstance(source, dict):
return cls(**source)
elif isinstance(source, cls):
return source
else:
self = cls()
self.load(source)
return self
@classmethod
def from_string(cls, source, section="zdppy_password_hash", encoding="utf-8"):
"""create new CryptContext instance from an INI-formatted string.
:type source: unicode or bytes
:arg source:
string containing INI-formatted content.
:type section: str
:param section:
option name of section to read from, defaults to ``"zdppy_password_hash"``.
:type encoding: str
:arg encoding:
optional encoding used when source is bytes, defaults to ``"utf-8"``.
:returns:
new :class:`CryptContext` instance, configured based on the
parameters in the *source* string.
Usage example::
>>> from zdppy_password_hash.context import CryptContext
>>> context = CryptContext.from_string('''
... [zdppy_password_hash]
... schemes = sha256_crypt, des_crypt
... sha256_crypt__default_rounds = 30000
... ''')
.. versionadded:: 1.6
.. seealso:: :meth:`to_string`, the inverse of this constructor.
"""
if not isinstance(source, unicode_or_bytes_types):
raise ExpectedTypeError(source, "unicode or bytes", "source")
self = cls(_autoload=False)
self.load(source, section=section, encoding=encoding)
return self
@classmethod
def from_path(cls, path, section="zdppy_password_hash", encoding="utf-8"):
"""create new CryptContext instance from an INI-formatted file.
this functions exactly the same as :meth:`from_string`,
except that it loads from a local file.
:type path: str
:arg path:
path to local file containing INI-formatted config.
:type section: str
:param section:
option name of section to read from, defaults to ``"zdppy_password_hash"``.
:type encoding: str
:arg encoding:
encoding used to load file, defaults to ``"utf-8"``.
:returns:
new CryptContext instance, configured based on the parameters
stored in the file *path*.
.. versionadded:: 1.6
.. seealso:: :meth:`from_string` for an equivalent usage example.
"""
self = cls(_autoload=False)
self.load_path(path, section=section, encoding=encoding)
return self
def copy(self, **kwds):
"""Return copy of existing CryptContext instance.
This function returns a new CryptContext instance whose configuration
is exactly the same as the original, with the exception that any keywords
passed in will take precedence over the original settings.
As an example::
>>> from zdppy_password_hash.context import CryptContext
>>> # given an existing context...
>>> ctx1 = CryptContext(["sha256_crypt", "md5_crypt"])
>>> # copy can be used to make a clone, and update
>>> # some of the settings at the same time...
        >>> ctx2 = ctx1.copy(default="md5_crypt")
>>> # and the original will be unaffected by the change
>>> ctx1.default_scheme()
"sha256_crypt"
>>> ctx2.default_scheme()
"md5_crypt"
.. versionadded:: 1.6
This method was previously named :meth:`!replace`. That alias
has been deprecated, and will be removed in Passlib 1.8.
.. seealso:: :meth:`update`
"""
# XXX: it would be faster to store ref to self._config,
        # but we don't want to share config objects until we're sure
        # we can rely on them being immutable.
other = CryptContext(_autoload=False)
other.load(self)
if kwds:
other.load(kwds, update=True)
return other
def using(self, **kwds):
"""
alias for :meth:`copy`, to match PasswordHash.using()
"""
return self.copy(**kwds)
def replace(self, **kwds):
"""deprecated alias of :meth:`copy`"""
warn("CryptContext().replace() has been deprecated in Passlib 1.6, "
"and will be removed in Passlib 1.8, "
"it has been renamed to CryptContext().copy()",
DeprecationWarning, stacklevel=2)
return self.copy(**kwds)
#===================================================================
# init
#===================================================================
def __init__(self, schemes=None,
# keyword only...
policy=_UNSET, # <-- deprecated
_autoload=True, **kwds):
# XXX: add ability to make flag certain contexts as immutable,
# e.g. the builtin zdppy_password_hash ones?
# XXX: add a name or import path for the contexts, to help out repr?
if schemes is not None:
kwds['schemes'] = schemes
if policy is not _UNSET:
            warn("The CryptContext ``policy`` keyword has been deprecated as of Passlib 1.6, "
                 "and will be removed in Passlib 1.8; please use "
                 "``CryptContext.from_string()`` or "
                 "``CryptContext.from_path()`` instead.",
DeprecationWarning)
if policy is None:
self.load(kwds)
elif isinstance(policy, CryptPolicy):
self.load(policy._context)
self.update(kwds)
else:
raise TypeError("policy must be a CryptPolicy instance")
elif _autoload:
self.load(kwds)
else:
assert not kwds, "_autoload=False and kwds are mutually exclusive"
# XXX: would this be useful?
##def __str__(self):
## if PY3:
## return self.to_string()
## else:
## return self.to_string().encode("utf-8")
def __repr__(self):
return "<CryptContext at 0x%0x>" % id(self)
#===================================================================
# deprecated policy object
#===================================================================
def _get_policy(self):
# The CryptPolicy class has been deprecated, so to support any
# legacy accesses, we create a stub policy object so .policy attr
# will continue to work.
#
# the code waits until app accesses a specific policy object attribute
# before issuing deprecation warning, so developer gets method-specific
# suggestion for how to upgrade.
# NOTE: making a copy of the context so the policy acts like a snapshot,
# to retain the pre-1.6 behavior.
return CryptPolicy(_internal_context=self.copy(), _stub_policy=True)
def _set_policy(self, policy):
warn("The CryptPolicy class and the ``context.policy`` attribute have "
"been deprecated as of Passlib 1.6, and will be removed in "
"Passlib 1.8; please use the ``context.load()`` and "
"``context.update()`` methods instead.",
DeprecationWarning, stacklevel=2)
if isinstance(policy, CryptPolicy):
self.load(policy._context)
else:
raise TypeError("expected CryptPolicy instance")
policy = property(_get_policy, _set_policy,
doc="[deprecated] returns CryptPolicy instance "
"tied to this CryptContext")
#===================================================================
# loading / updating configuration
#===================================================================
@staticmethod
    def _parse_ini_stream(stream, section, filename):
        """helper to read INI from stream, extracting zdppy_password_hash section as dict"""
# NOTE: this expects a unicode stream under py3,
# and a utf-8 bytes stream under py2,
# allowing the resulting dict to always use native strings.
p = SafeConfigParser()
if PY3:
# python 3.2 deprecated readfp in favor of read_file
p.read_file(stream, filename)
else:
p.readfp(stream, filename)
# XXX: could change load() to accept list of items,
# and skip intermediate dict creation
return dict(p.items(section))
def load_path(self, path, update=False, section="zdppy_password_hash", encoding="utf-8"):
"""Load new configuration into CryptContext from a local file.
This function is a wrapper for :meth:`load` which
loads a configuration string from the local file *path*,
instead of an in-memory source. Its behavior and options
are otherwise identical to :meth:`!load` when provided with
an INI-formatted string.
.. versionadded:: 1.6
"""
def helper(stream):
kwds = self._parse_ini_stream(stream, section, path)
return self.load(kwds, update=update)
if PY3:
            # decode to unicode, which load() expects under py3
with open(path, "rt", encoding=encoding) as stream:
return helper(stream)
elif encoding in ["utf-8", "ascii"]:
# keep as utf-8 bytes, which load() expects under py2
with open(path, "rb") as stream:
return helper(stream)
else:
# transcode to utf-8 bytes
with open(path, "rb") as fh:
tmp = fh.read().decode(encoding).encode("utf-8")
return helper(BytesIO(tmp))
def load(self, source, update=False, section="zdppy_password_hash", encoding="utf-8"):
"""Load new configuration into CryptContext, replacing existing config.
:arg source:
source of new configuration to load.
this value can be a number of different types:
* a :class:`!dict` object, or compatible Mapping
            the key/value pairs will be interpreted the same as keywords
            for the :class:`CryptContext` class constructor.
* a :class:`!unicode` or :class:`!bytes` string
this will be interpreted as an INI-formatted file,
and appropriate key/value pairs will be loaded from
the specified *section*.
* another :class:`!CryptContext` object.
this will export a snapshot of its configuration
using :meth:`to_dict`.
:type update: bool
:param update:
By default, :meth:`load` will replace the existing configuration
entirely. If ``update=True``, it will preserve any existing
configuration options that are not overridden by the new source,
much like the :meth:`update` method.
:type section: str
:param section:
When parsing an INI-formatted string, :meth:`load` will look for
a section named ``"zdppy_password_hash"``. This option allows an alternate
section name to be used. Ignored when loading from a dictionary.
:type encoding: str
:param encoding:
Encoding to use when **source** is bytes.
Defaults to ``"utf-8"``. Ignored when loading from a dictionary.
.. deprecated:: 1.8
This keyword, and support for bytes input, will be dropped in Passlib 2.0
:raises TypeError:
* If the source cannot be identified.
* If an unknown / malformed keyword is encountered.
:raises ValueError:
If an invalid keyword value is encountered.
.. note::
If an error occurs during a :meth:`!load` call, the :class:`!CryptContext`
instance will be restored to the configuration it was in before
the :meth:`!load` call was made; this is to ensure it is
*never* left in an inconsistent state due to a load error.
.. versionadded:: 1.6
"""
#-----------------------------------------------------------
# autodetect source type, convert to dict
#-----------------------------------------------------------
parse_keys = True
if isinstance(source, unicode_or_bytes_types):
if PY3:
source = to_unicode(source, encoding, param="source")
else:
source = to_bytes(source, "utf-8", source_encoding=encoding,
param="source")
source = self._parse_ini_stream(NativeStringIO(source), section,
"<string passed to CryptContext.load()>")
elif isinstance(source, CryptContext):
# extract dict directly from config, so it can be merged later
source = dict(source._config.iter_config(resolve=True))
parse_keys = False
elif not hasattr(source, "items"):
            # mappings are accepted as-is; anything else is an error.
raise ExpectedTypeError(source, "string or dict", "source")
# XXX: add support for other iterable types, e.g. sequence of pairs?
#-----------------------------------------------------------
# parse dict keys into (category, scheme, option) format,
# and merge with existing configuration if needed.
#-----------------------------------------------------------
if parse_keys:
parse = self._parse_config_key
source = dict((parse(key), value)
for key, value in iteritems(source))
if update and self._config is not None:
# if updating, do nothing if source is empty,
if not source:
return
# otherwise overlay source on top of existing config
tmp = source
source = dict(self._config.iter_config(resolve=True))
source.update(tmp)
#-----------------------------------------------------------
# compile into _CryptConfig instance, and update state
#-----------------------------------------------------------
config = _CryptConfig(source)
self._config = config
self._reset_dummy_verify()
self._get_record = config.get_record
self._identify_record = config.identify_record
if config.context_kwds:
            # (re-)enable method for this instance (in case the ELSE clause below ran during the last load).
self.__dict__.pop("_strip_unused_context_kwds", None)
else:
# disable method for this instance, it's not needed.
self._strip_unused_context_kwds = None
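    # Illustrative usage sketch (option values are hypothetical; other_ctx
    # stands in for any existing CryptContext):
    ##  ctx = CryptContext(_autoload=False)
    ##  ctx.load({"schemes": ["sha256_crypt"]})                 # dict source
    ##  ctx.load("[zdppy_password_hash]\nschemes = md5_crypt")  # INI string source
    ##  ctx.load(other_ctx)                                     # CryptContext source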
@staticmethod
def _parse_config_key(ckey):
"""helper used to parse ``cat__scheme__option`` keys into a tuple"""
# split string into 1-3 parts
assert isinstance(ckey, native_string_types)
parts = ckey.replace(".", "__").split("__")
count = len(parts)
if count == 1:
cat, scheme, key = None, None, parts[0]
elif count == 2:
cat = None
scheme, key = parts
elif count == 3:
cat, scheme, key = parts
else:
raise TypeError("keys must have less than 3 separators: %r" %
(ckey,))
# validate & normalize the parts
if cat == "default":
cat = None
elif not cat and cat is not None:
raise TypeError("empty category: %r" % ckey)
if scheme == "context":
scheme = None
elif not scheme and scheme is not None:
raise TypeError("empty scheme: %r" % ckey)
if not key:
raise TypeError("empty option: %r" % ckey)
return cat, scheme, key
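    # Illustrative sketch of the parse (the "admin" category is hypothetical):
    ##  "min_rounds"                      -> (None, None, "min_rounds")
    ##  "sha256_crypt__min_rounds"        -> (None, "sha256_crypt", "min_rounds")
    ##  "admin__sha256_crypt__min_rounds" -> ("admin", "sha256_crypt", "min_rounds")
    ##  "admin__context__default"         -> ("admin", None, "default")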
def update(self, *args, **kwds):
"""Helper for quickly changing configuration.
This acts much like the :meth:`!dict.update` method:
it updates the context's configuration,
replacing the original value(s) for the specified keys,
and preserving the rest.
It accepts any :ref:`keyword <context-options>`
accepted by the :class:`!CryptContext` constructor.
.. versionadded:: 1.6
.. seealso:: :meth:`copy`
"""
if args:
if len(args) > 1:
raise TypeError("expected at most one positional argument")
if kwds:
raise TypeError("positional arg and keywords mutually exclusive")
self.load(args[0], update=True)
elif kwds:
self.load(kwds, update=True)
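    # Illustrative usage sketch (the rounds value is hypothetical):
    ##  ctx = CryptContext(schemes=["sha256_crypt", "md5_crypt"])
    ##  ctx.update(sha256_crypt__default_rounds=80000,
    ##             deprecated=["md5_crypt"])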
# XXX: make this public? even just as flag to load?
# FIXME: this function suffered some bitrot in 1.6.1,
# will need to be updated before works again.
##def _simplify(self):
## "helper to remove redundant/unused options"
## # don't do anything if no schemes are defined
## if not self._schemes:
## return
##
## def strip_items(target, filter):
## keys = [key for key,value in iteritems(target)
## if filter(key,value)]
## for key in keys:
## del target[key]
##
## # remove redundant default.
## defaults = self._default_schemes
## if defaults.get(None) == self._schemes[0]:
## del defaults[None]
##
## # remove options for unused schemes.
## scheme_options = self._scheme_options
## schemes = self._schemes + ("all",)
## strip_items(scheme_options, lambda k,v: k not in schemes)
##
## # remove rendundant cat defaults.
## cur = self.default_scheme()
## strip_items(defaults, lambda k,v: k and v==cur)
##
## # remove redundant category deprecations.
## # TODO: this should work w/ 'auto', but needs closer inspection
## deprecated = self._deprecated_schemes
## cur = self._deprecated_schemes.get(None)
## strip_items(deprecated, lambda k,v: k and v==cur)
##
## # remove redundant category options.
## for scheme, config in iteritems(scheme_options):
## if None in config:
## cur = config[None]
## strip_items(config, lambda k,v: k and v==cur)
##
## # XXX: anything else?
#===================================================================
# reading configuration
#===================================================================
def schemes(self, resolve=False, category=None, unconfigured=False):
"""return schemes loaded into this CryptContext instance.
:type resolve: bool
:arg resolve:
if ``True``, will return a tuple of :class:`~zdppy_password_hash.ifc.PasswordHash`
objects instead of their names.
:returns:
returns tuple of the schemes configured for this context
via the *schemes* option.
.. versionadded:: 1.6
This was previously available as ``CryptContext().policy.schemes()``
.. seealso:: the :ref:`schemes <context-schemes-option>` option for usage example.
"""
        # XXX: should resolve return records rather than handlers?
# or deprecate resolve keyword completely?
# offering up a .hashers Mapping in v1.8 would be great.
# NOTE: supporting 'category' and 'unconfigured' kwds as of 1.7
# just to pass through to .handler(), but not documenting them...
# may not need to put them to use.
schemes = self._config.schemes
if resolve:
return tuple(self.handler(scheme, category, unconfigured=unconfigured)
for scheme in schemes)
else:
return schemes
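    # Illustrative sketch:
    ##  ctx = CryptContext(schemes=["sha256_crypt", "md5_crypt"])
    ##  ctx.schemes()             -> ("sha256_crypt", "md5_crypt")
    ##  ctx.schemes(resolve=True) -> tuple of configured hasher objects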
def default_scheme(self, category=None, resolve=False, unconfigured=False):
"""return name of scheme that :meth:`hash` will use by default.
:type resolve: bool
:arg resolve:
if ``True``, will return a :class:`~zdppy_password_hash.ifc.PasswordHash`
object instead of the name.
:type category: str or None
:param category:
Optional :ref:`user category <user-categories>`.
            If specified, this will return the category-specific default scheme instead.
:returns:
name of the default scheme.
.. seealso:: the :ref:`default <context-default-option>` option for usage example.
.. versionadded:: 1.6
.. versionchanged:: 1.7
This now returns a hasher configured with any CryptContext-specific
options (custom rounds settings, etc). Previously this returned
the base hasher from :mod:`zdppy_password_hash.hash`.
"""
# XXX: deprecate this in favor of .handler() or whatever it's replaced with?
# NOTE: supporting 'unconfigured' kwds as of 1.7
# just to pass through to .handler(), but not documenting them...
# may not need to put them to use.
hasher = self.handler(None, category, unconfigured=unconfigured)
return hasher if resolve else hasher.name
# XXX: need to decide if exposing this would be useful in any way
##def categories(self):
## """return user-categories with algorithm-specific options in this CryptContext.
##
## this will always return a tuple.
## if no categories besides the default category have been configured,
## the tuple will be empty.
## """
## return self._config.categories
# XXX: need to decide if exposing this would be useful to applications
# in any meaningful way that isn't already served by to_dict()
##def options(self, scheme, category=None):
## kwds, percat = self._config.get_options(scheme, category)
## return kwds
def handler(self, scheme=None, category=None, unconfigured=False):
"""helper to resolve name of scheme -> :class:`~zdppy_password_hash.ifc.PasswordHash` object used by scheme.
:arg scheme:
This should identify the scheme to lookup.
If omitted or set to ``None``, this will return the handler
for the default scheme.
:arg category:
If a user category is specified, and no scheme is provided,
it will use the default for that category.
Otherwise this parameter is ignored.
:param unconfigured:
            By default, this returns a handler object whose .hash()
            and .needs_update() methods will honor the configuration
            provided by CryptContext. Pass ``unconfigured=True``
            to get the underlying handler, from before any context-specific
            configuration was applied.
:raises KeyError:
If the scheme does not exist OR is not being used within this context.
:returns:
:class:`~zdppy_password_hash.ifc.PasswordHash` object used to implement
the named scheme within this context (this will usually
be one of the objects from :mod:`zdppy_password_hash.hash`)
.. versionadded:: 1.6
This was previously available as ``CryptContext().policy.get_handler()``
.. versionchanged:: 1.7
This now returns a hasher configured with any CryptContext-specific
options (custom rounds settings, etc). Previously this returned
the base hasher from :mod:`zdppy_password_hash.hash`.
"""
try:
hasher = self._get_record(scheme, category)
if unconfigured:
return hasher._Context__orig_handler
else:
return hasher
except KeyError:
pass
if self._config.handlers:
raise KeyError("crypt algorithm not found in this "
"CryptContext instance: %r" % (scheme,))
else:
raise KeyError("no crypt algorithms loaded in this "
"CryptContext instance")
def _get_unregistered_handlers(self):
"""check if any handlers in this context aren't in the global registry"""
return tuple(handler for handler in self._config.handlers
if not _is_handler_registered(handler))
@property
def context_kwds(self):
"""
return :class:`!set` containing union of all :ref:`contextual keywords <context-keywords>`
supported by the handlers in this context.
.. versionadded:: 1.6.6
"""
return self._config.context_kwds
#===================================================================
# exporting config
#===================================================================
@staticmethod
def _render_config_key(key):
"""convert 3-part config key to single string"""
cat, scheme, option = key
if cat:
return "%s__%s__%s" % (cat, scheme or "context", option)
elif scheme:
return "%s__%s" % (scheme, option)
else:
return option
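    # Illustrative sketch -- the inverse of _parse_config_key() (the "admin"
    # category is hypothetical):
    ##  (None, None, "schemes")              -> "schemes"
    ##  (None, "sha256_crypt", "min_rounds") -> "sha256_crypt__min_rounds"
    ##  ("admin", None, "default")           -> "admin__context__default"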
@staticmethod
def _render_ini_value(key, value):
"""render value to string suitable for INI file"""
# convert lists to comma separated lists
# (mainly 'schemes' & 'deprecated')
        if isinstance(value, (list, tuple)):
value = ", ".join(value)
# convert numbers to strings
elif isinstance(value, num_types):
if isinstance(value, float) and key[2] == "vary_rounds":
value = ("%.2f" % value).rstrip("0") if value else "0"
else:
value = str(value)
assert isinstance(value, native_string_types), \
"expected string for key: %r %r" % (key, value)
# escape any percent signs.
return value.replace("%", "%%")
def to_dict(self, resolve=False):
"""Return current configuration as a dictionary.
:type resolve: bool
:arg resolve:
            if ``True``, the ``schemes`` key will contain a list of
            :class:`~zdppy_password_hash.ifc.PasswordHash` objects instead of just
their names.
This method dumps the current configuration of the CryptContext
instance. The key/value pairs should be in the format accepted
by the :class:`!CryptContext` class constructor, in fact
``CryptContext(**myctx.to_dict())`` will create an exact copy of ``myctx``.
As an example::
>>> # you can dump the configuration of any crypt context...
>>> from zdppy_password_hash.apps import ldap_nocrypt_context
>>> ldap_nocrypt_context.to_dict()
{'schemes': ['ldap_salted_sha1',
'ldap_salted_md5',
'ldap_sha1',
'ldap_md5',
'ldap_plaintext']}
.. versionadded:: 1.6
This was previously available as ``CryptContext().policy.to_dict()``
.. seealso:: the :ref:`context-serialization-example` example in the tutorial.
"""
# XXX: should resolve default to conditional behavior
# based on presence of unregistered handlers?
render_key = self._render_config_key
return dict((render_key(key), value)
for key, value in self._config.iter_config(resolve))
def _write_to_parser(self, parser, section):
"""helper to write to ConfigParser instance"""
render_key = self._render_config_key
render_value = self._render_ini_value
parser.add_section(section)
for k,v in self._config.iter_config():
v = render_value(k, v)
k = render_key(k)
parser.set(section, k, v)
def to_string(self, section="zdppy_password_hash"):
"""serialize to INI format and return as unicode string.
:param section:
name of INI section to output, defaults to ``"zdppy_password_hash"``.
:returns:
            CryptContext configuration, serialized to an INI-formatted unicode string.
This function acts exactly like :meth:`to_dict`, except that it
serializes all the contents into a single human-readable string,
which can be hand edited, and/or stored in a file. The
output of this method is accepted by :meth:`from_string`,
:meth:`from_path`, and :meth:`load`. As an example::
>>> # you can dump the configuration of any crypt context...
>>> from zdppy_password_hash.apps import ldap_nocrypt_context
        >>> print(ldap_nocrypt_context.to_string())
[zdppy_password_hash]
schemes = ldap_salted_sha1, ldap_salted_md5, ldap_sha1, ldap_md5, ldap_plaintext
.. versionadded:: 1.6
This was previously available as ``CryptContext().policy.to_string()``
.. seealso:: the :ref:`context-serialization-example` example in the tutorial.
"""
parser = SafeConfigParser()
self._write_to_parser(parser, section)
buf = NativeStringIO()
parser.write(buf)
unregistered = self._get_unregistered_handlers()
if unregistered:
buf.write((
"# NOTE: the %s handler(s) are not registered with Passlib,\n"
"# this string may not correctly reproduce the current configuration.\n\n"
) % ", ".join(repr(handler.name) for handler in unregistered))
out = buf.getvalue()
if not PY3:
out = out.decode("utf-8")
return out
# XXX: is this useful enough to enable?
##def write_to_path(self, path, section="zdppy_password_hash", update=False):
## "write to INI file"
## parser = ConfigParser()
## if update and os.path.exists(path):
## if not parser.read([path]):
## raise EnvironmentError("failed to read existing file")
## parser.remove_section(section)
## self._write_to_parser(parser, section)
## fh = file(path, "w")
## parser.write(fh)
## fh.close()
#===================================================================
# verify() hardening
# NOTE: this entire feature has been disabled.
# all contents of this section are NOOPs as of 1.7.1,
# and will be removed in 1.8.
#===================================================================
mvt_estimate_max_samples = 20
mvt_estimate_min_samples = 10
mvt_estimate_max_time = 2
mvt_estimate_resolution = 0.01
harden_verify = None
min_verify_time = 0
def reset_min_verify_time(self):
self._reset_dummy_verify()
#===================================================================
# password hash api
#===================================================================
# NOTE: all the following methods do is look up the appropriate
# custom handler for a given (scheme,category) combination,
# and hand off the real work to the handler itself,
# which is optimized for the specific (scheme,category) configuration.
#
# The custom handlers are cached inside the _CryptConfig
# instance stored in self._config, and are retrieved
# via get_record() and identify_record().
#
# _get_record() and _identify_record() are references
# to _config methods of the same name,
# stored in CryptContext for speed.
def _get_or_identify_record(self, hash, scheme=None, category=None):
"""return record based on scheme, or failing that, by identifying hash"""
if scheme:
if not isinstance(hash, unicode_or_bytes_types):
raise ExpectedStringError(hash, "hash")
return self._get_record(scheme, category)
else:
# hash typecheck handled by identify_record()
return self._identify_record(hash, category)
def _strip_unused_context_kwds(self, kwds, record):
"""
helper which removes any context keywords from **kwds**
that are known to be used by another scheme in this context,
but are NOT supported by handler specified by **record**.
.. note::
as optimization, load() will set this method to None on a per-instance basis
if there are no context kwds.
"""
if not kwds:
return
unused_kwds = self._config.context_kwds.difference(record.context_kwds)
for key in unused_kwds:
kwds.pop(key, None)
def needs_update(self, hash, scheme=None, category=None, secret=None):
"""Check if hash needs to be replaced for some reason,
in which case the secret should be re-hashed.
This function is the core of CryptContext's support for hash migration:
This function takes in a hash string, and checks the scheme,
number of rounds, and other properties against the current policy.
It returns ``True`` if the hash is using a deprecated scheme,
or is otherwise outside of the bounds specified by the policy
(e.g. the number of rounds is lower than :ref:`min_rounds <context-min-rounds-option>`
configuration for that algorithm).
If so, the password should be re-hashed using :meth:`hash`
Otherwise, it will return ``False``.
:type hash: unicode or bytes
:arg hash:
The hash string to examine.
:type scheme: str or None
:param scheme:
Optional scheme to use. Scheme must be one of the ones
configured for this context (see the
:ref:`schemes <context-schemes-option>` option).
If no scheme is specified, it will be identified
based on the value of *hash*.
.. deprecated:: 1.7
Support for this keyword is deprecated, and will be removed in Passlib 2.0.
:type category: str or None
:param category:
Optional :ref:`user category <user-categories>`.
If specified, this will cause any category-specific defaults to
be used when determining if the hash needs to be updated
(e.g. is below the minimum rounds).
:type secret: unicode, bytes, or None
:param secret:
Optional secret associated with the provided ``hash``.
This is not required, or even currently used for anything...
it's for forward-compatibility with any future
update checks that might need this information.
If provided, Passlib assumes the secret has already been
verified successfully against the hash.
.. versionadded:: 1.6
:returns: ``True`` if hash should be replaced, otherwise ``False``.
:raises ValueError:
If the hash did not match any of the configured :meth:`schemes`.
.. versionadded:: 1.6
This method was previously named :meth:`hash_needs_update`.
.. seealso:: the :ref:`context-migration-example` example in the tutorial.
"""
if scheme is not None:
# TODO: offer replacement alternative.
# ``context.handler(scheme).needs_update()`` would work,
# but may deprecate .handler() in zdppy_password_hash 1.8.
warn("CryptContext.needs_update(): 'scheme' keyword is deprecated as of "
"Passlib 1.7, and will be removed in Passlib 2.0",
DeprecationWarning)
record = self._get_or_identify_record(hash, scheme, category)
return record.deprecated or record.needs_update(hash, secret=secret)
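    # Typical migration sketch -- store_new_hash() is a hypothetical
    # application helper; verify() is defined further down in this class:
    ##  if ctx.verify(password, stored_hash):
    ##      if ctx.needs_update(stored_hash, secret=password):
    ##          store_new_hash(user, ctx.hash(password))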
@deprecated_method(deprecated="1.6", removed="2.0", replacement="CryptContext.needs_update()")
def hash_needs_update(self, hash, scheme=None, category=None):
"""Legacy alias for :meth:`needs_update`.
.. deprecated:: 1.6
This method was renamed to :meth:`!needs_update` in version 1.6.
This alias will be removed in version 2.0, and should only
be used for compatibility with Passlib 1.3 - 1.5.
"""
return self.needs_update(hash, scheme, category)
@deprecated_method(deprecated="1.7", removed="2.0")
def genconfig(self, scheme=None, category=None, **settings):
"""Generate a config string for specified scheme.
.. deprecated:: 1.7
This method will be removed in version 2.0, and should only
be used for compatibility with Passlib 1.3 - 1.6.
"""
record = self._get_record(scheme, category)
strip_unused = self._strip_unused_context_kwds
if strip_unused:
strip_unused(settings, record)
return record.genconfig(**settings)
@deprecated_method(deprecated="1.7", removed="2.0")
def genhash(self, secret, config, scheme=None, category=None, **kwds):
"""Generate hash for the specified secret using another hash.
.. deprecated:: 1.7
This method will be removed in version 2.0, and should only
be used for compatibility with Passlib 1.3 - 1.6.
"""
record = self._get_or_identify_record(config, scheme, category)
strip_unused = self._strip_unused_context_kwds
if strip_unused:
strip_unused(kwds, record)
return record.genhash(secret, config, **kwds)
def identify(self, hash, category=None, resolve=False, required=False,
unconfigured=False):
"""Attempt to identify which algorithm the hash belongs to.
Note that this will only consider the algorithms
currently configured for this context
(see the :ref:`schemes <context-schemes-option>` option).
All configured algorithms will be checked, from first to last,
and whichever one positively identifies the hash first will be returned.
:type hash: unicode or bytes
:arg hash:
The hash string to test.
:type category: str or None
:param category:
Optional :ref:`user category <user-categories>`.
Ignored by this function, this parameter
is provided for symmetry with the other methods.
:type resolve: bool
:param resolve:
If ``True``, returns the hash handler itself,
instead of the name of the hash.
:type required: bool
:param required:
If ``True``, this will raise a ValueError if the hash
cannot be identified, instead of returning ``None``.
:returns:
The name of the scheme which first identifies the hash
(or the handler itself, if ``resolve=True``),
or ``None`` if none of the algorithms identify the hash.
"""
record = self._identify_record(hash, category, required)
if record is None:
return None
elif resolve:
if unconfigured:
return record._Context__orig_handler
else:
return record
else:
return record.name
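# Example sketch (illustrative schemes):
#
#   ctx = CryptContext(schemes=["sha256_crypt", "des_crypt"])
#   h = ctx.hash("secret")
#   ctx.identify(h)                            # -> "sha256_crypt"
#   ctx.identify(h, resolve=True)              # -> sha256_crypt handler object
#   ctx.identify("not a hash")                 # -> None
#   ctx.identify("not a hash", required=True)  # raises ValueError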
def hash(self, secret, scheme=None, category=None, **kwds):
"""run secret through selected algorithm, returning resulting hash.
:type secret: unicode or bytes
:arg secret:
the password to hash.
:type scheme: str or None
:param scheme:
Optional scheme to use. Scheme must be one of the ones
configured for this context (see the
:ref:`schemes <context-schemes-option>` option).
If no scheme is specified, the configured default
will be used.
.. deprecated:: 1.7
Support for this keyword is deprecated, and will be removed in Passlib 2.0.
:type category: str or None
:param category:
Optional :ref:`user category <user-categories>`.
If specified, this will cause any category-specific defaults to
be used when hashing the password (e.g. different default scheme,
different default rounds values, etc).
:param \\*\\*kwds:
All other keyword options are passed to the selected algorithm's
:meth:`PasswordHash.hash() <zdppy_password_hash.ifc.PasswordHash.hash>` method.
:returns:
The secret as encoded by the specified algorithm and options.
The return value will always be a :class:`!str`.
:raises TypeError, ValueError:
* If any of the arguments have an invalid type or value.
This includes any keywords passed to the underlying hash's
:meth:`PasswordHash.hash() <zdppy_password_hash.ifc.PasswordHash.hash>` method.
.. seealso:: the :ref:`context-basic-example` example in the tutorial
"""
# XXX: could insert normalization to preferred unicode encoding here
if scheme is not None:
# TODO: offer replacement alternative.
# ``context.handler(scheme).hash()`` would work,
# but may deprecate .handler() in zdppy_password_hash 1.8.
warn("CryptContext.hash(): 'scheme' keyword is deprecated as of "
"Passlib 1.7, and will be removed in Passlib 2.0",
DeprecationWarning)
record = self._get_record(scheme, category)
strip_unused = self._strip_unused_context_kwds
if strip_unused:
strip_unused(kwds, record)
return record.hash(secret, **kwds)
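# Example sketch (illustrative; the rounds values are arbitrary, and show
# how per-category defaults affect hash()):
#
#   ctx = CryptContext(schemes=["sha256_crypt"],
#                      sha256_crypt__default_rounds=100000,
#                      admin__sha256_crypt__default_rounds=200000)
#   ctx.hash("secret")                    # uses 100000 rounds
#   ctx.hash("secret", category="admin")  # uses 200000 rounds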
@deprecated_method(deprecated="1.7", removed="2.0", replacement="CryptContext.hash()")
def encrypt(self, *args, **kwds):
"""
Legacy alias for :meth:`hash`.
.. deprecated:: 1.7
This method was renamed to :meth:`!hash` in version 1.7.
This alias will be removed in version 2.0, and should only
be used for compatibility with Passlib 1.3 - 1.6.
"""
return self.hash(*args, **kwds)
def verify(self, secret, hash, scheme=None, category=None, **kwds):
"""verify secret against an existing hash.
If no scheme is specified, this will attempt to identify
the scheme based on the contents of the provided hash
(limited to the schemes configured for this context).
It will then check whether the password verifies against the hash.
:type secret: unicode or bytes
:arg secret:
the secret to verify
:type hash: unicode or bytes
:arg hash:
hash string to compare to
if ``None`` is passed in, this will be treated as "never verifying"
:type scheme: str
:param scheme:
Optionally force context to use specific scheme.
This is usually not needed, as most hashes can be unambiguously
identified. Scheme must be one of the ones configured
for this context
(see the :ref:`schemes <context-schemes-option>` option).
.. deprecated:: 1.7
Support for this keyword is deprecated, and will be removed in Passlib 2.0.
:type category: str or None
:param category:
Optional :ref:`user category <user-categories>` string.
This is mainly used when generating new hashes; it has little
effect when verifying, and is provided mainly for symmetry.
:param \\*\\*kwds:
All additional keywords are passed to the appropriate handler,
and should match its :attr:`~zdppy_password_hash.ifc.PasswordHash.context_kwds`.
:returns:
``True`` if the password matched the hash, else ``False``.
:raises ValueError:
* if the hash did not match any of the configured :meth:`schemes`.
* if any of the arguments have an invalid value (this includes
any keywords passed to the underlying hash's
:meth:`PasswordHash.verify() <zdppy_password_hash.ifc.PasswordHash.verify>` method).
:raises TypeError:
* if any of the arguments have an invalid type (this includes
any keywords passed to the underlying hash's
:meth:`PasswordHash.verify() <zdppy_password_hash.ifc.PasswordHash.verify>` method).
.. seealso:: the :ref:`context-basic-example` example in the tutorial
"""
# XXX: could insert normalization to preferred unicode encoding here
# XXX: what about supporting a setter() callback ala django 1.4 ?
if scheme is not None:
# TODO: offer replacement alternative.
# ``context.handler(scheme).verify()`` would work,
# but may deprecate .handler() in zdppy_password_hash 1.8.
warn("CryptContext.verify(): 'scheme' keyword is deprecated as of "
"Passlib 1.7, and will be removed in Passlib 2.0",
DeprecationWarning)
if hash is None:
# convenience feature -- let apps pass in hash=None when user
# isn't found / has no hash; useful because it invokes dummy_verify()
self.dummy_verify()
return False
record = self._get_or_identify_record(hash, scheme, category)
strip_unused = self._strip_unused_context_kwds
if strip_unused:
strip_unused(kwds, record)
return record.verify(secret, hash, **kwds)
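# Example sketch (illustrative):
#
#   ctx = CryptContext(schemes=["sha256_crypt", "des_crypt"])
#   h = ctx.hash("secret")
#   ctx.verify("secret", h)     # -> True
#   ctx.verify("wrong", h)      # -> False
#   ctx.verify("secret", None)  # -> False (invokes dummy_verify() internally)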
def verify_and_update(self, secret, hash, scheme=None, category=None, **kwds):
"""verify password and re-hash the password if needed, all in a single call.
This is a convenience method which takes care of all the following:
first it verifies the password (:meth:`~CryptContext.verify`); if this is successful
it checks if the hash needs updating (:meth:`~CryptContext.needs_update`), and if so,
re-hashes the password (:meth:`~CryptContext.hash`), returning the replacement hash.
This series of steps is a very common task for applications
which wish to update deprecated hashes, and this call takes
care of all 3 steps efficiently.
:type secret: unicode or bytes
:arg secret:
the secret to verify
:type hash: unicode or bytes
:arg hash:
hash string to compare to.
if ``None`` is passed in, this will be treated as "never verifying"
:type scheme: str
:param scheme:
Optionally force context to use specific scheme.
This is usually not needed, as most hashes can be unambiguously
identified. Scheme must be one of the ones configured
for this context
(see the :ref:`schemes <context-schemes-option>` option).
.. deprecated:: 1.7
Support for this keyword is deprecated, and will be removed in Passlib 2.0.
:type category: str or None
:param category:
Optional :ref:`user category <user-categories>`.
If specified, this will cause any category-specific defaults to
be used if the password has to be re-hashed.
:param \\*\\*kwds:
all additional keywords are passed to the appropriate handler,
and should match that hash's
:attr:`PasswordHash.context_kwds <zdppy_password_hash.ifc.PasswordHash.context_kwds>`.
:returns:
This function returns a tuple containing two elements:
``(verified, replacement_hash)``. The first is a boolean
flag indicating whether the password verified,
and the second an optional replacement hash.
The tuple will always match one of the following 3 cases:
* ``(False, None)`` indicates the secret failed to verify.
* ``(True, None)`` indicates the secret verified correctly,
and the hash does not need updating.
* ``(True, str)`` indicates the secret verified correctly,
but the current hash needs to be updated. The :class:`!str`
will be the freshly generated hash, to replace the old one.
:raises TypeError, ValueError:
For the same reasons as :meth:`verify`.
.. seealso:: the :ref:`context-migration-example` example in the tutorial.
"""
# XXX: could insert normalization to preferred unicode encoding here.
if scheme is not None:
warn("CryptContext.verify(): 'scheme' keyword is deprecated as of "
"Passlib 1.7, and will be removed in Passlib 2.0",
DeprecationWarning)
if hash is None:
# convenience feature -- let apps pass in hash=None when user
# isn't found / has no hash; useful because it invokes dummy_verify()
self.dummy_verify()
return False, None
record = self._get_or_identify_record(hash, scheme, category)
strip_unused = self._strip_unused_context_kwds
if strip_unused and kwds:
clean_kwds = kwds.copy()
strip_unused(clean_kwds, record)
else:
clean_kwds = kwds
# XXX: if record is default scheme, could extend PasswordHash
# api to combine verify & needs_update to single call,
# potentially saving some round-trip parsing.
# but might make these codepaths more complex...
if not record.verify(secret, hash, **clean_kwds):
return False, None
elif record.deprecated or record.needs_update(hash, secret=secret):
# NOTE: we re-hash with default scheme, not current one.
return True, self.hash(secret, category=category, **kwds)
else:
return True, None
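# Example sketch of the common login flow (``ctx``, ``user`` and
# ``AuthError`` are hypothetical placeholders):
#
#   valid, new_hash = ctx.verify_and_update(password, user.password_hash)
#   if not valid:
#       raise AuthError("bad credentials")
#   if new_hash is not None:
#       user.password_hash = new_hash  # persist freshly upgraded hash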
#===================================================================
# missing-user helper
#===================================================================
#: secret used for dummy_verify()
_dummy_secret = "too many secrets"
@memoized_property
def _dummy_hash(self):
"""
precalculated hash for dummy_verify() to use
"""
return self.hash(self._dummy_secret)
def _reset_dummy_verify(self):
"""
flush memoized values used by dummy_verify()
"""
type(self)._dummy_hash.clear_cache(self)
def dummy_verify(self, elapsed=0):
"""
Helper that applications can call when a user wasn't found,
in order to simulate the time it would take to verify a password.
Runs verify() against a dummy hash, to simulate verification
of a real account password.
:param elapsed:
.. deprecated:: 1.7.1
this option is ignored, and will be removed in zdppy_password_hash 1.8.
.. versionadded:: 1.7
"""
self.verify(self._dummy_secret, self._dummy_hash)
return False
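# Example sketch: keeping login timing roughly constant for unknown
# accounts (``lookup_user`` and ``ctx`` are hypothetical placeholders):
#
#   user = lookup_user(username)
#   if user is None:
#       ctx.dummy_verify()  # burn roughly the same time as a real verify
#       return False
#   return ctx.verify(password, user.password_hash)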
#===================================================================
# disabled hash support
#===================================================================
def is_enabled(self, hash):
"""
test if hash represents a usable password --
i.e. does not represent an unusable password such as ``"!"``,
which is recognized by the :class:`~zdppy_password_hash.hash.unix_disabled` hash.
:raises ValueError:
if the hash is not recognized
(typically solved by adding ``unix_disabled`` to the list of schemes).
"""
return not self._identify_record(hash, None).is_disabled
def disable(self, hash=None):
"""
return a string to disable logins for user,
usually by returning a non-verifying string such as ``"!"``.
:param hash:
Callers can optionally provide the account's existing hash.
Some disabled handlers (such as :class:`!unix_disabled`)
will encode this into the returned value,
so that it can be recovered via :meth:`enable`.
:raises RuntimeError:
if this function is called w/o a disabled hasher
(such as :class:`~zdppy_password_hash.hash.unix_disabled`) included
in the list of schemes.
:returns:
hash string which will be recognized as valid by the context,
but is guaranteed to not validate against *any* password.
"""
record = self._config.disabled_record
assert record.is_disabled
return record.disable(hash)
def enable(self, hash):
"""
inverse of :meth:`disable` --
attempts to recover original hash which was converted
by a :meth:`!disable` call into a disabled hash --
thus restoring the user's original password.
:raises ValueError:
if original hash not present, or if the disabled handler doesn't
support encoding the original hash (e.g. ``django_disabled``)
:returns:
the original hash.
"""
record = self._identify_record(hash, None)
if record.is_disabled:
# XXX: should we throw error if result can't be identified by context?
return record.enable(hash)
else:
# hash wasn't a disabled hash, so return unchanged
return hash
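# Example sketch (requires a disabled-hash scheme in the context):
#
#   ctx = CryptContext(schemes=["sha256_crypt", "unix_disabled"])
#   h = ctx.hash("secret")
#   dh = ctx.disable(h)        # disabled string wrapping the original hash
#   ctx.verify("secret", dh)   # -> False for *any* password
#   ctx.enable(dh) == h        # -> True (original hash recovered)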
#===================================================================
# eoc
#===================================================================
class LazyCryptContext(CryptContext):
"""CryptContext subclass which doesn't load handlers until needed.
This is a subclass of CryptContext which takes in a set of arguments
exactly like CryptContext, but won't import any handlers
(or even parse its arguments) until
the first time one of its methods is accessed.
:arg schemes:
The first positional argument can be a list of schemes, or omitted,
just like CryptContext.
:param onload:
If a callable is passed in via this keyword,
it will be invoked at lazy-load time
with the following signature:
``onload(**kwds) -> kwds``;
where ``kwds`` is all the additional kwds passed to LazyCryptContext.
It should perform any additional deferred initialization,
and return the final dict of options to be passed to CryptContext.
.. versionadded:: 1.6
:param create_policy:
.. deprecated:: 1.6
This option will be removed in Passlib 1.8,
applications should use ``onload`` instead.
:param kwds:
All additional keywords are passed to CryptContext;
or to the *onload* function (if provided).
This is mainly used internally by modules such as :mod:`zdppy_password_hash.apps`,
which define a large number of contexts, but only a few of them will be needed
at any one time. Use of this class defers the cost of importing
the specified handlers until the context instance is actually accessed.
As well, it allows constructing a context at *module-init* time,
while using :func:`!onload()` to provide dynamic configuration
at *application-run* time.
.. note::
This class is only useful if you're referencing handler objects by name,
and don't want them imported until runtime. If you want to have the config
validated before your application runs, or are passing in already-imported
handler instances, you should use :class:`CryptContext` instead.
.. versionadded:: 1.4
"""
_lazy_kwds = None
# NOTE: the way this class works changed in 1.6.
# previously it just called _lazy_init() when ``.policy`` was
# first accessed. now that is done whenever any of the public
# attributes are accessed, and the class itself is changed
# to a regular CryptContext, to remove the overhead once it's unneeded.
def __init__(self, schemes=None, **kwds):
if schemes is not None:
kwds['schemes'] = schemes
self._lazy_kwds = kwds
def _lazy_init(self):
kwds = self._lazy_kwds
if 'create_policy' in kwds:
warn("The CryptPolicy class, and LazyCryptContext's "
"``create_policy`` keyword have been deprecated as of "
"Passlib 1.6, and will be removed in Passlib 1.8; "
"please use the ``onload`` keyword instead.",
DeprecationWarning)
create_policy = kwds.pop("create_policy")
result = create_policy(**kwds)
policy = CryptPolicy.from_source(result, _warn=False)
kwds = policy._context.to_dict()
elif 'onload' in kwds:
onload = kwds.pop("onload")
kwds = onload(**kwds)
del self._lazy_kwds
super(LazyCryptContext, self).__init__(**kwds)
self.__class__ = CryptContext
def __getattribute__(self, attr):
if (not attr.startswith("_") or attr.startswith("__")) and \
self._lazy_kwds is not None:
self._lazy_init()
return object.__getattribute__(self, attr)
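# Example sketch (the onload hook below is illustrative):
#
#   def _onload(**kwds):
#       kwds.setdefault("default", "sha256_crypt")
#       return kwds
#
#   my_context = LazyCryptContext(["sha256_crypt", "des_crypt"], onload=_onload)
#   # nothing is imported or parsed until the first method call:
#   my_context.hash("secret")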
#=============================================================================
# eof
#=============================================================================
#=============================================================================
# imports
#=============================================================================
# core
import logging; log = logging.getLogger(__name__)
from itertools import chain
# site
# pkg
from zdppy_password_hash import hash
from zdppy_password_hash.context import LazyCryptContext
from zdppy_password_hash.utils import sys_bits
# local
__all__ = [
'custom_app_context',
'django_context',
'ldap_context', 'ldap_nocrypt_context',
'mysql_context', 'mysql4_context', 'mysql3_context',
'phpass_context',
'phpbb3_context',
'postgres_context',
]
#=============================================================================
# master containing all identifiable hashes
#=============================================================================
def _load_master_config():
from zdppy_password_hash.registry import list_crypt_handlers
# get master list
schemes = list_crypt_handlers()
# exclude the ones we know have ambiguous or greedy identify() methods.
excluded = [
# frequently confused for each other
'bigcrypt',
'crypt16',
# no good identifiers
'cisco_pix',
'cisco_type7',
'htdigest',
'mysql323',
'oracle10',
# all have same size
'lmhash',
'msdcc',
'msdcc2',
'nthash',
# plaintext handlers
'plaintext',
'ldap_plaintext',
# disabled handlers
'django_disabled',
'unix_disabled',
'unix_fallback',
]
for name in excluded:
schemes.remove(name)
# return config
return dict(schemes=schemes, default="sha256_crypt")
master_context = LazyCryptContext(onload=_load_master_config)
#=============================================================================
# for quickly bootstrapping new custom applications
#=============================================================================
custom_app_context = LazyCryptContext(
# choose some reasonably strong schemes
schemes=["sha512_crypt", "sha256_crypt"],
# set some useful global options
default="sha256_crypt" if sys_bits < 64 else "sha512_crypt",
# set a good starting point for rounds selection
sha512_crypt__min_rounds = 535000,
sha256_crypt__min_rounds = 535000,
# if the admin user category is selected, make a much stronger hash,
admin__sha512_crypt__min_rounds = 1024000,
admin__sha256_crypt__min_rounds = 1024000,
)
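# Example sketch of typical custom_app_context usage:
#
#   from zdppy_password_hash.apps import custom_app_context
#   h = custom_app_context.hash("toomanysecrets")
#   custom_app_context.verify("toomanysecrets", h)  # -> True
#   # "admin" category uses the stronger min_rounds configured above:
#   custom_app_context.hash("toomanysecrets", category="admin")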
#=============================================================================
# django
#=============================================================================
#-----------------------------------------------------------------------
# 1.0
#-----------------------------------------------------------------------
_django10_schemes = [
"django_salted_sha1",
"django_salted_md5",
"django_des_crypt",
"hex_md5",
"django_disabled",
]
django10_context = LazyCryptContext(
schemes=_django10_schemes,
default="django_salted_sha1",
deprecated=["hex_md5"],
)
#-----------------------------------------------------------------------
# 1.4
#-----------------------------------------------------------------------
_django14_schemes = [
"django_pbkdf2_sha256",
"django_pbkdf2_sha1",
"django_bcrypt"
] + _django10_schemes
django14_context = LazyCryptContext(
schemes=_django14_schemes,
deprecated=_django10_schemes,
)
#-----------------------------------------------------------------------
# 1.6
#-----------------------------------------------------------------------
_django16_schemes = list(_django14_schemes)
_django16_schemes.insert(1, "django_bcrypt_sha256")
django16_context = LazyCryptContext(
schemes=_django16_schemes,
deprecated=_django10_schemes,
)
#-----------------------------------------------------------------------
# 1.10
#-----------------------------------------------------------------------
_django_110_schemes = [
"django_pbkdf2_sha256",
"django_pbkdf2_sha1",
"django_argon2",
"django_bcrypt",
"django_bcrypt_sha256",
"django_disabled",
]
django110_context = LazyCryptContext(schemes=_django_110_schemes)
#-----------------------------------------------------------------------
# 2.1
#-----------------------------------------------------------------------
_django21_schemes = list(_django_110_schemes)
_django21_schemes.remove("django_bcrypt")
django21_context = LazyCryptContext(schemes=_django21_schemes)
#-----------------------------------------------------------------------
# latest
#-----------------------------------------------------------------------
# this will always point to the latest django version supported by zdppy_password_hash
django_context = django21_context
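# Example sketch: verifying a hash taken from django's auth_user table
# (the stored value below is an illustrative placeholder, not a real hash):
#
#   from zdppy_password_hash.apps import django_context
#   stored = "pbkdf2_sha256$..."  # value of auth_user.password column
#   django_context.verify("secret", stored)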
#=============================================================================
# ldap
#=============================================================================
#: standard ldap schemes
std_ldap_schemes = [
"ldap_salted_sha512",
"ldap_salted_sha256",
"ldap_salted_sha1",
"ldap_salted_md5",
"ldap_sha1",
"ldap_md5",
"ldap_plaintext",
]
# create context with all std ldap schemes EXCEPT crypt
ldap_nocrypt_context = LazyCryptContext(std_ldap_schemes)
# create context with all possible std ldap + ldap crypt schemes
def _iter_ldap_crypt_schemes():
from zdppy_password_hash.utils import unix_crypt_schemes
return ('ldap_' + name for name in unix_crypt_schemes)
def _iter_ldap_schemes():
"""helper which iterates over supported std ldap schemes"""
return chain(std_ldap_schemes, _iter_ldap_crypt_schemes())
ldap_context = LazyCryptContext(_iter_ldap_schemes())
### create context with all std ldap schemes + crypt schemes for localhost
##def _iter_host_ldap_schemes():
## "helper which iterates over supported std ldap schemes"
## from zdppy_password_hash.handlers.ldap_digests import get_host_ldap_crypt_schemes
## return chain(std_ldap_schemes, get_host_ldap_crypt_schemes())
##ldap_host_context = LazyCryptContext(_iter_host_ldap_schemes())
#=============================================================================
# mysql
#=============================================================================
mysql3_context = LazyCryptContext(["mysql323"])
mysql4_context = LazyCryptContext(["mysql41", "mysql323"], deprecated="mysql323")
mysql_context = mysql4_context # tracks latest mysql version supported
#=============================================================================
# postgres
#=============================================================================
postgres_context = LazyCryptContext(["postgres_md5"])
#=============================================================================
# phpass & variants
#=============================================================================
def _create_phpass_policy(**kwds):
"""helper to choose default alg based on bcrypt availability"""
kwds['default'] = 'bcrypt' if hash.bcrypt.has_backend() else 'phpass'
return kwds
phpass_context = LazyCryptContext(
schemes=["bcrypt", "phpass", "bsdi_crypt"],
onload=_create_phpass_policy,
)
phpbb3_context = LazyCryptContext(["phpass"], phpass__ident="H")
# TODO: support the drupal phpass variants (see phpass homepage)
#=============================================================================
# roundup
#=============================================================================
_std_roundup_schemes = [ "ldap_hex_sha1", "ldap_hex_md5", "ldap_des_crypt", "roundup_plaintext" ]
roundup10_context = LazyCryptContext(_std_roundup_schemes)
# NOTE: 'roundup15' really applies to roundup 1.4.17+
roundup_context = roundup15_context = LazyCryptContext(
schemes=_std_roundup_schemes + [ "ldap_pbkdf2_sha1" ],
deprecated=_std_roundup_schemes,
default = "ldap_pbkdf2_sha1",
ldap_pbkdf2_sha1__default_rounds = 10000,
)
#=============================================================================
# eof
#=============================================================================
# XXX: relocate this to zdppy_password_hash.ext.apache?
#=============================================================================
# imports
#=============================================================================
from __future__ import with_statement
# core
import logging; log = logging.getLogger(__name__)
import os
from warnings import warn
# site
# pkg
from zdppy_password_hash import exc, registry
from zdppy_password_hash.context import CryptContext
from zdppy_password_hash.exc import ExpectedStringError
from zdppy_password_hash.hash import htdigest
from zdppy_password_hash.utils import render_bytes, to_bytes, is_ascii_codec
from zdppy_password_hash.utils.decor import deprecated_method
from zdppy_password_hash.utils.compat import join_bytes, unicode, BytesIO, PY3
# local
__all__ = [
'HtpasswdFile',
'HtdigestFile',
]
#=============================================================================
# constants & support
#=============================================================================
_UNSET = object()
_BCOLON = b":"
_BHASH = b"#"
# byte values that aren't allowed in fields.
_INVALID_FIELD_CHARS = b":\n\r\t\x00"
#: _CommonFile._source token types
_SKIPPED = "skipped"
_RECORD = "record"
#=============================================================================
# common helpers
#=============================================================================
class _CommonFile(object):
"""common framework for HtpasswdFile & HtdigestFile"""
#===================================================================
# instance attrs
#===================================================================
# charset encoding used by file (defaults to utf-8)
encoding = None
# whether users() and other public methods should return unicode or bytes?
# (defaults to False under PY2, True under PY3)
return_unicode = None
# if bound to local file, these will be set.
_path = None # local file path
_mtime = None # mtime when last loaded, or 0
# if true, automatically save to local file after changes are made.
autosave = False
# dict mapping key -> value for all records in database.
# (e.g. user => hash for Htpasswd)
_records = None
#: list of tokens for recreating original file contents when saving. if present,
#: will be sequence of (_SKIPPED, b"whitespace/comments") and (_RECORD, <record key>) tuples.
_source = None
#===================================================================
# alt constructors
#===================================================================
@classmethod
def from_string(cls, data, **kwds):
"""create new object from raw string.
:type data: unicode or bytes
:arg data:
database to load, as single string.
:param \\*\\*kwds:
all other keywords are the same as in the class constructor
"""
if 'path' in kwds:
raise TypeError("'path' not accepted by from_string()")
self = cls(**kwds)
self.load_string(data)
return self
@classmethod
def from_path(cls, path, **kwds):
"""create new object from file, without binding object to file.
:type path: str
:arg path:
local filepath to load from
:param \\*\\*kwds:
all other keywords are the same as in the class constructor
"""
self = cls(**kwds)
self.load(path)
return self
#===================================================================
# init
#===================================================================
def __init__(self, path=None, new=False, autoload=True, autosave=False,
encoding="utf-8", return_unicode=PY3,
):
# set encoding
if not encoding:
warn("``encoding=None`` is deprecated as of Passlib 1.6, "
"and will cause a ValueError in Passlib 1.8, "
"use ``return_unicode=False`` instead.",
DeprecationWarning, stacklevel=2)
encoding = "utf-8"
return_unicode = False
elif not is_ascii_codec(encoding):
# htpasswd/htdigest files assume 1-byte chars, and use ":" as the separator,
# so only ascii-compatible encodings are allowed.
raise ValueError("encoding must be 7-bit ascii compatible")
self.encoding = encoding
# set other attrs
self.return_unicode = return_unicode
self.autosave = autosave
self._path = path
self._mtime = 0
# init db
if not autoload:
warn("``autoload=False`` is deprecated as of Passlib 1.6, "
"and will be removed in Passlib 1.8, use ``new=True`` instead",
DeprecationWarning, stacklevel=2)
new = True
if path and not new:
self.load()
else:
self._records = {}
self._source = []
def __repr__(self):
tail = ''
if self.autosave:
tail += ' autosave=True'
if self._path:
tail += ' path=%r' % self._path
if self.encoding != "utf-8":
tail += ' encoding=%r' % self.encoding
return "<%s 0x%0x%s>" % (self.__class__.__name__, id(self), tail)
# NOTE: ``path`` is a property so that ``_mtime`` is wiped when it's set.
@property
def path(self):
return self._path
@path.setter
def path(self, value):
if value != self._path:
self._mtime = 0
self._path = value
@property
def mtime(self):
"""modify time when last loaded (if bound to a local file)"""
return self._mtime
#===================================================================
# loading
#===================================================================
def load_if_changed(self):
"""Reload from ``self.path`` only if file has changed since last load"""
if not self._path:
raise RuntimeError("%r is not bound to a local file" % self)
if self._mtime and self._mtime == os.path.getmtime(self._path):
return False
self.load()
return True
def load(self, path=None, force=True):
"""Load state from local file.
If no path is specified, attempts to load from ``self.path``.
:type path: str
:arg path: local file to load from
:type force: bool
:param force:
if ``force=False``, only load from ``self.path`` if file
has changed since last load.
.. deprecated:: 1.6
This keyword will be removed in Passlib 1.8;
Applications should use :meth:`load_if_changed` instead.
"""
if path is not None:
with open(path, "rb") as fh:
self._mtime = 0
self._load_lines(fh)
elif not force:
warn("%(name)s.load(force=False) is deprecated as of Passlib 1.6,"
"and will be removed in Passlib 1.8; "
"use %(name)s.load_if_changed() instead." %
dict(name=self.__class__.__name__),
DeprecationWarning, stacklevel=2)
return self.load_if_changed()
elif self._path:
with open(self._path, "rb") as fh:
self._mtime = os.path.getmtime(self._path)
self._load_lines(fh)
else:
raise RuntimeError("%s().path is not set, an explicit path is required" %
self.__class__.__name__)
return True
def load_string(self, data):
"""Load state from unicode or bytes string, replacing current state"""
data = to_bytes(data, self.encoding, "data")
self._mtime = 0
self._load_lines(BytesIO(data))
def _load_lines(self, lines):
"""load from sequence of lists"""
parse = self._parse_record
records = {}
source = []
skipped = b''
for idx, line in enumerate(lines):
# NOTE: per htpasswd source (https://github.com/apache/httpd/blob/trunk/support/htpasswd.c),
# lines with only whitespace, or with "#" as first non-whitespace char,
# are left alone / ignored.
tmp = line.lstrip()
if not tmp or tmp.startswith(_BHASH):
skipped += line
continue
# parse valid line
key, value = parse(line, idx+1)
# NOTE: if multiple entries for a key, we use the first one,
# which seems to match htpasswd source
if key in records:
log.warning("username occurs multiple times in source file: %r" % key)
skipped += line
continue
# flush buffer of skipped whitespace lines
if skipped:
source.append((_SKIPPED, skipped))
skipped = b''
# store new user line
records[key] = value
source.append((_RECORD, key))
# don't bother preserving trailing whitespace, but do preserve trailing comments
if skipped.rstrip():
source.append((_SKIPPED, skipped))
# NOTE: not replacing ._records until parsing succeeds, so loading is atomic.
self._records = records
self._source = source
def _parse_record(self, record, lineno): # pragma: no cover - abstract method
"""parse line of file into (key, value) pair"""
raise NotImplementedError("should be implemented in subclass")
def _set_record(self, key, value):
"""
helper for setting record which takes care of inserting source line if needed;
:returns:
bool if key already present
"""
records = self._records
existing = (key in records)
records[key] = value
if not existing:
self._source.append((_RECORD, key))
return existing
#===================================================================
# saving
#===================================================================
def _autosave(self):
"""subclass helper to call save() after any changes"""
if self.autosave and self._path:
self.save()
def save(self, path=None):
"""Save current state to file.
If no path is specified, attempts to save to ``self.path``.
"""
if path is not None:
with open(path, "wb") as fh:
fh.writelines(self._iter_lines())
elif self._path:
self.save(self._path)
self._mtime = os.path.getmtime(self._path)
else:
raise RuntimeError("%s().path is not set, cannot autosave" %
self.__class__.__name__)
def to_string(self):
"""Export current state as a string of bytes"""
return join_bytes(self._iter_lines())
# def clean(self):
# """
# discard any comments or whitespace that were being preserved from the source file,
# and re-sort keys in alphabetical order
# """
# self._source = [(_RECORD, key) for key in sorted(self._records)]
# self._autosave()
def _iter_lines(self):
"""iterator yielding lines of database"""
# NOTE: this relies on <records> being an OrderedDict so that it outputs
# records in a deterministic order.
records = self._records
if __debug__:
pending = set(records)
for action, content in self._source:
if action == _SKIPPED:
# 'content' is whitespace/comments to write
yield content
else:
assert action == _RECORD
# 'content' is record key
if content not in records:
# record was deleted
# NOTE: doing it lazily like this so deleting & re-adding user
# preserves their original location in the file.
continue
yield self._render_record(content, records[content])
if __debug__:
pending.remove(content)
if __debug__:
# sanity check that we actually wrote all the records
# (otherwise _source & _records are somehow out of sync)
assert not pending, "failed to write all records: missing=%r" % (pending,)
def _render_record(self, key, value): # pragma: no cover - abstract method
"""given key/value pair, encode as line of file"""
raise NotImplementedError("should be implemented in subclass")
#===================================================================
# field encoding
#===================================================================
def _encode_user(self, user):
"""user-specific wrapper for _encode_field()"""
return self._encode_field(user, "user")
def _encode_realm(self, realm): # pragma: no cover - abstract method
"""realm-specific wrapper for _encode_field()"""
return self._encode_field(realm, "realm")
def _encode_field(self, value, param="field"):
"""convert field to internal representation.
internal representation is always bytes. byte strings are left as-is,
unicode strings encoding using file's default encoding (or ``utf-8``
if no encoding has been specified).
:raises UnicodeEncodeError:
if unicode value cannot be encoded using default encoding.
:raises ValueError:
if resulting byte string contains a forbidden character,
or is too long (>255 bytes).
:returns:
encoded identifier as bytes
"""
if isinstance(value, unicode):
value = value.encode(self.encoding)
elif not isinstance(value, bytes):
raise ExpectedStringError(value, param)
if len(value) > 255:
raise ValueError("%s must be at most 255 characters: %r" %
(param, value))
if any(c in _INVALID_FIELD_CHARS for c in value):
raise ValueError("%s contains invalid characters: %r" %
(param, value,))
return value
def _decode_field(self, value):
"""decode field from internal representation to format
returns by users() method, etc.
:raises UnicodeDecodeError:
if unicode value cannot be decoded using default encoding.
(usually indicates wrong encoding set for file).
:returns:
field as unicode or bytes, as appropriate.
"""
assert isinstance(value, bytes), "expected value to be bytes"
if self.return_unicode:
return value.decode(self.encoding)
else:
return value
# FIXME: htpasswd doc says passwords limited to 255 chars under Windows & MPE,
# and that longer ones are truncated. this may be side-effect of those
# platforms supporting the 'plaintext' scheme. these classes don't currently
# check for this.
#===================================================================
# eoc
#===================================================================
#=============================================================================
# htpasswd context
#
# This section sets up a CryptContexts to mimic what schemes Apache
# (and the htpasswd tool) should support on the current system.
#
# Apache has long supported some basic builtin schemes (listed below),
# as well as the host's crypt() method -- though while crypt() lets it
# *verify* any scheme the host supports, it can only *generate* "des_crypt" hashes.
#
# Apache 2.4 added builtin bcrypt support (even for platforms w/o native support).
# c.f. http://httpd.apache.org/docs/2.4/programs/htpasswd.html vs the 2.2 docs.
#=============================================================================
#: set of default schemes that (if chosen) should be using bcrypt,
#: but can't due to lack of bcrypt.
_warn_no_bcrypt = set()
def _init_default_schemes():
#: pick strongest one for host
host_best = None
for name in ["bcrypt", "sha256_crypt"]:
if registry.has_os_crypt_support(name):
host_best = name
break
# check if we have a bcrypt backend -- otherwise issue warning
# XXX: would like to not spam this unless the user *requests* apache 24
bcrypt = "bcrypt" if registry.has_backend("bcrypt") else None
_warn_no_bcrypt.clear()
if not bcrypt:
_warn_no_bcrypt.update(["portable_apache_24", "host_apache_24",
"linux_apache_24", "portable", "host"])
defaults = dict(
# strongest hash builtin to specific apache version
portable_apache_24=bcrypt or "apr_md5_crypt",
portable_apache_22="apr_md5_crypt",
# strongest hash across current host & specific apache version
host_apache_24=bcrypt or host_best or "apr_md5_crypt",
host_apache_22=host_best or "apr_md5_crypt",
# strongest hash on a linux host
linux_apache_24=bcrypt or "sha256_crypt",
linux_apache_22="sha256_crypt",
)
# set latest-apache version aliases
# XXX: could check for apache install, and pick correct host 22/24 default?
# could reuse _detect_htpasswd() helper in UTs
defaults.update(
portable=defaults['portable_apache_24'],
host=defaults['host_apache_24'],
)
return defaults
#: dict mapping default alias -> appropriate scheme
htpasswd_defaults = _init_default_schemes()
def _init_htpasswd_context():
# start with schemes built into apache
schemes = [
# builtin support added in apache 2.4
# (https://bz.apache.org/bugzilla/show_bug.cgi?id=49288)
"bcrypt",
# support not "builtin" to apache, instead it requires support through host's crypt().
# adding them here to allow editing htpasswd under windows and then deploying under unix.
"sha256_crypt",
"sha512_crypt",
"des_crypt",
# apache default as of 2.2.18, and still default in 2.4
"apr_md5_crypt",
# NOTE: apache says ONLY intended for transitioning htpasswd <-> ldap
"ldap_sha1",
# NOTE: apache says ONLY supported on Windows, Netware, TPF
"plaintext"
]
# apache can verify anything supported by the native crypt(),
# though htpasswd tool can only generate a limited set of hashes.
# (this list may overlap w/ builtin apache schemes)
schemes.extend(registry.get_supported_os_crypt_schemes())
# hack to remove dups and sort into preferred order
preferred = schemes[:3] + ["apr_md5_crypt"] + schemes
schemes = sorted(set(schemes), key=preferred.index)
# create context object
return CryptContext(
schemes=schemes,
# NOTE: default will change to "portable" in zdppy_password_hash 2.0
default=htpasswd_defaults['portable_apache_22'],
# NOTE: bcrypt "2y" is required, "2b" isn't recognized by libapr (issue 95)
bcrypt__ident="2y",
)
#: CryptContext configured to match htpasswd
htpasswd_context = _init_htpasswd_context()
#=============================================================================
# htpasswd editing
#=============================================================================
class HtpasswdFile(_CommonFile):
"""class for reading & writing Htpasswd files.
The class constructor accepts the following arguments:
:type path: filepath
:param path:
Specifies path to htpasswd file, use to implicitly load from and save to.
This class has two modes of operation:
1. It can be "bound" to a local file by passing a ``path`` to the class
constructor. In this case it will load the contents of the file when
created, and the :meth:`load` and :meth:`save` methods will automatically
load from and save to that file if they are called without arguments.
2. Alternately, it can exist as an independent object, in which case
:meth:`load` and :meth:`save` will require an explicit path to be
provided whenever they are called. As well, ``autosave`` behavior
will not be available.
This feature is new in Passlib 1.6, and is the default if no
``path`` value is provided to the constructor.
This is also exposed as a readonly instance attribute.
:type new: bool
:param new:
Normally, if *path* is specified, :class:`HtpasswdFile` will
immediately load the contents of the file. However, when creating
a new htpasswd file, applications can set ``new=True`` so that
the existing file (if any) will not be loaded.
.. versionadded:: 1.6
This feature was previously enabled by setting ``autoload=False``.
That alias has been deprecated, and will be removed in Passlib 1.8
:type autosave: bool
:param autosave:
Normally, any changes made to an :class:`HtpasswdFile` instance
will not be saved until :meth:`save` is explicitly called. However,
if ``autosave=True`` is specified, any changes made will be
saved to disk immediately (assuming *path* has been set).
This is also exposed as a writeable instance attribute.
:type encoding: str
:param encoding:
Optionally specify character encoding used to read/write file
and hash passwords. Defaults to ``utf-8``, though ``latin-1``
is the only other commonly encountered encoding.
This is also exposed as a readonly instance attribute.
:type default_scheme: str
:param default_scheme:
Optionally specify default scheme to use when encoding new passwords.
This can be any of the schemes with builtin Apache support,
OR natively supported by the host OS's :func:`crypt.crypt` function.
* Builtin schemes include ``"bcrypt"`` (apache 2.4+), ``"apr_md5_crypt"``,
and ``"des_crypt"``.
* Schemes commonly supported by Unix hosts
include ``"bcrypt"``, ``"sha256_crypt"``, and ``"des_crypt"``.
In order to not have to sort out what you should use,
zdppy_password_hash offers a number of aliases, that will resolve
to the most appropriate scheme based on your needs:
* ``"portable"``, ``"portable_apache_24"`` -- pick scheme that's portable across hosts
running apache >= 2.4. **This will be the default as of Passlib 2.0**.
* ``"portable_apache_22"`` -- pick scheme that's portable across hosts
running apache >= 2.4. **This is the default up to Passlib 1.9**.
* ``"host"``, ``"host_apache_24"`` -- pick strongest scheme supported by
apache >= 2.4 and/or host OS.
* ``"host_apache_22"`` -- pick strongest scheme supported by
apache >= 2.2 and/or host OS.
.. versionadded:: 1.6
This keyword was previously named ``default``. That alias
has been deprecated, and will be removed in Passlib 1.8.
.. versionchanged:: 1.6.3
Added support for ``"bcrypt"``, ``"sha256_crypt"``, and ``"portable"`` alias.
.. versionchanged:: 1.7
Added apache 2.4 semantics, and additional aliases.
:type context: :class:`~zdppy_password_hash.context.CryptContext`
:param context:
:class:`!CryptContext` instance used to create
and verify the hashes found in the htpasswd file.
The default value is a pre-built context which supports all
of the hashes officially allowed in an htpasswd file.
This is also exposed as a readonly instance attribute.
.. warning::
This option may be used to add support for non-standard hash
formats to an htpasswd file. However, the resulting file
will probably not be usable by another application,
and particularly not by Apache.
:param autoload:
Set to ``False`` to prevent the constructor from automatically
loading the file from disk.
.. deprecated:: 1.6
This has been replaced by the *new* keyword.
Instead of setting ``autoload=False``, you should use
``new=True``. Support for this keyword will be removed
in Passlib 1.8.
:param default:
Change the default algorithm used to hash new passwords.
.. deprecated:: 1.6
This has been renamed to *default_scheme* for clarity.
Support for this alias will be removed in Passlib 1.8.
Loading & Saving
================
.. automethod:: load
.. automethod:: load_if_changed
.. automethod:: load_string
.. automethod:: save
.. automethod:: to_string
Inspection
================
.. automethod:: users
.. automethod:: check_password
.. automethod:: get_hash
Modification
================
.. automethod:: set_password
.. automethod:: delete
Alternate Constructors
======================
.. automethod:: from_string
Attributes
==========
.. attribute:: path
Path to local file that will be used as the default
for all :meth:`load` and :meth:`save` operations.
May be written to, initialized by the *path* constructor keyword.
.. attribute:: autosave
Writeable flag indicating whether changes will be automatically
written to *path*.
Errors
======
:raises ValueError:
All of the methods in this class will raise a :exc:`ValueError` if
any user name contains a forbidden character (one of ``:\\r\\n\\t\\x00``),
or is longer than 255 characters.
"""
#===================================================================
# instance attrs
#===================================================================
# NOTE: _records map stores <user> for the key, and <hash> for the value,
# both in bytes which use self.encoding
#===================================================================
# init & serialization
#===================================================================
def __init__(self, path=None, default_scheme=None, context=htpasswd_context,
**kwds):
if 'default' in kwds:
warn("``default`` is deprecated as of Passlib 1.6, "
"and will be removed in Passlib 1.8, it has been renamed "
"to ``default_scheem``.",
DeprecationWarning, stacklevel=2)
default_scheme = kwds.pop("default")
if default_scheme:
if default_scheme in _warn_no_bcrypt:
warn("HtpasswdFile: no bcrypt backends available, "
"using fallback for default scheme %r" % default_scheme,
exc.PasslibSecurityWarning)
default_scheme = htpasswd_defaults.get(default_scheme, default_scheme)
context = context.copy(default=default_scheme)
self.context = context
super(HtpasswdFile, self).__init__(path, **kwds)
def _parse_record(self, record, lineno):
# NOTE: should return (user, hash) tuple
result = record.rstrip().split(_BCOLON)
if len(result) != 2:
raise ValueError("malformed htpasswd file (error reading line %d)"
% lineno)
return result
def _render_record(self, user, hash):
return render_bytes("%s:%s\n", user, hash)
#===================================================================
# public methods
#===================================================================
def users(self):
"""
Return list of all users in database
"""
return [self._decode_field(user) for user in self._records]
##def has_user(self, user):
## "check whether entry is present for user"
## return self._encode_user(user) in self._records
##def rename(self, old, new):
## """rename user account"""
## old = self._encode_user(old)
## new = self._encode_user(new)
## hash = self._records.pop(old)
## self._records[new] = hash
## self._autosave()
def set_password(self, user, password):
"""Set password for user; adds user if needed.
:returns:
* ``True`` if existing user was updated.
* ``False`` if user account was added.
.. versionchanged:: 1.6
This method was previously called ``update``; it was renamed
to prevent ambiguity with the dictionary method.
The old alias is deprecated, and will be removed in Passlib 1.8.
"""
hash = self.context.hash(password)
return self.set_hash(user, hash)
@deprecated_method(deprecated="1.6", removed="1.8",
replacement="set_password")
def update(self, user, password):
"""set password for user"""
return self.set_password(user, password)
def get_hash(self, user):
"""Return hash stored for user, or ``None`` if user not found.
.. versionchanged:: 1.6
This method was previously named ``find``; it was renamed
for clarity. The old name is deprecated, and will be removed
in Passlib 1.8.
"""
try:
return self._records[self._encode_user(user)]
except KeyError:
return None
def set_hash(self, user, hash):
"""
semi-private helper which allows writing a hash directly;
adds user if needed.
.. warning::
does not (currently) do any validation of the hash string
.. versionadded:: 1.7
"""
# assert self.context.identify(hash), "unrecognized hash format"
if PY3 and isinstance(hash, str):
hash = hash.encode(self.encoding)
user = self._encode_user(user)
existing = self._set_record(user, hash)
self._autosave()
return existing
@deprecated_method(deprecated="1.6", removed="1.8",
replacement="get_hash")
def find(self, user):
"""return hash for user"""
return self.get_hash(user)
# XXX: rename to something more explicit, like delete_user()?
def delete(self, user):
"""Delete user's entry.
:returns:
* ``True`` if user deleted.
* ``False`` if user not found.
"""
try:
del self._records[self._encode_user(user)]
except KeyError:
return False
self._autosave()
return True
def check_password(self, user, password):
"""
Verify password for specified user.
If the hash's algorithm is marked as deprecated by the CryptContext,
the password will automatically be re-hashed upon successful verification.
:returns:
* ``None`` if user not found.
* ``False`` if user found, but password does not match.
* ``True`` if user found and password matches.
.. versionchanged:: 1.6
This method was previously called ``verify``; it was renamed
to prevent ambiguity with the :class:`!CryptContext` method.
The old alias is deprecated, and will be removed in Passlib 1.8.
"""
user = self._encode_user(user)
hash = self._records.get(user)
if hash is None:
return None
if isinstance(password, unicode):
# NOTE: encoding password to match file, making the assumption
# that server will use same encoding to hash the password.
password = password.encode(self.encoding)
ok, new_hash = self.context.verify_and_update(password, hash)
if ok and new_hash is not None:
# rehash user's password if old hash was deprecated
assert user in self._records # otherwise would have to use ._set_record()
self._records[user] = new_hash
self._autosave()
return ok
@deprecated_method(deprecated="1.6", removed="1.8",
replacement="check_password")
def verify(self, user, password):
"""verify password for user"""
return self.check_password(user, password)
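# Example sketch (file path is illustrative):
#
#   ht = HtpasswdFile("/tmp/test.htpasswd", new=True)
#   ht.set_password("alice", "secret")  # -> False (new account added)
#   ht.save()
#   ht.check_password("alice", "secret")  # -> True
#   ht.check_password("bob", "secret")    # -> None (no such user)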
#===================================================================
# eoc
#===================================================================
#=============================================================================
# htdigest editing
#=============================================================================
class HtdigestFile(_CommonFile):
"""class for reading & writing Htdigest files.
The class constructor accepts the following arguments:
:type path: filepath
:param path:
Specifies path to htdigest file, use to implicitly load from and save to.
This class has two modes of operation:
1. It can be "bound" to a local file by passing a ``path`` to the class
constructor. In this case it will load the contents of the file when
created, and the :meth:`load` and :meth:`save` methods will automatically
load from and save to that file if they are called without arguments.
2. Alternately, it can exist as an independent object, in which case
:meth:`load` and :meth:`save` will require an explicit path to be
provided whenever they are called. As well, ``autosave`` behavior
will not be available.
This feature is new in Passlib 1.6, and is the default if no
``path`` value is provided to the constructor.
This is also exposed as a readonly instance attribute.
:type default_realm: str
:param default_realm:
If ``default_realm`` is set, all the :class:`HtdigestFile`
methods that require a realm will use this value if one is not
provided explicitly. If unset, they will raise an error stating
that an explicit realm is required.
This is also exposed as a writeable instance attribute.
.. versionadded:: 1.6
:type new: bool
:param new:
Normally, if *path* is specified, :class:`HtdigestFile` will
immediately load the contents of the file. However, when creating
a new htdigest file, applications can set ``new=True`` so that
the existing file (if any) will not be loaded.
.. versionadded:: 1.6
This feature was previously enabled by setting ``autoload=False``.
That alias has been deprecated, and will be removed in Passlib 1.8
:type autosave: bool
:param autosave:
Normally, any changes made to an :class:`HtdigestFile` instance
will not be saved until :meth:`save` is explicitly called. However,
if ``autosave=True`` is specified, any changes made will be
saved to disk immediately (assuming *path* has been set).
This is also exposed as a writeable instance attribute.
:type encoding: str
:param encoding:
Optionally specify character encoding used to read/write file
and hash passwords. Defaults to ``utf-8``, though ``latin-1``
is the only other commonly encountered encoding.
This is also exposed as a readonly instance attribute.
:param autoload:
Set to ``False`` to prevent the constructor from automatically
loading the file from disk.
.. deprecated:: 1.6
This has been replaced by the *new* keyword.
Instead of setting ``autoload=False``, you should use
``new=True``. Support for this keyword will be removed
in Passlib 1.8.
Loading & Saving
================
.. automethod:: load
.. automethod:: load_if_changed
.. automethod:: load_string
.. automethod:: save
.. automethod:: to_string
Inspection
==========
.. automethod:: realms
.. automethod:: users
.. automethod:: check_password(user[, realm], password)
.. automethod:: get_hash
Modification
============
.. automethod:: set_password(user[, realm], password)
.. automethod:: delete
.. automethod:: delete_realm
Alternate Constructors
======================
.. automethod:: from_string
Attributes
==========
.. attribute:: default_realm
The default realm that will be used if one is not provided
to methods that require it. By default this is ``None``,
in which case an explicit realm must be provided for every
method call. Can be written to.
.. attribute:: path
Path to local file that will be used as the default
for all :meth:`load` and :meth:`save` operations.
May be written to, initialized by the *path* constructor keyword.
.. attribute:: autosave
Writeable flag indicating whether changes will be automatically
written to *path*.
Errors
======
:raises ValueError:
All of the methods in this class will raise a :exc:`ValueError` if
any user name or realm contains a forbidden character (one of ``:\\r\\n\\t\\x00``),
or is longer than 255 characters.
"""
#===================================================================
# instance attrs
#===================================================================
# NOTE: _records map stores (<user>,<realm>) for the key,
# and <hash> as the value, all as <self.encoding> bytes.
# NOTE: unlike htpasswd, this class doesn't use a CryptContext,
# as only one hash format is supported: htdigest.
# optionally specify default realm that will be used if none
# is provided to a method call. otherwise realm is always required.
default_realm = None
#===================================================================
# init & serialization
#===================================================================
def __init__(self, path=None, default_realm=None, **kwds):
self.default_realm = default_realm
super(HtdigestFile, self).__init__(path, **kwds)
def _parse_record(self, record, lineno):
result = record.rstrip().split(_BCOLON)
if len(result) != 3:
raise ValueError("malformed htdigest file (error reading line %d)"
% lineno)
user, realm, hash = result
return (user, realm), hash
def _render_record(self, key, hash):
user, realm = key
return render_bytes("%s:%s:%s\n", user, realm, hash)
def _require_realm(self, realm):
if realm is None:
realm = self.default_realm
if realm is None:
raise TypeError("you must specify a realm explicitly, "
"or set the default_realm attribute")
return realm
def _encode_realm(self, realm):
realm = self._require_realm(realm)
return self._encode_field(realm, "realm")
def _encode_key(self, user, realm):
return self._encode_user(user), self._encode_realm(realm)
#===================================================================
# public methods
#===================================================================
def realms(self):
"""Return list of all realms in database"""
realms = set(key[1] for key in self._records)
return [self._decode_field(realm) for realm in realms]
def users(self, realm=None):
"""Return list of all users in specified realm.
* uses ``self.default_realm`` if no realm explicitly provided.
* returns empty list if realm not found.
"""
realm = self._encode_realm(realm)
return [self._decode_field(key[0]) for key in self._records
if key[1] == realm]
##def has_user(self, user, realm=None):
## "check if user+realm combination exists"
## return self._encode_key(user,realm) in self._records
##def rename_realm(self, old, new):
## """rename all accounts in realm"""
## old = self._encode_realm(old)
## new = self._encode_realm(new)
## keys = [key for key in self._records if key[1] == old]
## for key in keys:
## hash = self._records.pop(key)
## self._set_record((key[0], new), hash)
## self._autosave()
## return len(keys)
##def rename(self, old, new, realm=None):
## """rename user account"""
## old = self._encode_user(old)
## new = self._encode_user(new)
## realm = self._encode_realm(realm)
## hash = self._records.pop((old,realm))
## self._set_record((new, realm), hash)
## self._autosave()
def set_password(self, user, realm=None, password=_UNSET):
"""Set password for user; adds user & realm if needed.
If ``self.default_realm`` has been set, this may be called
with the syntax ``set_password(user, password)``,
otherwise it must be called with all three arguments:
``set_password(user, realm, password)``.
:returns:
* ``True`` if an existing user was updated.
* ``False`` if a new user account was added.
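Example (an illustrative sketch; the realm & credentials are hypothetical)::
>>> ht = HtdigestFile(default_realm="dev")   # no path -- empty in-memory database
>>> ht.set_password("alice", "secret")       # two-arg form, uses default realm
False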
"""
if password is _UNSET:
# called w/ two args - (user, password), use default realm
realm, password = None, realm
realm = self._require_realm(realm)
hash = htdigest.hash(password, user, realm, encoding=self.encoding)
return self.set_hash(user, realm, hash)
@deprecated_method(deprecated="1.6", removed="1.8",
replacement="set_password")
def update(self, user, realm, password):
"""set password for user"""
return self.set_password(user, realm, password)
def get_hash(self, user, realm=None):
"""Return :class:`~zdppy_password_hash.hash.htdigest` hash stored for user.
* uses ``self.default_realm`` if no realm explicitly provided.
* returns ``None`` if user or realm not found.
.. versionchanged:: 1.6
This method was previously named ``find``; it was renamed
for clarity. The old name is deprecated, and will be removed
in Passlib 1.8.
"""
key = self._encode_key(user, realm)
hash = self._records.get(key)
if hash is None:
return None
if PY3:
hash = hash.decode(self.encoding)
return hash
def set_hash(self, user, realm=None, hash=_UNSET):
"""
semi-private helper which allows writing a hash directly;
adds user & realm if needed.
If ``self.default_realm`` has been set, this may be called
with the syntax ``set_hash(user, hash)``,
otherwise it must be called with all three arguments:
``set_hash(user, realm, hash)``.
.. warning::
does not (currently) do any validation of the hash string
.. versionadded:: 1.7
"""
if hash is _UNSET:
# called w/ two args - (user, hash), use default realm
realm, hash = None, realm
# assert htdigest.identify(hash), "unrecognized hash format"
if PY3 and isinstance(hash, str):
hash = hash.encode(self.encoding)
key = self._encode_key(user, realm)
existing = self._set_record(key, hash)
self._autosave()
return existing
@deprecated_method(deprecated="1.6", removed="1.8",
replacement="get_hash")
def find(self, user, realm):
"""return hash for user"""
return self.get_hash(user, realm)
# XXX: rename to something more explicit, like delete_user()?
def delete(self, user, realm=None):
"""Delete user's entry for specified realm.
if realm is not specified, uses ``self.default_realm``.
:returns:
* ``True`` if user deleted,
* ``False`` if user not found in realm.
"""
key = self._encode_key(user, realm)
try:
del self._records[key]
except KeyError:
return False
self._autosave()
return True
def delete_realm(self, realm):
"""Delete all users for specified realm.
if realm is not specified, uses ``self.default_realm``.
:returns: number of users deleted (0 if realm not found)
"""
realm = self._encode_realm(realm)
records = self._records
keys = [key for key in records if key[1] == realm]
for key in keys:
del records[key]
self._autosave()
return len(keys)
def check_password(self, user, realm=None, password=_UNSET):
"""Verify password for specified user + realm.
If ``self.default_realm`` has been set, this may be called
with the syntax ``check_password(user, password)``,
otherwise it must be called with all three arguments:
``check_password(user, realm, password)``.
:returns:
* ``None`` if user or realm not found.
* ``False`` if user found, but password does not match.
* ``True`` if user found and password matches.
.. versionchanged:: 1.6
This method was previously called ``verify``; it was renamed
to prevent ambiguity with the :class:`!CryptContext` method.
The old alias is deprecated, and will be removed in Passlib 1.8.
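Example (continuing the illustrative sketch from :meth:`set_password` above)::
>>> ht.check_password("alice", "secret")        # two-arg form, default realm
True
>>> ht.check_password("alice", "dev", "wrong")  # explicit realm, bad password
False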
"""
if password is _UNSET:
# called w/ two args - (user, password), use default realm
realm, password = None, realm
user = self._encode_user(user)
realm = self._encode_realm(realm)
hash = self._records.get((user,realm))
if hash is None:
return None
return htdigest.verify(password, hash, user, realm,
encoding=self.encoding)
@deprecated_method(deprecated="1.6", removed="1.8",
replacement="check_password")
def verify(self, user, realm, password):
"""verify password for user"""
return self.check_password(user, realm, password)
#===================================================================
# eoc
#===================================================================
#=============================================================================
# eof
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/apache.py | apache.py |
#=============================================================================
# imports
#=============================================================================
# core
import re
import logging; log = logging.getLogger(__name__)
from warnings import warn
# pkg
from zdppy_password_hash import exc
from zdppy_password_hash.exc import ExpectedTypeError, PasslibWarning
from zdppy_password_hash.ifc import PasswordHash
from zdppy_password_hash.utils import (
is_crypt_handler, has_crypt as os_crypt_present,
unix_crypt_schemes as os_crypt_schemes,
)
from zdppy_password_hash.utils.compat import unicode_or_str
from zdppy_password_hash.utils.decor import memoize_single_value
# local
__all__ = [
"register_crypt_handler_path",
"register_crypt_handler",
"get_crypt_handler",
"list_crypt_handlers",
]
#=============================================================================
# proxy object used in place of 'zdppy_password_hash.hash' module
#=============================================================================
class _PasslibRegistryProxy(object):
"""proxy module zdppy_password_hash.hash
this module is in fact an object which lazy-loads
the requested password hash algorithm from wherever it has been stored.
it acts as a thin wrapper around :func:`zdppy_password_hash.registry.get_crypt_handler`.
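for example, attribute access lazy-loads the matching handler
(a small sketch; the repr shown is indicative only)::
>>> from zdppy_password_hash import hash as hash_module
>>> hash_module.md5_crypt.name   # doctest: +SKIP
'md5_crypt'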
"""
__name__ = "zdppy_password_hash.hash"
__package__ = None
def __getattr__(self, attr):
if attr.startswith("_"):
raise AttributeError("missing attribute: %r" % (attr,))
handler = get_crypt_handler(attr, None)
if handler:
return handler
else:
raise AttributeError("unknown password hash: %r" % (attr,))
def __setattr__(self, attr, value):
if attr.startswith("_"):
# writing to private attributes should behave normally.
# (required so GAE can write to the __loader__ attribute).
object.__setattr__(self, attr, value)
else:
# writing to public attributes should be treated
# as attempting to register a handler.
register_crypt_handler(value, _attr=attr)
def __repr__(self):
return "<proxy module 'zdppy_password_hash.hash'>"
def __dir__(self):
# this adds in lazy-loaded handler names,
# otherwise this is the standard dir() implementation.
attrs = set(dir(self.__class__))
attrs.update(self.__dict__)
attrs.update(_locations)
return sorted(attrs)
# create single instance - available publicly as 'zdppy_password_hash.hash'
_proxy = _PasslibRegistryProxy()
#=============================================================================
# internal registry state
#=============================================================================
# singleton used to detect omitted keywords
_UNSET = object()
# dict mapping name -> loaded handlers (just uses proxy object's internal dict)
_handlers = _proxy.__dict__
# dict mapping names -> import path for lazy loading.
# * import path should be "module.path" or "module.path:attr"
# * if attr omitted, "name" used as default.
_locations = dict(
# NOTE: this is a hardcoded list of the handlers built into zdppy_password_hash;
# applications should call register_crypt_handler_path() to register their own.
apr_md5_crypt = "zdppy_password_hash.handlers.md5_crypt",
argon2 = "zdppy_password_hash.handlers.argon2",
atlassian_pbkdf2_sha1 = "zdppy_password_hash.handlers.pbkdf2",
bcrypt = "zdppy_password_hash.handlers.bcrypt",
bcrypt_sha256 = "zdppy_password_hash.handlers.bcrypt",
bigcrypt = "zdppy_password_hash.handlers.des_crypt",
bsd_nthash = "zdppy_password_hash.handlers.windows",
bsdi_crypt = "zdppy_password_hash.handlers.des_crypt",
cisco_pix = "zdppy_password_hash.handlers.cisco",
cisco_asa = "zdppy_password_hash.handlers.cisco",
cisco_type7 = "zdppy_password_hash.handlers.cisco",
cta_pbkdf2_sha1 = "zdppy_password_hash.handlers.pbkdf2",
crypt16 = "zdppy_password_hash.handlers.des_crypt",
des_crypt = "zdppy_password_hash.handlers.des_crypt",
django_argon2 = "zdppy_password_hash.handlers.django",
django_bcrypt = "zdppy_password_hash.handlers.django",
django_bcrypt_sha256 = "zdppy_password_hash.handlers.django",
django_pbkdf2_sha256 = "zdppy_password_hash.handlers.django",
django_pbkdf2_sha1 = "zdppy_password_hash.handlers.django",
django_salted_sha1 = "zdppy_password_hash.handlers.django",
django_salted_md5 = "zdppy_password_hash.handlers.django",
django_des_crypt = "zdppy_password_hash.handlers.django",
django_disabled = "zdppy_password_hash.handlers.django",
dlitz_pbkdf2_sha1 = "zdppy_password_hash.handlers.pbkdf2",
fshp = "zdppy_password_hash.handlers.fshp",
grub_pbkdf2_sha512 = "zdppy_password_hash.handlers.pbkdf2",
hex_md4 = "zdppy_password_hash.handlers.digests",
hex_md5 = "zdppy_password_hash.handlers.digests",
hex_sha1 = "zdppy_password_hash.handlers.digests",
hex_sha256 = "zdppy_password_hash.handlers.digests",
hex_sha512 = "zdppy_password_hash.handlers.digests",
htdigest = "zdppy_password_hash.handlers.digests",
ldap_plaintext = "zdppy_password_hash.handlers.ldap_digests",
ldap_md5 = "zdppy_password_hash.handlers.ldap_digests",
ldap_sha1 = "zdppy_password_hash.handlers.ldap_digests",
ldap_hex_md5 = "zdppy_password_hash.handlers.roundup",
ldap_hex_sha1 = "zdppy_password_hash.handlers.roundup",
ldap_salted_md5 = "zdppy_password_hash.handlers.ldap_digests",
ldap_salted_sha1 = "zdppy_password_hash.handlers.ldap_digests",
ldap_salted_sha256 = "zdppy_password_hash.handlers.ldap_digests",
ldap_salted_sha512 = "zdppy_password_hash.handlers.ldap_digests",
ldap_des_crypt = "zdppy_password_hash.handlers.ldap_digests",
ldap_bsdi_crypt = "zdppy_password_hash.handlers.ldap_digests",
ldap_md5_crypt = "zdppy_password_hash.handlers.ldap_digests",
ldap_bcrypt = "zdppy_password_hash.handlers.ldap_digests",
ldap_sha1_crypt = "zdppy_password_hash.handlers.ldap_digests",
ldap_sha256_crypt = "zdppy_password_hash.handlers.ldap_digests",
ldap_sha512_crypt = "zdppy_password_hash.handlers.ldap_digests",
ldap_pbkdf2_sha1 = "zdppy_password_hash.handlers.pbkdf2",
ldap_pbkdf2_sha256 = "zdppy_password_hash.handlers.pbkdf2",
ldap_pbkdf2_sha512 = "zdppy_password_hash.handlers.pbkdf2",
lmhash = "zdppy_password_hash.handlers.windows",
md5_crypt = "zdppy_password_hash.handlers.md5_crypt",
msdcc = "zdppy_password_hash.handlers.windows",
msdcc2 = "zdppy_password_hash.handlers.windows",
mssql2000 = "zdppy_password_hash.handlers.mssql",
mssql2005 = "zdppy_password_hash.handlers.mssql",
mysql323 = "zdppy_password_hash.handlers.mysql",
mysql41 = "zdppy_password_hash.handlers.mysql",
nthash = "zdppy_password_hash.handlers.windows",
oracle10 = "zdppy_password_hash.handlers.oracle",
oracle11 = "zdppy_password_hash.handlers.oracle",
pbkdf2_sha1 = "zdppy_password_hash.handlers.pbkdf2",
pbkdf2_sha256 = "zdppy_password_hash.handlers.pbkdf2",
pbkdf2_sha512 = "zdppy_password_hash.handlers.pbkdf2",
phpass = "zdppy_password_hash.handlers.phpass",
plaintext = "zdppy_password_hash.handlers.misc",
postgres_md5 = "zdppy_password_hash.handlers.postgres",
roundup_plaintext = "zdppy_password_hash.handlers.roundup",
scram = "zdppy_password_hash.handlers.scram",
scrypt = "zdppy_password_hash.handlers.scrypt",
sha1_crypt = "zdppy_password_hash.handlers.sha1_crypt",
sha256_crypt = "zdppy_password_hash.handlers.sha2_crypt",
sha512_crypt = "zdppy_password_hash.handlers.sha2_crypt",
sun_md5_crypt = "zdppy_password_hash.handlers.sun_md5_crypt",
unix_disabled = "zdppy_password_hash.handlers.misc",
unix_fallback = "zdppy_password_hash.handlers.misc",
)
# master regexp for detecting valid handler names
_name_re = re.compile("^[a-z][a-z0-9_]+[a-z0-9]$")
# names which aren't allowed for various reasons
# (mainly keyword conflicts in CryptContext)
_forbidden_names = frozenset(["onload", "policy", "context", "all",
"default", "none", "auto"])
#=============================================================================
# registry frontend functions
#=============================================================================
def _validate_handler_name(name):
"""helper to validate handler name
:raises ValueError:
* if empty name
* if name not lower case
* if name contains double underscores
* if name is reserved (e.g. ``context``, ``all``).
"""
if not name:
raise ValueError("handler name cannot be empty: %r" % (name,))
if name.lower() != name:
raise ValueError("name must be lower-case: %r" % (name,))
if not _name_re.match(name):
raise ValueError("invalid name (must be 3+ characters, "
" begin with a-z, and contain only underscore, a-z, "
"0-9): %r" % (name,))
if '__' in name:
raise ValueError("name may not contain double-underscores: %r" %
(name,))
if name in _forbidden_names:
raise ValueError("that name is not allowed: %r" % (name,))
return True
def register_crypt_handler_path(name, path):
"""register location to lazy-load handler when requested.
custom hashes may be registered via :func:`register_crypt_handler`,
or they may be registered by this function,
which will delay actually importing and loading the handler
until a call to :func:`get_crypt_handler` is made for the specified name.
:arg name: name of handler
:arg path: module import path
the specified module path should contain a password hash handler
called :samp:`{name}`, or the path may contain a colon,
specifying the module and module attribute to use.
for example, the following would cause ``get_handler("myhash")`` to look
for a class named ``myhash`` within the ``myapp.helpers`` module::
>>> from zdppy_password_hash.registry import registry_crypt_handler_path
>>> registry_crypt_handler_path("myhash", "myapp.helpers")
...while this form would cause ``get_handler("myhash")`` to look
for a class name ``MyHash`` within the ``myapp.helpers`` module::
>>> from zdppy_password_hash.registry import registry_crypt_handler_path
>>> registry_crypt_handler_path("myhash", "myapp.helpers:MyHash")
"""
# validate name
_validate_handler_name(name)
# validate path
if path.startswith("."):
raise ValueError("path cannot start with '.'")
if ':' in path:
if path.count(':') > 1:
raise ValueError("path cannot have more than one ':'")
if path.find('.', path.index(':')) > -1:
raise ValueError("path cannot have '.' to right of ':'")
# store location
_locations[name] = path
log.debug("registered path to %r handler: %r", name, path)
def register_crypt_handler(handler, force=False, _attr=None):
"""register password hash handler.
this method immediately registers a handler with the internal zdppy_password_hash registry,
so that it will be returned by :func:`get_crypt_handler` when requested.
:arg handler: the password hash handler to register
:param force: force override of existing handler (defaults to False)
:param _attr:
[internal kwd] if specified, ensures ``handler.name``
matches this value, or raises :exc:`ValueError`.
:raises TypeError:
if the specified object does not appear to be a valid handler.
:raises ValueError:
if the specified object's name (or other required attributes)
contain invalid values.
:raises KeyError:
if a (different) handler was already registered with
the same name, and ``force=True`` was not specified.
"""
# validate handler
if not is_crypt_handler(handler):
raise ExpectedTypeError(handler, "password hash handler", "handler")
if not handler:
raise AssertionError("``bool(handler)`` must be True")
# validate name
name = handler.name
_validate_handler_name(name)
if _attr and _attr != name:
raise ValueError("handlers must be stored only under their own name (%r != %r)" %
(_attr, name))
# check for existing handler
other = _handlers.get(name)
if other:
if other is handler:
log.debug("same %r handler already registered: %r", name, handler)
return
elif force:
log.warning("overriding previously registered %r handler: %r",
name, other)
else:
raise KeyError("another %r handler has already been registered: %r" %
(name, other))
# register handler
_handlers[name] = handler
log.debug("registered %r handler: %r", name, handler)
def get_crypt_handler(name, default=_UNSET):
"""return handler for specified password hash scheme.
this method looks up a handler for the specified scheme.
if the handler is not already loaded,
it checks if the location is known, and loads it first.
:arg name: name of handler to return
:param default: optional default value to return if no handler with specified name is found.
:raises KeyError: if no handler matching that name is found, and no default was specified.
:returns: handler attached to name, or default value (if specified).
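usage sketch (the handler repr shown is indicative only)::
>>> get_crypt_handler("sha256_crypt")            # doctest: +SKIP
<class 'zdppy_password_hash.handlers.sha2_crypt.sha256_crypt'>
>>> get_crypt_handler("no_such_hash", None) is None
True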
"""
# catch invalid names before we check _handlers,
# since it's a module dict, and exposes things like __package__, etc.
if name.startswith("_"):
if default is _UNSET:
raise KeyError("invalid handler name: %r" % (name,))
else:
return default
# check if handler is already loaded
try:
return _handlers[name]
except KeyError:
pass
# normalize name (and if changed, check dict again)
assert isinstance(name, unicode_or_str), "name must be string instance"
alt = name.replace("-","_").lower()
if alt != name:
warn("handler names should be lower-case, and use underscores instead "
"of hyphens: %r => %r" % (name, alt), PasslibWarning,
stacklevel=2)
name = alt
# try to load using new name
try:
return _handlers[name]
except KeyError:
pass
# check if lazy load mapping has been specified for this driver
path = _locations.get(name)
if path:
if ':' in path:
modname, modattr = path.split(":")
else:
modname, modattr = path, name
##log.debug("loading %r handler from path: '%s:%s'", name, modname, modattr)
# try to load the module - any import errors indicate a runtime config problem,
# usually either a missing package, or a bad path provided to register_crypt_handler_path()
mod = __import__(modname, fromlist=[modattr], level=0)
# first check if importing module triggered register_crypt_handler(),
# (this is discouraged due to its magical implicitness)
handler = _handlers.get(name)
if handler:
# XXX: issue deprecation warning here?
assert is_crypt_handler(handler), "unexpected object: name=%r object=%r" % (name, handler)
return handler
# then get real handler & register it
handler = getattr(mod, modattr)
register_crypt_handler(handler, _attr=name)
return handler
# fail!
if default is _UNSET:
raise KeyError("no crypt handler found for algorithm: %r" % (name,))
else:
return default
def list_crypt_handlers(loaded_only=False):
"""return sorted list of all known crypt handler names.
:param loaded_only: if ``True``, only returns names of handlers which have actually been loaded.
:returns: list of names of all known handlers
"""
names = set(_handlers)
if not loaded_only:
names.update(_locations)
# strip private attrs out of namespace and sort.
# TODO: make _handlers a separate list, so we don't have module namespace mixed in.
return sorted(name for name in names if not name.startswith("_"))
# NOTE: these two functions mainly exist just for the unittests...
def _has_crypt_handler(name, loaded_only=False):
"""check if handler name is known.
this is only useful for two cases:
* quickly checking if handler has already been loaded
* checking if handler exists, without actually loading it
:arg name: name of handler
:param loaded_only: if ``True``, returns False if handler exists but hasn't been loaded
"""
return (name in _handlers) or (not loaded_only and name in _locations)
def _unload_handler_name(name, locations=True):
"""unloads a handler from the registry.
.. warning::
this is an internal function,
used only by the unittests.
if loaded handler is found with specified name, it's removed.
if path to lazy load handler is found, it's removed.
missing names are a noop.
:arg name: name of handler to unload
:param locations: if False, won't purge registered handler locations (default True)
"""
if name in _handlers:
del _handlers[name]
if locations and name in _locations:
del _locations[name]
#=============================================================================
# inspection helpers
#=============================================================================
#------------------------------------------------------------------
# general
#------------------------------------------------------------------
# TODO: needs UTs
def _resolve(hasher, param="value"):
"""
internal helper to resolve argument to hasher object
"""
if is_crypt_handler(hasher):
return hasher
elif isinstance(hasher, unicode_or_str):
return get_crypt_handler(hasher)
else:
raise exc.ExpectedTypeError(hasher, unicode_or_str, param)
#: backend aliases
ANY = "any"
BUILTIN = "builtin"
OS_CRYPT = "os_crypt"
# TODO: needs UTs
def has_backend(hasher, backend=ANY, safe=False):
"""
Test if specified backend is available for hasher.
:param hasher:
Hasher name or object.
:param backend:
Name of backend, or ``"any"`` if any backend will do.
For hashers without multiple backends, will pretend
they have a single backend named ``"builtin"``.
:param safe:
By default, raises an error if the backend is unknown.
If ``safe=True``, will just return ``None`` instead.
:raises ValueError:
* if hasher name is unknown.
* if backend is unknown to hasher, and safe=False.
:return:
True if backend available, False if not available,
and None if unknown + safe=True.
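usage sketch (results depend on platform & which optional packages are installed)::
>>> has_backend("md5_crypt")                       # doctest: +SKIP
True
>>> has_backend("bcrypt", "os_crypt", safe=True)   # doctest: +SKIP
False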
"""
hasher = _resolve(hasher)
if backend == ANY:
if not hasattr(hasher, "get_backend"):
# single backend, assume it's loaded
return True
# multiple backends, check at least one is loadable
try:
hasher.get_backend()
return True
except exc.MissingBackendError:
return False
# test for specific backend
if hasattr(hasher, "has_backend"):
# multiple backends
if safe and backend not in hasher.backends:
return None
return hasher.has_backend(backend)
# single builtin backend
if backend == BUILTIN:
return True
elif safe:
return None
else:
raise exc.UnknownBackendError(hasher, backend)
#------------------------------------------------------------------
# os crypt
#------------------------------------------------------------------
# TODO: move unix_crypt_schemes list to here.
# os_crypt_schemes -- alias for unix_crypt_schemes above
# TODO: needs UTs
@memoize_single_value
def get_supported_os_crypt_schemes():
"""
return tuple of schemes which :func:`crypt.crypt` natively supports.
"""
if not os_crypt_present:
return ()
cache = tuple(name for name in os_crypt_schemes
if get_crypt_handler(name).has_backend(OS_CRYPT))
if not cache: # pragma: no cover -- sanity check
# no idea what OS this could happen on...
import platform
warn("crypt.crypt() function is present, but doesn't support any "
"formats known to zdppy_password_hash! (system=%r release=%r)" %
(platform.system(), platform.release()),
exc.PasslibRuntimeWarning)
return cache
# TODO: needs UTs
def has_os_crypt_support(hasher):
"""
check if hash is supported by native :func:`crypt.crypt` function.
if :func:`crypt.crypt` is not present, will always return False.
:param hasher:
name or hasher object.
:returns bool:
True if hash format is supported by OS, else False.
"""
return os_crypt_present and has_backend(hasher, OS_CRYPT, safe=True)
#=============================================================================
# eof
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/registry.py | registry.py |
#=============================================================================
# imports
#=============================================================================
# core
from warnings import warn
# pkg
from zdppy_password_hash.context import LazyCryptContext
from zdppy_password_hash.exc import PasslibRuntimeWarning
from zdppy_password_hash import registry
from zdppy_password_hash.utils import has_crypt, unix_crypt_schemes
# local
__all__ = [
"linux_context", "linux2_context",
"openbsd_context",
"netbsd_context",
"freebsd_context",
"host_context",
]
#=============================================================================
# linux support
#=============================================================================
# known platform names - linux2
linux_context = linux2_context = LazyCryptContext(
schemes = [ "sha512_crypt", "sha256_crypt", "md5_crypt",
"des_crypt", "unix_disabled" ],
deprecated = [ "des_crypt" ],
)
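# usage sketch -- LazyCryptContext exposes the standard CryptContext API
# (the resulting hash value is illustrative only):
#   >>> from zdppy_password_hash.hosts import linux_context
#   >>> h = linux_context.hash("password")   # uses sha512_crypt by default
#   >>> linux_context.verify("password", h)
#   True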
#=============================================================================
# bsd support
#=============================================================================
# known platform names -
# freebsd2
# freebsd3
# freebsd4
# freebsd5
# freebsd6
# freebsd7
#
# netbsd1
# referencing source via http://fxr.googlebit.com
# freebsd 6,7,8 - des, md5, bcrypt, bsd_nthash
# netbsd - des, ext, md5, bcrypt, sha1
# openbsd - des, ext, md5, bcrypt
freebsd_context = LazyCryptContext(["bcrypt", "md5_crypt", "bsd_nthash",
"des_crypt", "unix_disabled"])
openbsd_context = LazyCryptContext(["bcrypt", "md5_crypt", "bsdi_crypt",
"des_crypt", "unix_disabled"])
netbsd_context = LazyCryptContext(["bcrypt", "sha1_crypt", "md5_crypt",
"bsdi_crypt", "des_crypt", "unix_disabled"])
# XXX: include darwin in this list? it's got a BSD crypt variant,
# but that's not what it uses for user passwords.
#=============================================================================
# current host
#=============================================================================
if registry.os_crypt_present:
# NOTE: this is basically mimicking the output of os crypt(),
# except that it uses zdppy_password_hash's (usually stronger) default settings,
# and can be inspected and used much more flexibly.
def _iter_os_crypt_schemes():
"""helper which iterates over supported os_crypt schemes"""
out = registry.get_supported_os_crypt_schemes()
if out:
# only offer disabled handler if there's another scheme in front,
# as this can't actually hash any passwords
out += ("unix_disabled",)
return out
host_context = LazyCryptContext(_iter_os_crypt_schemes())
#=============================================================================
# other platforms
#=============================================================================
# known platform strings -
# aix3
# aix4
# atheos
# beos5
# darwin
# generic
# hp-ux11
# irix5
# irix6
# mac
# next3
# os2emx
# riscos
# sunos5
# unixware7
#=============================================================================
# eof
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/hosts.py | hosts.py |
#=============================================================================
# imports
#=============================================================================
from __future__ import absolute_import, division, print_function
from zdppy_password_hash.utils.compat import PY3
# core
import base64
import calendar
import json
import logging; log = logging.getLogger(__name__)
import math
import struct
import sys
import time as _time
import re
if PY3:
from urllib.parse import urlparse, parse_qsl, quote, unquote
else:
from urllib import quote, unquote
from urlparse import urlparse, parse_qsl
from warnings import warn
# site
try:
# TOTP encrypted keys only supported if cryptography (https://cryptography.io) is installed
from cryptography.hazmat.backends import default_backend as _cg_default_backend
import cryptography.hazmat.primitives.ciphers.algorithms
import cryptography.hazmat.primitives.ciphers.modes
from cryptography.hazmat.primitives import ciphers as _cg_ciphers
del cryptography
except ImportError:
log.debug("can't import 'cryptography' package, totp encryption disabled")
_cg_ciphers = _cg_default_backend = None
# pkg
from zdppy_password_hash import exc
from zdppy_password_hash.exc import TokenError, MalformedTokenError, InvalidTokenError, UsedTokenError
from zdppy_password_hash.utils import (to_unicode, to_bytes, consteq,
getrandbytes, rng, SequenceMixin, xor_bytes, getrandstr)
from zdppy_password_hash.utils.binary import BASE64_CHARS, b32encode, b32decode
from zdppy_password_hash.utils.compat import (u, unicode, native_string_types, bascii_to_str, int_types, num_types,
irange, byte_elem_value, UnicodeIO, suppress_cause)
from zdppy_password_hash.utils.decor import hybrid_method, memoized_property
from zdppy_password_hash.crypto.digest import lookup_hash, compile_hmac, pbkdf2_hmac
from zdppy_password_hash.hash import pbkdf2_sha256
# local
__all__ = [
# frontend classes
"AppWallet",
"TOTP",
# errors (defined in zdppy_password_hash.exc, but exposed here for convenience)
"TokenError",
"MalformedTokenError",
"InvalidTokenError",
"UsedTokenError",
# internal helper classes
"TotpToken",
"TotpMatch",
]
#=============================================================================
# HACK: python < 2.7.4's urlparse() won't parse query strings unless the url scheme
# is one of the schemes in the urlparse.uses_query list. 2.7 abandoned
# this, and parses query if present, regardless of the scheme.
# as a workaround for older versions, we add "otpauth" to the known list.
# this was fixed by https://bugs.python.org/issue9374, in 2.7.4 release.
#=============================================================================
if sys.version_info < (2,7,4):
from urlparse import uses_query
if "otpauth" not in uses_query:
uses_query.append("otpauth")
log.debug("registered 'otpauth' scheme with urlparse.uses_query")
del uses_query
#=============================================================================
# internal helpers
#=============================================================================
#-----------------------------------------------------------------------------
# token parsing / rendering helpers
#-----------------------------------------------------------------------------
#: regex used to strip whitespace, hyphens & '=' padding from tokens & keys
_clean_re = re.compile(u(r"\s|[-=]"), re.U)
_chunk_sizes = [4,6,5]
def _get_group_size(klen):
"""
helper for group_string() --
calculates optimal size of group for given string size.
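e.g. ``_get_group_size(16) -> 4`` (exact divisor),
while ``_get_group_size(17) -> 6`` (divisor leaving the largest remainder).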
"""
# look for exact divisor
for size in _chunk_sizes:
if not klen % size:
return size
# fallback to divisor with largest remainder
# (so chunks are as close to even as possible)
best = _chunk_sizes[0]
rem = 0
for size in _chunk_sizes:
if klen % size > rem:
best = size
rem = klen % size
return best
def group_string(value, sep="-"):
"""
reformat string into (roughly) evenly-sized groups, separated by **sep**.
useful for making tokens & keys easier to read by humans.
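e.g. (deterministic, given the chunk sizes above)::
>>> group_string("s3jdvb7qd2r7jpxx")
's3jd-vb7q-d2r7-jpxx'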
"""
klen = len(value)
size = _get_group_size(klen)
return sep.join(value[o:o+size] for o in irange(0, klen, size))
#-----------------------------------------------------------------------------
# encoding helpers
#-----------------------------------------------------------------------------
def _decode_bytes(key, format):
"""
internal TOTP() helper --
decodes key according to specified format.
"""
if format == "raw":
if not isinstance(key, bytes):
raise exc.ExpectedTypeError(key, "bytes", "key")
return key
# for encoded data, key must be either unicode or ascii-encoded bytes,
# and must contain a hex or base32 string.
key = to_unicode(key, param="key")
key = _clean_re.sub("", key).encode("utf-8") # strip whitespace & hypens
if format == "hex" or format == "base16":
return base64.b16decode(key.upper())
elif format == "base32":
return b32decode(key)
# XXX: add base64 support?
else:
raise ValueError("unknown byte-encoding format: %r" % (format,))
#=============================================================================
# OTP management
#=============================================================================
#: flag for detecting if encrypted totp support is present
AES_SUPPORT = bool(_cg_ciphers)
#: regex for validating secret tags
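#: e.g. accepts "1" or "2016-01-01"; rejects "-x" (bad first char) & "a b" (space)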
_tag_re = re.compile("(?i)^[a-z0-9][a-z0-9_.-]*$")
class AppWallet(object):
"""
This class stores application-wide secrets that can be used
to encrypt & decrypt TOTP keys for storage.
It's mostly an internal detail; applications usually just need
to pass ``secrets`` or ``secrets_path`` to :meth:`TOTP.using`.
.. seealso::
:ref:`totp-storing-instances` for more details on this workflow.
Arguments
=========
:param secrets:
Dict of application secrets to use when encrypting/decrypting
stored TOTP keys. This should include a secret to use when encrypting
new keys, but may contain additional older secrets to decrypt
existing stored keys.
The dict should map tags -> secrets, so that each secret is identified
by a unique tag. This tag will be stored along with the encrypted
key in order to determine which secret should be used for decryption.
Each tag should be a string that starts with a character in the
range ``[a-z0-9]``, with the remaining characters in ``[a-z0-9_.-]``.
It is recommended to use something like an incremental counter
("1", "2", ...), an ISO date ("2016-01-01", "2016-05-16", ...),
or a timestamp ("19803495", "19813495", ...) when assigning tags.
This mapping may be provided in three formats:
* A python dict mapping tag -> secret
* A JSON-formatted string containing the dict
* A multiline string with the format ``"tag: value\\ntag: value\\n..."``
(This last format is mainly useful when loading from a text file via **secrets_path**)
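For example, a secrets file using the multiline format might look like
(the tags & secret values here are purely illustrative)::
# lines starting with "#" and blank lines are ignored
2016-01-01: 9xvkw0bpkbQw3hGV
2016-06-01: aGVsbG8gd29ybGQx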
.. seealso:: :func:`generate_secret` to create a secret with sufficient entropy
:param secrets_path:
Alternately, callers can specify a separate file where the
application-wide secrets are stored, using either of the string
formats described in **secrets**.
:param default_tag:
Specifies which tag in **secrets** should be used as the default
for encrypting new keys. If omitted, the tags will be sorted,
and the largest tag used as the default.
if all tags are numeric, they will be sorted numerically;
otherwise they will be sorted alphabetically.
this permits tags to be assigned numerically,
or e.g. using ``YYYY-MM-DD`` dates.
:param encrypt_cost:
Optional time-cost factor for key encryption.
This value corresponds to log2() of the number of PBKDF2
rounds used.
.. warning::
The application secret(s) should be stored in a secure location by
your application, and each secret should contain a large amount
of entropy (to prevent brute-force attacks if the encrypted keys
are leaked).
:func:`generate_secret` is provided as a convenience helper
to generate a new application secret of suitable size.
Best practice is to load these values from a file via **secrets_path**,
and then have your application give up permission to read this file
once it's running.
Public Methods
==============
.. autoattribute:: has_secrets
.. autoattribute:: default_tag
Semi-Private Methods
====================
The following methods are used internally by the :class:`TOTP`
class in order to encrypt & decrypt keys using the provided application
secrets. They will generally not be publicly useful, and may have their
API changed periodically.
.. automethod:: get_secret
.. automethod:: encrypt_key
.. automethod:: decrypt_key
"""
#========================================================================
# instance attrs
#========================================================================
#: default salt size for encrypt_key() output
salt_size = 12
#: default cost (log2 of pbkdf2 rounds) for encrypt_key() output
#: NOTE: this is relatively low, since the majority of the security
#: relies on a high entropy secret to pass to AES.
encrypt_cost = 14
#: map of secret tag -> secret bytes
_secrets = None
#: tag for default secret
default_tag = None
#========================================================================
# init
#========================================================================
def __init__(self, secrets=None, default_tag=None, encrypt_cost=None,
secrets_path=None):
# TODO: allow a lot more things to be customized from here,
# e.g. setting default TOTP constructor options.
#
# init cost
#
if encrypt_cost is not None:
if isinstance(encrypt_cost, native_string_types):
encrypt_cost = int(encrypt_cost)
assert encrypt_cost >= 0
self.encrypt_cost = encrypt_cost
#
# init secrets map
#
# load secrets from file (if needed)
if secrets_path is not None:
if secrets is not None:
raise TypeError("'secrets' and 'secrets_path' are mutually exclusive")
secrets = open(secrets_path, "rt").read()
# parse & store secrets
secrets = self._secrets = self._parse_secrets(secrets)
#
# init default tag/secret
#
if secrets:
if default_tag is not None:
# verify that tag is present in map
self.get_secret(default_tag)
elif all(tag.isdigit() for tag in secrets):
default_tag = max(secrets, key=int)
else:
default_tag = max(secrets)
self.default_tag = default_tag
def _parse_secrets(self, source):
"""
parse 'secrets' parameter
:returns:
Dict[tag:str, secret:bytes]
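accepts a dict (``{"1": "secret"}``), a JSON string
(``'{"1": "secret"}'``), or a multiline ``"tag: value"`` string
(``"1: secret\\n2: other"``).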
"""
# parse string formats
# to make this easy to pass in configuration from a separate file,
# 'secrets' can be string using two formats -- json & "tag:value\n"
check_type = True
if isinstance(source, native_string_types):
if source.lstrip().startswith(("[", "{")):
# json list / dict
source = json.loads(source)
elif "\n" in source and ":" in source:
# multiline string containing series of "tag: value\n" rows;
# empty and "#\n" rows are ignored
def iter_pairs(source):
for line in source.splitlines():
line = line.strip()
if line and not line.startswith("#"):
tag, secret = line.split(":", 1)
yield tag.strip(), secret.strip()
source = iter_pairs(source)
check_type = False
else:
raise ValueError("unrecognized secrets string format")
# ensure we have iterable of (tag, value) pairs
if source is None:
return {}
elif isinstance(source, dict):
source = source.items()
# XXX: could support iterable of (tag,value) pairs, but not yet needed...
# elif check_type and (isinstance(source, str) or not isinstance(source, Iterable)):
elif check_type:
raise TypeError("'secrets' must be mapping, or list of items")
# parse into final dict, normalizing contents
return dict(self._parse_secret_pair(tag, value)
for tag, value in source)
def _parse_secret_pair(self, tag, value):
if isinstance(tag, native_string_types):
pass
elif isinstance(tag, int):
tag = str(tag)
else:
raise TypeError("tag must be unicode/string: %r" % (tag,))
if not _tag_re.match(tag):
raise ValueError("tag contains invalid characters: %r" % (tag,))
if not isinstance(value, bytes):
value = to_bytes(value, param="secret %r" % (tag,))
if not value:
raise ValueError("tag contains empty secret: %r" % (tag,))
return tag, value
#========================================================================
# accessing secrets
#========================================================================
@property
def has_secrets(self):
"""whether at least one application secret is present"""
return self.default_tag is not None
def get_secret(self, tag):
"""
resolve a secret tag to the secret (as bytes).
throws a KeyError if not found.
"""
secrets = self._secrets
if not secrets:
raise KeyError("no application secrets configured")
try:
return secrets[tag]
except KeyError:
raise suppress_cause(KeyError("unknown secret tag: %r" % (tag,)))
#========================================================================
# encrypted key helpers -- used internally by TOTP
#========================================================================
@staticmethod
def _cipher_aes_key(value, secret, salt, cost, decrypt=False):
"""
Internal helper for :meth:`encrypt_key` --
handles lowlevel encryption/decryption.
Algorithm details:
This function uses PBKDF2-HMAC-SHA256 to generate a 32-byte AES key
and a 16-byte IV from the application secret & random salt.
It then uses AES-256-CTR to encrypt/decrypt the TOTP key.
CTR mode was chosen over CBC because the main attack scenario here
is that the attacker has stolen the database, and is trying to decrypt a TOTP key
(the plaintext value here). To make it hard for them, we want every password
to decrypt to a potentially valid key -- thus need to avoid any authentication
or padding oracle attacks. While some random padding construction could be devised
to make this work for CBC mode, a stream cipher mode is just plain simpler.
OFB/CFB modes would also work here, but seeing as they have malleability
and cyclic issues (though remote and barely relevant here),
CTR was picked as the best overall choice.
"""
# make sure backend AES support is available
if _cg_ciphers is None:
raise RuntimeError("TOTP encryption requires 'cryptography' package "
"(https://cryptography.io)")
# use pbkdf2 to derive both key (32 bytes) & iv (16 bytes)
# NOTE: this requires 2 sha256 blocks to be calculated.
keyiv = pbkdf2_hmac("sha256", secret, salt=salt, rounds=(1 << cost), keylen=48)
# use AES-256-CTR to encrypt/decrypt input value
cipher = _cg_ciphers.Cipher(_cg_ciphers.algorithms.AES(keyiv[:32]),
_cg_ciphers.modes.CTR(keyiv[32:]),
_cg_default_backend())
ctx = cipher.decryptor() if decrypt else cipher.encryptor()
return ctx.update(value) + ctx.finalize()
def encrypt_key(self, key):
"""
Helper used to encrypt TOTP keys for storage.
:param key:
TOTP key to encrypt, as raw bytes.
:returns:
dict containing encrypted TOTP key & configuration parameters.
this format should be treated as opaque, and potentially subject
to change, though it is designed to be easily serialized/deserialized
(e.g. via JSON).
.. note::
This function requires installation of the external
`cryptography <https://cryptography.io>`_ package.
To give some algorithm details: This function uses AES-256-CTR to encrypt
the provided data. It takes the application secret and randomly generated salt,
and uses PBKDF2-HMAC-SHA256 to combine them and generate the AES key & IV.
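Usage sketch (output is indicative only -- the salt is randomly generated,
so actual values will differ; the secret is hypothetical)::
>>> wallet = AppWallet({"1": "9xvkw0bpkbQw"})
>>> wallet.encrypt_key(b'totp-key-bytes00')    # doctest: +SKIP
{'v': 1, 'c': 14, 't': '1', 's': '...', 'k': '...'}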
"""
if not key:
raise ValueError("no key provided")
salt = getrandbytes(rng, self.salt_size)
cost = self.encrypt_cost
tag = self.default_tag
if not tag:
raise TypeError("no application secrets configured, can't encrypt OTP key")
ckey = self._cipher_aes_key(key, self.get_secret(tag), salt, cost)
# XXX: switch to base64?
return dict(v=1, c=cost, t=tag, s=b32encode(salt), k=b32encode(ckey))
def decrypt_key(self, enckey):
"""
Helper used to decrypt TOTP keys from storage format.
Consults configured secrets to decrypt key.
:param enckey:
source object, as returned by :meth:`encrypt_key`.
:returns:
``(key, needs_recrypt)`` --
**key** will be the decrypted key, as bytes.
**needs_recrypt** will be a boolean flag indicating
whether the encryption cost or default tag is out of date,
and hence the key needs re-encrypting before storing.
.. note::
This function requires installation of the external
`cryptography <https://cryptography.io>`_ package.
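Round-trip sketch (continuing the :meth:`encrypt_key` example above)::
>>> enckey = wallet.encrypt_key(b'totp-key-bytes00')   # doctest: +SKIP
>>> wallet.decrypt_key(enckey)                         # doctest: +SKIP
(b'totp-key-bytes00', False)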
"""
if not isinstance(enckey, dict):
raise TypeError("'enckey' must be dictionary")
version = enckey.get("v", None)
needs_recrypt = False
if version == 1:
_cipher_key = self._cipher_aes_key
else:
raise ValueError("missing / unrecognized 'enckey' version: %r" % (version,))
tag = enckey['t']
cost = enckey['c']
key = _cipher_key(
value=b32decode(enckey['k']),
secret=self.get_secret(tag),
salt=b32decode(enckey['s']),
cost=cost,
)
if cost != self.encrypt_cost or tag != self.default_tag:
needs_recrypt = True
return key, needs_recrypt
#=============================================================================
# eoc
#=============================================================================
#=============================================================================
# TOTP class
#=============================================================================
#: helper to convert HOTP counter to bytes
_pack_uint64 = struct.Struct(">Q").pack
#: helper to extract value from HOTP digest
_unpack_uint32 = struct.Struct(">I").unpack
#: dummy bytes used as temp key for .using() method
_DUMMY_KEY = b"\x00" * 16
class TOTP(object):
"""
Helper for generating and verifying TOTP codes.
Given a secret key and set of configuration options, this object
offers methods for token generation, token validation, and serialization.
It can also be used to track important persistent TOTP state,
such as the last counter used.
This class accepts the following options
(only **key** and **format** may be specified as positional arguments).
:arg str key:
The secret key to use. By default, should be encoded as
a base32 string (see **format** for other encodings).
Exactly one of **key** or ``new=True`` must be specified.
:arg str format:
The encoding used by the **key** parameter. May be one of:
``"base32"`` (base32-encoded string),
``"hex"`` (hexadecimal string), or ``"raw"`` (raw bytes).
Defaults to ``"base32"``.
:param bool new:
If ``True``, a new key will be generated using :class:`random.SystemRandom`.
Exactly one of **key** or ``new=True`` must be specified.
:param str label:
Label to associate with this token when generating a URI.
Displayed to user by most OTP client applications (e.g. Google Authenticator),
and typically has format such as ``"John Smith"`` or ``"[email protected]"``.
Defaults to ``None``.
See :meth:`to_uri` for details.
:param str issuer:
String identifying the token issuer (e.g. the domain name of your service).
Used internally by some OTP client applications (e.g. Google Authenticator) to distinguish entries
which otherwise have the same label.
Optional but strongly recommended if you're rendering to a URI.
Defaults to ``None``.
See :meth:`to_uri` for details.
:param int size:
Number of bytes when generating new keys. Defaults to size of hash algorithm (e.g. 20 for SHA1).
.. warning::
Overriding the default values for ``digits``, ``period``, or ``alg`` may
cause problems with some OTP client programs (such as Google Authenticator),
which may have these defaults hardcoded.
:param int digits:
The number of digits in the generated / accepted tokens. Defaults to ``6``.
Must be in range [6 .. 10].
.. rst-class:: inline-title
.. caution::
Due to a limitation of the HOTP algorithm, the 10th digit can only take on values 0 .. 2,
and thus offers very little extra security.
:param str alg:
Name of hash algorithm to use. Defaults to ``"sha1"``.
``"sha256"`` and ``"sha512"`` are also accepted, per :rfc:`6238`.
:param int period:
The time-step period to use, in integer seconds. Defaults to ``30``.
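Quick usage sketch (the key & token values are illustrative only)::
>>> otp = TOTP('S3JDVB7QD2R7JPXX')
>>> otp.generate().token    # doctest: +SKIP
'123456'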
..
See the zdppy_password_hash documentation for a full list of attributes & methods.
"""
#=============================================================================
# class attrs
#=============================================================================
#: minimum number of bytes to allow in key, enforced by zdppy_password_hash.
# XXX: see if spec says anything relevant to this.
_min_key_size = 10
#: minimum & current serialization version (may be set independently by subclasses)
min_json_version = json_version = 1
#: AppWallet that this class will use for encrypting/decrypting keys.
#: (can be overwritten via the :meth:`TOTP.using()` constructor)
wallet = None
#: function to get system time in seconds, as needed by :meth:`generate` and :meth:`verify`.
#: defaults to :func:`time.time`, but can be overridden on a per-instance basis.
now = _time.time
#=============================================================================
# instance attrs
#=============================================================================
#---------------------------------------------------------------------------
# configuration attrs
#---------------------------------------------------------------------------
#: [private] secret key as raw :class:`!bytes`
#: see .key property for public access.
_key = None
#: [private] cached copy of encrypted secret,
#: so .to_json() doesn't have to re-encrypt on each call.
_encrypted_key = None
#: [private] cached copy of keyed HMAC function,
#: so ._generate() doesn't have to rebuild this each time
#: ._find_match() invokes it.
_keyed_hmac = None
#: number of digits in the generated tokens.
digits = 6
#: name of hash algorithm in use (e.g. ``"sha1"``)
alg = "sha1"
#: default label for :meth:`to_uri`
label = None
#: default issuer for :meth:`to_uri`
issuer = None
#: number of seconds per counter step.
#: *(TOTP uses an internal time-derived counter which
#: increments by 1 every* :attr:`!period` *seconds)*.
period = 30
#---------------------------------------------------------------------------
# state attrs
#---------------------------------------------------------------------------
#: Flag set by deserialization methods to indicate the object needs to be re-serialized.
#: This can be for a number of reasons -- encoded using deprecated format,
#: or encrypted using a deprecated key or too few rounds.
changed = False
#=============================================================================
# prototype construction
#=============================================================================
@classmethod
def using(cls, digits=None, alg=None, period=None,
issuer=None, wallet=None, now=None, **kwds):
"""
Dynamically create subtype of :class:`!TOTP` class
which has the specified defaults set.
:parameters: **digits, alg, period, issuer**:
All these options are the same as in the :class:`TOTP` constructor,
and the resulting class will use any values you specify here
as the default for all TOTP instances it creates.
:param wallet:
Optional :class:`AppWallet` that will be used for encrypting/decrypting keys.
:param secrets, secrets_path, encrypt_cost:
If specified, these options will be passed to the :class:`AppWallet` constructor,
allowing you to directly specify the secret keys that should be used
to encrypt & decrypt stored keys.
:returns:
subclass of :class:`!TOTP`.
This method is useful for creating a TOTP class configured
to use your application's secrets for encrypting & decrypting
keys, as well as create new keys using its desired configuration defaults.
As an example::
>>> # your application can create a custom class when it initializes
>>> from zdppy_password_hash.totp import TOTP, generate_secret
>>> TotpFactory = TOTP.using(secrets={"1": generate_secret()})
>>> # subsequent TOTP objects created from this factory
>>> # will use the specified secrets to encrypt their keys...
>>> totp = TotpFactory.new()
>>> totp.to_dict()
{'enckey': {'c': 14,
'k': 'H77SYXWORDPGVOQTFRR2HFUB3C45XXI7',
's': 'G5DOQPIHIBUM2OOHHADQ',
't': '1',
'v': 1},
'type': 'totp',
'v': 1}
.. seealso:: :ref:`totp-creation` and :ref:`totp-storing-instances` tutorials for a usage example
"""
# XXX: could add support for setting default match 'window' and 'reuse' policy
# :param now:
# Optional callable that should return current time for generator to use.
# Default to :func:`time.time`. This optional is generally not needed,
# and is mainly present for examples & unit-testing.
subcls = type("TOTP", (cls,), {})
def norm_param(attr, value):
"""
helper which uses constructor to validate parameter value.
it returns corresponding attribute, so we use normalized value.
"""
# NOTE: this creates *subclass* instance,
# so normalization takes into account any custom params
# already stored.
kwds = dict(key=_DUMMY_KEY, format="raw")
kwds[attr] = value
obj = subcls(**kwds)
return getattr(obj, attr)
if digits is not None:
subcls.digits = norm_param("digits", digits)
if alg is not None:
subcls.alg = norm_param("alg", alg)
if period is not None:
subcls.period = norm_param("period", period)
# XXX: add default size as configurable parameter?
if issuer is not None:
subcls.issuer = norm_param("issuer", issuer)
if kwds:
    # check mutual exclusion *before* constructing the wallet
    if wallet:
        raise TypeError("'wallet' and 'secrets' keywords are mutually exclusive")
    subcls.wallet = AppWallet(**kwds)
elif wallet is not None:
if not isinstance(wallet, AppWallet):
raise exc.ExpectedTypeError(wallet, AppWallet, "wallet")
subcls.wallet = wallet
if now is not None:
assert isinstance(now(), num_types) and now() >= 0, \
"now() function must return non-negative int/float"
subcls.now = staticmethod(now)
return subcls
#=============================================================================
# init
#=============================================================================
@classmethod
def new(cls, **kwds):
"""
convenience alias for creating a new TOTP key; same as ``TOTP(new=True)``
"""
return cls(new=True, **kwds)
def __init__(self, key=None, format="base32",
# keyword only...
new=False, digits=None, alg=None, size=None, period=None,
label=None, issuer=None, changed=False,
**kwds):
super(TOTP, self).__init__(**kwds)
if changed:
self.changed = changed
# validate & normalize alg
info = lookup_hash(alg or self.alg)
self.alg = info.name
digest_size = info.digest_size
if digest_size < 4:
    # NOTE: use resolved name, since the 'alg' parameter may be None here
    raise RuntimeError("%r hash digest too small" % info.name)
# parse or generate new key
if new:
# generate new key
if key:
raise TypeError("'key' and 'new=True' are mutually exclusive")
if size is None:
# default to digest size, per RFC 6238 Section 5.1
size = digest_size
elif size > digest_size:
# not forbidden by spec, but would just be wasted bytes.
# maybe just warn about this?
raise ValueError("'size' should be less than digest size "
"(%d)" % digest_size)
self.key = getrandbytes(rng, size)
elif not key:
raise TypeError("must specify either an existing 'key', or 'new=True'")
elif format == "encrypted":
# NOTE: this handles decrypting & setting '.key'
self.encrypted_key = key
else:
    # use existing key, encoded using specified <format>
self.key = _decode_bytes(key, format)
# enforce min key size
if len(self.key) < self._min_key_size:
# only making this fatal for new=True,
# so that existing (but ridiculously small) keys can still be used.
msg = "for security purposes, secret key must be >= %d bytes" % self._min_key_size
if new:
raise ValueError(msg)
else:
warn(msg, exc.PasslibSecurityWarning, stacklevel=1)
# validate digits
if digits is None:
digits = self.digits
if not isinstance(digits, int_types):
raise TypeError("digits must be an integer, not a %r" % type(digits))
if digits < 6 or digits > 10:
raise ValueError("digits must in range(6,11)")
self.digits = digits
# validate label
if label:
self._check_label(label)
self.label = label
# validate issuer
if issuer:
self._check_issuer(issuer)
self.issuer = issuer
# init period
if period is not None:
self._check_serial(period, "period", minval=1)
self.period = period
#=============================================================================
# helpers to verify value types & ranges
#=============================================================================
@staticmethod
def _check_serial(value, param, minval=0):
"""
check that serial value (e.g. 'counter') is non-negative integer
"""
if not isinstance(value, int_types):
raise exc.ExpectedTypeError(value, "int", param)
if value < minval:
raise ValueError("%s must be >= %d" % (param, minval))
@staticmethod
def _check_label(label):
"""
check that label doesn't contain chars forbidden by KeyURI spec
"""
if label and ":" in label:
raise ValueError("label may not contain ':'")
@staticmethod
def _check_issuer(issuer):
"""
check that issuer doesn't contain chars forbidden by KeyURI spec
"""
if issuer and ":" in issuer:
raise ValueError("issuer may not contain ':'")
#=============================================================================
# key attributes
#=============================================================================
#------------------------------------------------------------------
# raw key
#------------------------------------------------------------------
@property
def key(self):
"""
secret key as raw bytes
"""
return self._key
@key.setter
def key(self, value):
# set key
if not isinstance(value, bytes):
raise exc.ExpectedTypeError(value, bytes, "key")
self._key = value
# clear cached properties derived from key
self._encrypted_key = self._keyed_hmac = None
#------------------------------------------------------------------
# encrypted key
#------------------------------------------------------------------
@property
def encrypted_key(self):
"""
secret key, encrypted using application secret.
this matches the output of :meth:`AppWallet.encrypt_key`,
and should be treated as an opaque json serializable object.
"""
enckey = self._encrypted_key
if enckey is None:
wallet = self.wallet
if not wallet:
raise TypeError("no application secrets present, can't encrypt TOTP key")
enckey = self._encrypted_key = wallet.encrypt_key(self.key)
return enckey
@encrypted_key.setter
def encrypted_key(self, value):
wallet = self.wallet
if not wallet:
raise TypeError("no application secrets present, can't decrypt TOTP key")
self.key, needs_recrypt = wallet.decrypt_key(value)
if needs_recrypt:
# mark as changed so it gets re-encrypted & written to db
self.changed = True
else:
# cache encrypted key for re-use
self._encrypted_key = value
#------------------------------------------------------------------
# pretty-printed / encoded key helpers
#------------------------------------------------------------------
@property
def hex_key(self):
"""
secret key encoded as hexadecimal string
"""
return bascii_to_str(base64.b16encode(self.key)).lower()
@property
def base32_key(self):
"""
secret key encoded as base32 string
"""
return b32encode(self.key)
def pretty_key(self, format="base32", sep="-"):
"""
pretty-print the secret key.
This is mainly useful for situations where the user cannot get the qrcode to work,
and must enter the key manually into their TOTP client. It tries to format
the key in a manner that is easier for humans to read.
:param format:
format to output secret key. ``"hex"`` and ``"base32"`` are both accepted.
:param sep:
separator to insert to break up key visually.
can be any of ``"-"`` (the default), ``" "``, or ``False`` (no separator).
:return:
key as native string.
Usage example::
>>> t = TOTP('s3jdvb7qd2r7jpxx')
>>> t.pretty_key()
'S3JD-VB7Q-D2R7-JPXX'
"""
if format == "hex" or format == "base16":
key = self.hex_key
elif format == "base32":
key = self.base32_key
else:
raise ValueError("unknown byte-encoding format: %r" % (format,))
if sep:
key = group_string(key, sep)
return key
#=============================================================================
# time & token parsing
#=============================================================================
@classmethod
def normalize_time(cls, time):
"""
Normalize time value to unix epoch seconds.
:arg time:
Can be ``None``, :class:`!datetime`,
or unix epoch timestamp as :class:`!float` or :class:`!int`.
If ``None``, uses current system time.
Naive datetimes are treated as UTC.
:returns:
unix epoch timestamp as :class:`int`.
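Usage example (values shown are illustrative)::
>>> TOTP.normalize_time(1420099074.56)  # float is truncated to int
1420099074
>>> from datetime import datetime
>>> TOTP.normalize_time(datetime(2015, 1, 1))  # naive datetime treated as UTC
1420070400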
"""
if isinstance(time, int_types):
return time
elif isinstance(time, float):
return int(time)
elif time is None:
return int(cls.now())
elif hasattr(time, "utctimetuple"):
# coerce datetime to UTC timestamp
# NOTE: utctimetuple() assumes naive datetimes are in UTC
# NOTE: we explicitly *don't* want microseconds.
return calendar.timegm(time.utctimetuple())
else:
raise exc.ExpectedTypeError(time, "int, float, or datetime", "time")
def _time_to_counter(self, time):
"""
convert timestamp to HOTP counter using :attr:`period`.
"""
return time // self.period
def _counter_to_time(self, counter):
"""
convert HOTP counter to timestamp using :attr:`period`.
"""
return counter * self.period
@hybrid_method
def normalize_token(self_or_cls, token):
"""
Normalize OTP token representation:
strips whitespace, converts integers to a zero-padded string,
validates token content & number of digits.
This is a hybrid method -- it can be called at the class level,
as ``TOTP.normalize_token()``, or the instance level as ``TOTP().normalize_token()``.
It will normalize to the instance-specific number of :attr:`~TOTP.digits`,
or use the class default.
:arg token:
token as ascii bytes, unicode, or an integer.
:raises ValueError:
if token has wrong number of digits, or contains non-numeric characters.
:returns:
token as :class:`!unicode` string, containing only digits 0-9.
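Usage example (illustrative; assumes the class-level default of 6 digits)::
>>> TOTP.normalize_token(' 123-456 ')
'123456'
>>> TOTP.normalize_token(123456)
'123456'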
"""
digits = self_or_cls.digits
if isinstance(token, int_types):
token = u("%0*d") % (digits, token)
else:
token = to_unicode(token, param="token")
token = _clean_re.sub(u(""), token)
if not token.isdigit():
raise MalformedTokenError("Token must contain only the digits 0-9")
if len(token) != digits:
raise MalformedTokenError("Token must have exactly %d digits" % digits)
return token
#=============================================================================
# token generation
#=============================================================================
# # debug helper
# def generate_range(self, size, time=None):
# counter = self._time_to_counter(time) - (size + 1) // 2
# end = counter + size
# while counter <= end:
# token = self._generate(counter)
# yield TotpToken(self, token, counter)
# counter += 1
def generate(self, time=None):
"""
Generate token for specified time
(uses current time if none specified).
:arg time:
Can be ``None``, a :class:`!datetime`,
or :class:`!float` / :class:`!int` unix epoch timestamp.
If ``None`` (the default), uses current system time.
Naive datetimes are treated as UTC.
:returns:
A :class:`TotpToken` instance, which can be treated
as a sequence of ``(token, expire_time)`` -- see that class
for more details.
Usage example::
>>> # generate a new token, wrapped in a TotpToken instance...
>>> otp = TOTP('s3jdvb7qd2r7jpxx')
>>> otp.generate(1419622739)
<TotpToken token='897212' expire_time=1419622740>
>>> # when you just need the token...
>>> otp.generate(1419622739).token
'897212'
"""
time = self.normalize_time(time)
counter = self._time_to_counter(time)
if counter < 0:
raise ValueError("timestamp must be >= 0")
token = self._generate(counter)
return TotpToken(self, token, counter)
def _generate(self, counter):
"""
base implementation of HOTP token generation algorithm.
:arg counter: HOTP counter, as non-negative integer
:returns: token as unicode string
"""
# generate digest
assert isinstance(counter, int_types), "counter must be integer"
assert counter >= 0, "counter must be non-negative"
keyed_hmac = self._keyed_hmac
if keyed_hmac is None:
keyed_hmac = self._keyed_hmac = compile_hmac(self.alg, self.key)
digest = keyed_hmac(_pack_uint64(counter))
digest_size = keyed_hmac.digest_info.digest_size
assert len(digest) == digest_size, "digest_size: sanity check failed"
# derive 31-bit token value
assert digest_size >= 20, "digest_size: sanity check 2 failed" # otherwise 0xF+4 will run off end of hash.
offset = byte_elem_value(digest[-1]) & 0xF
value = _unpack_uint32(digest[offset:offset+4])[0] & 0x7fffffff
# render to decimal string, return last <digits> chars
# NOTE: the 10'th digit is not as secure, as it can only take on values 0-2, not 0-9,
# due to 31-bit mask on int ">I". But some servers / clients use it :|
# if 31-bit mask removed (which breaks spec), would only get values 0-4.
digits = self.digits
assert 0 < digits < 11, "digits: sanity check failed"
return (u("%0*d") % (digits, value))[-digits:]
#=============================================================================
# token verification
#=============================================================================
@classmethod
def verify(cls, token, source, **kwds):
r"""
Convenience wrapper around :meth:`TOTP.from_source` and :meth:`TOTP.match`.
This parses a TOTP key & configuration from the specified source,
and tries to match the token.
It's designed to parallel the :meth:`zdppy_password_hash.ifc.PasswordHash.verify` method.
:param token:
Token string to match.
:param source:
Serialized TOTP key.
Can be anything accepted by :meth:`TOTP.from_source`.
:param \\*\\*kwds:
All additional keywords passed to :meth:`TOTP.match`.
:return:
A :class:`TotpMatch` instance, or raises a :exc:`TokenError`.
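Usage example (data is illustrative, reusing the :meth:`match` example values below;
assumes the current json format version is 1)::
>>> source = '{"key":"S3JDVB7QD2R7JPXX","type":"totp","v":1}'
>>> TOTP.verify('897212', source, time=1419622729)
<TotpMatch counter=47320757 time=1419622729 cache_seconds=60>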
"""
return cls.from_source(source).match(token, **kwds)
def match(self, token, time=None, window=30, skew=0, last_counter=None):
"""
Match TOTP token against specified timestamp.
Searches within a window before & after the provided time,
in order to account for transmission delay and small amounts of skew in the client's clock.
:arg token:
Token to validate.
may be integer or string (whitespace and hyphens are ignored).
:param time:
Unix epoch timestamp, can be any of :class:`!float`, :class:`!int`, or :class:`!datetime`.
if ``None`` (the default), uses current system time.
*this should correspond to the time the token was received from the client*.
:param int window:
How far backward and forward in time to search for a match.
Measured in seconds. Defaults to ``30``. Typically only useful if set
to multiples of :attr:`period`.
:param int skew:
Adjust timestamp by specified value, to account for excessive
client clock skew. Measured in seconds. Defaults to ``0``.
Negative skew (the common case) indicates transmission delay,
and/or that the client clock is running behind the server.
Positive skew indicates the client clock is running ahead of the server
(and by enough that it cancels out any negative skew added by
the transmission delay).
You should ensure the server clock uses a reliable time source such as NTP,
so that only the client clock's inaccuracy needs to be accounted for.
This is an advanced parameter that should usually be left at ``0``;
The **window** parameter is usually enough to account
for any observed transmission delay.
:param last_counter:
Optional value of the last counter that was successfully used.
If specified, verify will never search earlier counters,
no matter how large the window is.
Useful when the client has previously authenticated,
and thus should never provide a token older than the previously
verified value.
:raises ~zdppy_password_hash.exc.TokenError:
If the token is malformed, fails to match, or has already been used.
:returns TotpMatch:
Returns a :class:`TotpMatch` instance on successful match.
Can be treated as tuple of ``(counter, time)``.
Raises error if token is malformed / can't be verified.
Usage example::
>>> totp = TOTP('s3jdvb7qd2r7jpxx')
>>> # valid token for this time period
>>> totp.match('897212', 1419622729)
<TotpMatch counter=47320757 time=1419622729 cache_seconds=60>
>>> # token from counter step 30 sec ago (within allowed window)
>>> totp.match('000492', 1419622729)
<TotpMatch counter=47320756 time=1419622729 cache_seconds=60>
>>> # invalid token -- token from 60 sec ago (outside of window)
>>> totp.match('760389', 1419622729)
Traceback:
...
InvalidTokenError: Token did not match
"""
time = self.normalize_time(time)
self._check_serial(window, "window")
client_time = time + skew
if last_counter is None:
last_counter = -1
start = max(last_counter, self._time_to_counter(client_time - window))
end = self._time_to_counter(client_time + window) + 1
# XXX: could pass 'expected = _time_to_counter(client_time + TRANSMISSION_DELAY)'
# to the _find_match() method, would help if window set to very large value.
counter = self._find_match(token, start, end)
assert counter >= last_counter, "sanity check failed: counter went backward"
if counter == last_counter:
raise UsedTokenError(expire_time=(last_counter + 1) * self.period)
# NOTE: By returning match tied to <time>, not <client_time>, we're
# causing .skipped to reflect the observed skew, independent of
# the 'skew' param. This is deliberately done so that caller
# can use historical .skipped values to estimate future skew.
return TotpMatch(self, counter, time, window)
def _find_match(self, token, start, end, expected=None):
"""
helper for verify() --
returns counter value within specified range that matches token.
:arg token:
token value to match (will be normalized internally)
:arg start:
starting counter value to check
:arg end:
check up to (but not including) this counter value
:arg expected:
optional expected value where search should start,
to help speed up searches.
:raises ~zdppy_password_hash.exc.TokenError:
If the token is malformed, or fails to verify.
:returns:
counter value that matched
"""
token = self.normalize_token(token)
if start < 0:
start = 0
if end <= start:
raise InvalidTokenError()
generate = self._generate
if not (expected is None or expected < start) and consteq(token, generate(expected)):
return expected
# XXX: if (end - start) is very large (e.g. for resync purposes),
# could start with expected value, and work outward from there,
# alternately checking before & after it until match is found.
# XXX: can't use irange(start, end) here since py2x/win32
# throws error on values >= (1<<31), which 'end' can be.
counter = start
while counter < end:
if consteq(token, generate(counter)):
return counter
counter += 1
raise InvalidTokenError()
#-------------------------------------------------------------------------
# TODO: resync(self, tokens, time=None, min_tokens=10, window=100)
# helper to re-synchronize using series of sequential tokens,
# all of which must validate; per RFC recommendation.
# NOTE: need to make sure this function is constant time
# (i.e. scans ALL tokens, and doesn't short-circuit after first mismatch)
#-------------------------------------------------------------------------
#=============================================================================
# generic parsing
#=============================================================================
@classmethod
def from_source(cls, source):
"""
Load / create a TOTP object from a serialized source.
This acts as a wrapper for the various deserialization methods:
* TOTP URIs are handed off to :meth:`from_uri`
* Any other strings are handed off to :meth:`from_json`
* Dicts are handed off to :meth:`from_dict`
:param source:
Serialized TOTP object.
:raises ValueError:
If the key has been encrypted, but the application secret isn't available;
or if the string cannot be recognized, parsed, or decoded.
See :meth:`TOTP.using()` for how to configure application secrets.
:returns:
a :class:`TOTP` instance.
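Usage example (sources shown are illustrative; the dict assumes json format version 1)::
>>> # from an otpauth uri
>>> otp = TOTP.from_source('otpauth://totp/Example:[email protected]?'
...                        'secret=JBSWY3DPEHPK3PXP&issuer=Example')
>>> # from a dict, as returned by to_dict()
>>> otp = TOTP.from_source({'v': 1, 'type': 'totp', 'key': 'JBSWY3DPEHPK3PXP'})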
"""
if isinstance(source, TOTP):
# return object unchanged if they share same wallet.
# otherwise make a new one that's bound to expected wallet.
if cls.wallet == source.wallet:
return source
source = source.to_dict(encrypt=False)
if isinstance(source, dict):
return cls.from_dict(source)
# NOTE: letting to_unicode() raise TypeError in this case
source = to_unicode(source, param="totp source")
if source.startswith("otpauth://"):
return cls.from_uri(source)
else:
return cls.from_json(source)
#=============================================================================
# uri parsing
#=============================================================================
@classmethod
def from_uri(cls, uri):
"""
create an OTP instance from a URI (such as returned by :meth:`to_uri`).
:returns:
:class:`TOTP` instance.
:raises ValueError:
if the uri cannot be parsed or contains errors.
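Usage example (uri shown is illustrative)::
>>> otp = TOTP.from_uri('otpauth://totp/Example:[email protected]?'
...                     'secret=JBSWY3DPEHPK3PXP&issuer=Example')
>>> otp.base32_key
'JBSWY3DPEHPK3PXP'
>>> otp.issuer, otp.label
('Example', '[email protected]')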
.. seealso:: :ref:`totp-configuring-clients` tutorial for a usage example
"""
# check for valid uri
uri = to_unicode(uri, param="uri").strip()
result = urlparse(uri)
if result.scheme != "otpauth":
raise cls._uri_parse_error("wrong uri scheme")
# validate netloc, and hand off to helper
cls._check_otp_type(result.netloc)
return cls._from_parsed_uri(result)
@classmethod
def _check_otp_type(cls, type):
"""
validate otp URI type is supported.
returns True or raises appropriate error.
"""
if type == "totp":
return True
if type == "hotp":
raise NotImplementedError("HOTP not supported")
raise ValueError("unknown otp type: %r" % type)
@classmethod
def _from_parsed_uri(cls, result):
"""
internal from_uri() helper --
handles parsing a validated TOTP URI
:param result:
a urlparse() instance
:returns:
cls instance
"""
# decode label from uri path
label = result.path
if label.startswith("/") and len(label) > 1:
label = unquote(label[1:])
else:
raise cls._uri_parse_error("missing label")
# extract old-style issuer prefix
if ":" in label:
try:
issuer, label = label.split(":")
except ValueError: # too many ":"
raise cls._uri_parse_error("malformed label")
else:
issuer = None
if label:
# NOTE: KeyURI spec says there may be leading spaces
label = label.strip() or None
# parse query params
params = dict(label=label)
for k, v in parse_qsl(result.query):
if k in params:
raise cls._uri_parse_error("duplicate parameter (%r)" % k)
params[k] = v
# synchronize issuer prefix w/ issuer param
if issuer:
if "issuer" not in params:
params['issuer'] = issuer
elif params['issuer'] != issuer:
raise cls._uri_parse_error("conflicting issuer identifiers")
# convert query params to constructor kwds, and call constructor
return cls(**cls._adapt_uri_params(**params))
@classmethod
def _adapt_uri_params(cls, label=None, secret=None, issuer=None,
digits=None, algorithm=None, period=None,
**extra):
"""
from_uri() helper --
converts uri params into constructor args.
"""
assert label, "from_uri() failed to provide label"
if not secret:
raise cls._uri_parse_error("missing 'secret' parameter")
kwds = dict(label=label, issuer=issuer, key=secret, format="base32")
if digits:
kwds['digits'] = cls._uri_parse_int(digits, "digits")
if algorithm:
kwds['alg'] = algorithm
if period:
kwds['period'] = cls._uri_parse_int(period, "period")
if extra:
# malicious uri, deviation from spec, or newer revision of spec?
# in any case, we issue a warning and ignore the extra params.
warn("%s: unexpected parameters encountered in otp uri: %r" %
(cls, extra), exc.PasslibRuntimeWarning)
return kwds
@staticmethod
def _uri_parse_error(reason):
"""uri parsing helper -- creates preformatted error message"""
return ValueError("Invalid otpauth uri: %s" % (reason,))
@classmethod
def _uri_parse_int(cls, source, param):
"""uri parsing helper -- int() wrapper"""
try:
return int(source)
except ValueError:
raise cls._uri_parse_error("Malformed %r parameter" % param)
#=============================================================================
# uri rendering
#=============================================================================
def to_uri(self, label=None, issuer=None):
"""
Serialize key and configuration into a URI, per
Google Auth's `KeyUriFormat <http://code.google.com/p/google-authenticator/wiki/KeyUriFormat>`_.
:param str label:
Label to associate with this token when generating a URI.
Displayed to user by most OTP client applications (e.g. Google Authenticator),
and typically has format such as ``"John Smith"`` or ``"[email protected]"``.
Defaults to **label** constructor argument. Must be provided in one or the other location.
May not contain ``:``.
:param str issuer:
String identifying the token issuer (e.g. the domain or canonical name of your service).
Optional but strongly recommended if you're rendering to a URI.
Used internally by some OTP client applications (e.g. Google Authenticator) to distinguish entries
which otherwise have the same label.
Defaults to **issuer** constructor argument, or ``None``.
May not contain ``:``.
:raises ValueError:
* if a label was not provided either as an argument, or in the constructor.
* if the label or issuer contains invalid characters.
:returns:
all the configuration information for this OTP token generator,
encoded into a URI.
These URIs are frequently converted to a QRCode for transferring
to a TOTP client application such as Google Auth.
Usage example::
>>> from zdppy_password_hash.totp import TOTP
>>> tp = TOTP('s3jdvb7qd2r7jpxx')
>>> uri = tp.to_uri("[email protected]", "myservice.another-example.org")
>>> uri
'otpauth://totp/[email protected]?secret=S3JDVB7QD2R7JPXX&issuer=myservice.another-example.org'
.. versionchanged:: 1.7.2
This method now prepends the issuer to the URI label. This is recommended by the KeyURI
specification, for compatibility with older clients.
"""
# encode label
if label is None:
label = self.label
if not label:
raise ValueError("a label must be specified as argument, or in the constructor")
self._check_label(label)
# NOTE: reference examples in spec seem to indicate the '@' in a label
# shouldn't be escaped, though spec doesn't explicitly address this.
# XXX: is '/' ok to leave unencoded?
label = quote(label, '@')
# encode query parameters
params = self._to_uri_params()
if issuer is None:
issuer = self.issuer
if issuer:
self._check_issuer(issuer)
# NOTE: per KeyURI spec, including issuer as part of label is deprecated,
# in favor of adding it to query params. however, some QRCode clients
# don't recognize the 'issuer' query parameter, so spec recommends (as of 2018-7)
# to include both.
label = "%s:%s" % (quote(issuer, '@'), label)
params.append(("issuer", issuer))
# NOTE: not using urllib.urlencode() because it encodes ' ' as '+';
# but spec says to use '%20', and not sure how fragile
# the various totp clients' parsers are.
param_str = u("&").join(u("%s=%s") % (key, quote(value, '')) for key, value in params)
assert param_str, "param_str should never be empty"
# render uri
return u("otpauth://totp/%s?%s") % (label, param_str)
def _to_uri_params(self):
"""return list of (key, param) entries for URI"""
args = [("secret", self.base32_key)]
if self.alg != "sha1":
args.append(("algorithm", self.alg.upper()))
if self.digits != 6:
args.append(("digits", str(self.digits)))
if self.period != 30:
args.append(("period", str(self.period)))
return args
#=============================================================================
# json rendering / parsing
#=============================================================================
@classmethod
def from_json(cls, source):
"""
Load / create an OTP object from a serialized json string
(as generated by :meth:`to_json`).
:arg source:
Serialized output from :meth:`to_json`, as unicode or ascii bytes.
:raises ValueError:
If the key has been encrypted, but the application secret isn't available;
or if the string cannot be recognized, parsed, or decoded.
See :meth:`TOTP.using()` for how to configure application secrets.
:returns:
a :class:`TOTP` instance.
.. seealso:: :ref:`totp-storing-instances` tutorial for a usage example
"""
source = to_unicode(source, param="json source")
return cls.from_dict(json.loads(source))
def to_json(self, encrypt=None):
"""
Serialize configuration & internal state to a json string,
mainly useful for persisting client-specific state in a database.
All keywords passed to :meth:`to_dict`.
:returns:
json string containing serialized configuration & state.
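Usage example (output shown is illustrative; assumes no application secrets
are configured, and json format version 1)::
>>> TOTP('s3jdvb7qd2r7jpxx').to_json()
'{"key":"S3JDVB7QD2R7JPXX","type":"totp","v":1}'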
"""
state = self.to_dict(encrypt=encrypt)
return json.dumps(state, sort_keys=True, separators=(",", ":"))
#=============================================================================
# dict rendering / parsing
#=============================================================================
@classmethod
def from_dict(cls, source):
"""
Load / create a TOTP object from a dictionary
(as generated by :meth:`to_dict`)
:param source:
dict containing serialized TOTP key & configuration.
:raises ValueError:
If the key has been encrypted, but the application secret isn't available;
or if the dict cannot be recognized, parsed, or decoded.
See :meth:`TOTP.using()` for how to configure application secrets.
:returns:
A :class:`TOTP` instance.
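Usage example (dict shown is illustrative; assumes json format version 1)::
>>> otp = TOTP.from_dict({'v': 1, 'type': 'totp', 'key': 'JBSWY3DPEHPK3PXP'})
>>> otp.base32_key
'JBSWY3DPEHPK3PXP'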
.. seealso:: :ref:`totp-storing-instances` tutorial for a usage example
"""
if not isinstance(source, dict) or "type" not in source:
raise cls._dict_parse_error("unrecognized format")
return cls(**cls._adapt_dict_kwds(**source))
@classmethod
def _adapt_dict_kwds(cls, type, **kwds):
"""
Internal helper for .from_json() --
Adapts serialized json dict into constructor keywords.
"""
# default json format is just serialization of constructor kwds.
# XXX: just pass all this through to _from_json / constructor?
# go ahead and mark as changed (needs re-saving) if the version is too old
assert cls._check_otp_type(type)
ver = kwds.pop("v", None)
if not ver or ver < cls.min_json_version or ver > cls.json_version:
raise cls._dict_parse_error("missing/unsupported version (%r)" % (ver,))
elif ver != cls.json_version:
# mark older version as needing re-serializing
kwds['changed'] = True
if 'enckey' in kwds:
# handing encrypted key off to constructor, which handles the
# decryption. this lets it get ahold of (and store) the original
# encrypted key, so if to_json() is called again, the encrypted
# key can be re-used.
# XXX: wallet is known at this point, could decrypt key here.
assert 'key' not in kwds # shouldn't be present w/ enckey
kwds.update(key=kwds.pop("enckey"), format="encrypted")
elif 'key' not in kwds:
raise cls._dict_parse_error("missing 'enckey' / 'key'")
# XXX: should set changed=True if an active wallet is available,
# and the source wasn't encrypted.
kwds.pop("last_counter", None) # extract legacy counter parameter
return kwds
@staticmethod
def _dict_parse_error(reason):
"""dict parsing helper -- creates preformatted error message"""
return ValueError("Invalid totp data: %s" % (reason,))
def to_dict(self, encrypt=None):
"""
Serialize configuration & internal state to a dict,
mainly useful for persisting client-specific state in a database.
:param encrypt:
Whether the output should be encrypted.
* ``None`` (the default) -- uses encrypted key if application
secrets are available, otherwise uses plaintext key.
* ``True`` -- uses encrypted key, or raises TypeError
if application secret wasn't provided to OTP constructor.
* ``False`` -- uses raw key.
:returns:
dictionary, containing basic (json serializable) datatypes.
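Usage example (output shown is illustrative; assumes no application secrets
are configured, and json format version 1)::
>>> TOTP('s3jdvb7qd2r7jpxx').to_dict()
{'v': 1, 'type': 'totp', 'key': 'S3JDVB7QD2R7JPXX'}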
"""
# NOTE: 'type' may seem redundant, but using it so code can try to
# detect that this *is* a TOTP json string / dict.
state = dict(v=self.json_version, type="totp")
if self.alg != "sha1":
state['alg'] = self.alg
if self.digits != 6:
state['digits'] = self.digits
if self.period != 30:
state['period'] = self.period
# XXX: should we include label as part of json format?
if self.label:
state['label'] = self.label
issuer = self.issuer
if issuer and issuer != type(self).issuer:
# (omit issuer if it matches class default)
state['issuer'] = issuer
if encrypt is None:
wallet = self.wallet
encrypt = wallet and wallet.has_secrets
if encrypt:
state['enckey'] = self.encrypted_key
else:
state['key'] = self.base32_key
# NOTE: in the future, may add a "history" parameter
# containing a list of (time, skipped) pairs, encoding
# the last X successful verifications, to allow persisting
# & estimating client clock skew over time.
return state
#=============================================================================
# eoc
#=============================================================================
#=============================================================================
# TOTP helpers
#=============================================================================
class TotpToken(SequenceMixin):
"""
Object returned by :meth:`TOTP.generate`.
It can be treated as a sequence of ``(token, expire_time)``,
or accessed via the following attributes:
.. autoattribute:: token
.. autoattribute:: expire_time
.. autoattribute:: counter
.. autoattribute:: remaining
.. autoattribute:: valid
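Usage example (values shown are illustrative)::
>>> tok = TOTP('s3jdvb7qd2r7jpxx').generate(1419622739)
>>> tok.token, tok.expire_time
('897212', 1419622740)
>>> token, expire_time = tok  # sequence unpacking also works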
"""
#: TOTP object that generated this token
totp = None
#: Token as decimal-encoded ascii string.
token = None
#: HOTP counter value used to generate token (derived from time)
counter = None
def __init__(self, totp, token, counter):
"""
.. warning::
the constructor signature is an internal detail, and is subject to change.
"""
self.totp = totp
self.token = token
self.counter = counter
@memoized_property
def start_time(self):
"""Timestamp marking beginning of period when token is valid"""
return self.totp._counter_to_time(self.counter)
@memoized_property
def expire_time(self):
"""Timestamp marking end of period when token is valid"""
return self.totp._counter_to_time(self.counter + 1)
@property
def remaining(self):
"""number of (float) seconds before token expires"""
return max(0, self.expire_time - self.totp.now())
@property
def valid(self):
"""whether token is still valid"""
return bool(self.remaining)
def _as_tuple(self):
return self.token, self.expire_time
def __repr__(self):
expired = "" if self.remaining else " expired"
return "<TotpToken token='%s' expire_time=%d%s>" % \
(self.token, self.expire_time, expired)
class TotpMatch(SequenceMixin):
"""
Object returned by :meth:`TOTP.match` and :meth:`TOTP.verify` on a successful match.
It can be treated as a sequence of ``(counter, time)``,
or accessed via the following attributes:
.. autoattribute:: counter
:annotation: = 0
.. autoattribute:: time
:annotation: = 0
.. autoattribute:: expected_counter
:annotation: = 0
.. autoattribute:: skipped
:annotation: = 0
.. autoattribute:: expire_time
:annotation: = 0
.. autoattribute:: cache_seconds
:annotation: = 60
.. autoattribute:: cache_time
:annotation: = 0
This object will always have a ``True`` boolean value.
"""
#: TOTP object that generated this token
totp = None
#: TOTP counter value which matched token.
#: (Best practice is to subsequently ignore tokens matching this counter
#: or earlier)
counter = 0
#: Timestamp when verification was performed.
time = 0
#: Search window used by match() (affects cache_time)
window = 30
def __init__(self, totp, counter, time, window=30):
"""
.. warning::
the constructor signature is an internal detail, and is subject to change.
"""
self.totp = totp
self.counter = counter
self.time = time
self.window = window
@memoized_property
def expected_counter(self):
"""
Counter value expected for timestamp.
"""
return self.totp._time_to_counter(self.time)
@memoized_property
def skipped(self):
"""
How many steps were skipped between expected and actual matched counter
value (may be positive, zero, or negative).
"""
return self.counter - self.expected_counter
# @memoized_property
# def start_time(self):
# """Timestamp marking start of period when token is valid"""
# return self.totp._counter_to_time(self.counter)
@memoized_property
def expire_time(self):
"""Timestamp marking end of period when token is valid"""
return self.totp._counter_to_time(self.counter + 1)
@memoized_property
def cache_seconds(self):
"""
Number of seconds counter should be cached
before it's guaranteed to have passed outside of verification window.
"""
# XXX: real value is 'cache_time - now()',
# but this is a cheaper upper bound.
return self.totp.period + self.window
@memoized_property
def cache_time(self):
"""
Timestamp marking when counter has passed outside of verification window.
"""
return self.expire_time + self.window
def _as_tuple(self):
return self.counter, self.time
def __repr__(self):
args = (self.counter, self.time, self.cache_seconds)
return "<TotpMatch counter=%d time=%d cache_seconds=%d>" % args
#=============================================================================
# convenience helpers
#=============================================================================
def generate_secret(entropy=256, charset=BASE64_CHARS[:-2]):
"""
generate a random string suitable for use as an
:class:`AppWallet` application secret.
:param entropy:
number of bits of entropy (controls size/complexity of password).
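Usage example (illustrative; assumes the default 62-symbol charset)::
>>> secret = generate_secret()
>>> len(secret)  # ceil(256 / log2(62)) == 43 symbols
43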
"""
assert entropy > 0
assert len(charset) > 1
count = int(math.ceil(entropy * math.log(2, len(charset))))
return getrandstr(rng, charset, count)
#=============================================================================
# eof
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/totp.py | totp.py |
# XXX: if any platform has problems w/ lazy modules, could support a 'non-lazy'
# version which just imports all schemes known to list_crypt_handlers()
from zdppy_password_hash.registry import _proxy
import sys
sys.modules[__name__] = _proxy
#=============================================================================
# HACK: the following bit of code is unreachable, but its presence seems to
# help make autocomplete work for certain IDEs such as PyCharm.
# this list is automatically regenerated using $SOURCE/admin/regen.py
#=============================================================================
#----------------------------------------------------
# begin autocomplete hack (autogenerated 2016-11-10)
#----------------------------------------------------
if False:
from zdppy_password_hash.handlers.argon2 import argon2
from zdppy_password_hash.handlers.bcrypt import bcrypt, bcrypt_sha256
from zdppy_password_hash.handlers.cisco import cisco_asa, cisco_pix, cisco_type7
from zdppy_password_hash.handlers.des_crypt import bigcrypt, bsdi_crypt, crypt16, des_crypt
from zdppy_password_hash.handlers.digests import hex_md4, hex_md5, hex_sha1, hex_sha256, hex_sha512, htdigest
from zdppy_password_hash.handlers.django import django_bcrypt, django_bcrypt_sha256, django_des_crypt, django_disabled, django_pbkdf2_sha1, django_pbkdf2_sha256, django_salted_md5, django_salted_sha1
from zdppy_password_hash.handlers.fshp import fshp
from zdppy_password_hash.handlers.ldap_digests import ldap_bcrypt, ldap_bsdi_crypt, ldap_des_crypt, ldap_md5, ldap_md5_crypt, ldap_plaintext, ldap_salted_md5, ldap_salted_sha1, ldap_salted_sha256, ldap_salted_sha512, ldap_sha1, ldap_sha1_crypt, ldap_sha256_crypt, ldap_sha512_crypt
from zdppy_password_hash.handlers.md5_crypt import apr_md5_crypt, md5_crypt
from zdppy_password_hash.handlers.misc import plaintext, unix_disabled, unix_fallback
from zdppy_password_hash.handlers.mssql import mssql2000, mssql2005
from zdppy_password_hash.handlers.mysql import mysql323, mysql41
from zdppy_password_hash.handlers.oracle import oracle10, oracle11
from zdppy_password_hash.handlers.pbkdf2 import atlassian_pbkdf2_sha1, cta_pbkdf2_sha1, dlitz_pbkdf2_sha1, grub_pbkdf2_sha512, ldap_pbkdf2_sha1, ldap_pbkdf2_sha256, ldap_pbkdf2_sha512, pbkdf2_sha1, pbkdf2_sha256, pbkdf2_sha512
from zdppy_password_hash.handlers.phpass import phpass
from zdppy_password_hash.handlers.postgres import postgres_md5
from zdppy_password_hash.handlers.roundup import ldap_hex_md5, ldap_hex_sha1, roundup_plaintext
from zdppy_password_hash.handlers.scram import scram
from zdppy_password_hash.handlers.scrypt import scrypt
from zdppy_password_hash.handlers.sha1_crypt import sha1_crypt
from zdppy_password_hash.handlers.sha2_crypt import sha256_crypt, sha512_crypt
from zdppy_password_hash.handlers.sun_md5_crypt import sun_md5_crypt
from zdppy_password_hash.handlers.windows import bsd_nthash, lmhash, msdcc, msdcc2, nthash
#----------------------------------------------------
# end autocomplete hack
#----------------------------------------------------
#=============================================================================
# eoc
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/hash.py | hash.py |
#=============================================================================
# imports
#=============================================================================
from __future__ import absolute_import, division, print_function, unicode_literals
# core
import codecs
from collections import defaultdict
try:
from collections.abc import MutableMapping
except ImportError:
# py2 compat
from collections import MutableMapping
from math import ceil, log as logf
import logging; log = logging.getLogger(__name__)
import pkg_resources
import os
# site
# pkg
from zdppy_password_hash import exc
from zdppy_password_hash.utils.compat import PY2, irange, itervalues, int_types
from zdppy_password_hash.utils import rng, getrandstr, to_unicode
from zdppy_password_hash.utils.decor import memoized_property
# local
__all__ = [
"genword", "default_charsets",
"genphrase", "default_wordsets",
]
#=============================================================================
# constants
#=============================================================================
# XXX: rename / publicly document this map?
entropy_aliases = dict(
# barest protection from throttled online attack
unsafe=12,
# some protection from unthrottled online attack
weak=24,
# some protection from offline attacks
fair=36,
# reasonable protection from offline attacks
strong=48,
# very good protection from offline attacks
secure=60,
)
#=============================================================================
# internal helpers
#=============================================================================
def _superclasses(obj, cls):
"""return remaining classes in object's MRO after cls"""
mro = type(obj).__mro__
return mro[mro.index(cls)+1:]
def _self_info_rate(source):
"""
returns 'rate of self-information' --
i.e. average (per-symbol) entropy of the sequence **source**,
where probability of a given symbol occurring is calculated based on
the number of occurrences within the sequence itself.
if all elements of the source are unique, this should equal ``log(len(source), 2)``.
:arg source:
iterable containing 0+ symbols
(e.g. list of strings or ints, string of characters, etc).
:returns:
float bits of entropy
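Usage example (illustrative)::
>>> _self_info_rate("aaaa")  # one distinct symbol -> zero entropy
0.0
>>> _self_info_rate("abcd")  # all symbols unique -> log(len, 2) bits per symbol
2.0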
"""
try:
size = len(source)
except TypeError:
# if len() doesn't work, calculate size by summing counts later
size = None
counts = defaultdict(int)
for char in source:
counts[char] += 1
if size is None:
values = counts.values()
size = sum(values)
else:
values = itervalues(counts)
if not size:
return 0
# NOTE: the following performs ``- sum(value / size * logf(value / size, 2) for value in values)``,
# it just does so with as much pulled out of the sum() loop as possible...
return logf(size, 2) - sum(value * logf(value, 2) for value in values) / size
# def _total_self_info(source):
# """
# return total self-entropy of a sequence
# (the average entropy per symbol * size of sequence)
# """
# return _self_info_rate(source) * len(source)
def _open_asset_path(path, encoding=None):
"""
:param asset_path:
string containing absolute path to file,
or package-relative path using format
``"python.module:relative/file/path"``.
:returns:
filehandle opened in 'rb' mode
(unless encoding explicitly specified)
"""
if encoding:
return codecs.getreader(encoding)(_open_asset_path(path))
if os.path.isabs(path):
return open(path, "rb")
package, sep, subpath = path.partition(":")
if not sep:
raise ValueError("asset path must be absolute file path "
"or use 'pkg.name:sub/path' format: %r" % (path,))
return pkg_resources.resource_stream(package, subpath)
#: type aliases
_sequence_types = (list, tuple)
_set_types = (set, frozenset)
#: set of elements that ensure_unique() has validated already.
_ensure_unique_cache = set()
def _ensure_unique(source, param="source"):
"""
helper for generators --
Throws ValueError if source elements aren't unique.
Error message will display (abbreviated) repr of the duplicates in a string/list
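Usage example (illustrative)::
>>> _ensure_unique("abcd")
True
>>> _ensure_unique("abca")
Traceback (most recent call last):
...
ValueError: `source` cannot contain duplicate elements: 'a'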
"""
# check cache to speed things up for frozensets / tuples / strings
cache = _ensure_unique_cache
hashable = True
try:
if source in cache:
return True
except TypeError:
hashable = False
# check if it has dup elements
if isinstance(source, _set_types) or len(set(source)) == len(source):
if hashable:
try:
cache.add(source)
except TypeError:
# XXX: under pypy, "list() in set()" above doesn't throw TypeError,
# but trying to add an unhashable value to a set *does*.
pass
return True
# build list of duplicate values
seen = set()
dups = set()
for elem in source:
(dups if elem in seen else seen).add(elem)
dups = sorted(dups)
trunc = 8
if len(dups) > trunc:
trunc = 5
dup_repr = ", ".join(repr(str(word)) for word in dups[:trunc])
if len(dups) > trunc:
dup_repr += ", ... plus %d others" % (len(dups) - trunc)
# throw error
raise ValueError("`%s` cannot contain duplicate elements: %s" %
(param, dup_repr))
#=============================================================================
# base generator class
#=============================================================================
class SequenceGenerator(object):
"""
Base class used by word & phrase generators.
These objects take a series of options, corresponding
to those of the :func:`genword` and :func:`genphrase` functions.
They act as callables which can be used to generate a password
or a list of 1+ passwords. They also expose some read-only
informational attributes.
Parameters
----------
:param entropy:
Optionally specify the amount of entropy the resulting passwords
should contain (as measured with respect to the generator itself).
This will be used to auto-calculate the required password size.
:param length:
Optionally specify the length of password to generate,
measured as count of whatever symbols the subclass uses (characters or words).
Note if ``entropy`` requires a larger minimum length,
that will be used instead.
:param rng:
Optionally provide a custom RNG source to use.
Should be an instance of :class:`random.Random`,
defaults to :class:`random.SystemRandom`.
Attributes
----------
.. autoattribute:: length
.. autoattribute:: symbol_count
.. autoattribute:: entropy_per_symbol
.. autoattribute:: entropy
Subclassing
-----------
Subclasses must implement the ``.__next__()`` method,
and set ``.symbol_count`` before calling base ``__init__`` method.
"""
#=============================================================================
# instance attrs
#=============================================================================
#: requested size of final password
length = None
#: requested entropy of final password
requested_entropy = "strong"
#: random number source to use
rng = rng
#: number of potential symbols (must be filled in by subclass)
symbol_count = None
#=============================================================================
# init
#=============================================================================
def __init__(self, entropy=None, length=None, rng=None, **kwds):
# make sure subclass set things up correctly
assert self.symbol_count is not None, "subclass must set .symbol_count"
# init length & requested entropy
if entropy is not None or length is None:
if entropy is None:
entropy = self.requested_entropy
entropy = entropy_aliases.get(entropy, entropy)
if entropy <= 0:
raise ValueError("`entropy` must be positive number")
min_length = int(ceil(entropy / self.entropy_per_symbol))
if length is None or length < min_length:
length = min_length
self.requested_entropy = entropy
if length < 1:
raise ValueError("`length` must be positive integer")
self.length = length
# init other common options
if rng is not None:
self.rng = rng
# hand off to parent
if kwds and _superclasses(self, SequenceGenerator) == (object,):
raise TypeError("Unexpected keyword(s): %s" % ", ".join(kwds.keys()))
super(SequenceGenerator, self).__init__(**kwds)
#=============================================================================
# informational helpers
#=============================================================================
@memoized_property
def entropy_per_symbol(self):
"""
Average entropy per symbol (assuming all symbols have equal probability)
"""
return logf(self.symbol_count, 2)
@memoized_property
def entropy(self):
"""
Effective entropy of generated passwords.
This value will always be a multiple of :attr:`entropy_per_symbol`.
If entropy is specified in constructor, :attr:`length` will be chosen
so that this value is the smallest multiple >= :attr:`requested_entropy`.
"""
return self.length * self.entropy_per_symbol
#=============================================================================
# generation
#=============================================================================
def __next__(self):
"""main generation function, should create one password/phrase"""
raise NotImplementedError("implement in subclass")
def __call__(self, returns=None):
"""
frontend used by genword() / genphrase() to create passwords
"""
if returns is None:
return next(self)
elif isinstance(returns, int_types):
return [next(self) for _ in irange(returns)]
elif returns is iter:
return self
else:
raise exc.ExpectedTypeError(returns, "<None>, int, or <iter>", "returns")
def __iter__(self):
return self
if PY2:
def next(self):
return self.__next__()
#=============================================================================
# eoc
#=============================================================================
#=============================================================================
# default charsets
#=============================================================================
#: global dict of predefined characters sets
default_charsets = dict(
# ascii letters, digits, and some punctuation
ascii_72='0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!@#$%^&*?/',
# ascii letters and digits
ascii_62='0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ',
# ascii_50, without visually similar '1IiLl', '0Oo', '5S', '8B'
ascii_50='234679abcdefghjkmnpqrstuvwxyzACDEFGHJKMNPQRTUVWXYZ',
# lower case hexadecimal
hex='0123456789abcdef',
)
#=============================================================================
# password generator
#=============================================================================
class WordGenerator(SequenceGenerator):
"""
Class which generates passwords by randomly choosing from a string of unique characters.
Parameters
----------
:param chars:
custom character string to draw from.
:param charset:
predefined charset to draw from.
:param \\*\\*kwds:
all other keywords passed to the :class:`SequenceGenerator` parent class.
Attributes
----------
.. autoattribute:: chars
.. autoattribute:: charset
.. autoattribute:: default_charsets
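Usage example (outputs will vary; values shown are illustrative)::
>>> gen = WordGenerator(charset="hex", length=8)
>>> gen()  # doctest: +SKIP
'3b9f2c61'
>>> gen(returns=2)  # doctest: +SKIP
['f0a64bd2', '91c7e03a']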
"""
#=============================================================================
# instance attrs
#=============================================================================
#: Predefined character set in use (set to None for instances using custom 'chars')
charset = "ascii_62"
#: string of chars to draw from -- usually filled in from charset
chars = None
#=============================================================================
# init
#=============================================================================
def __init__(self, chars=None, charset=None, **kwds):
# init chars and charset
if chars:
if charset:
raise TypeError("`chars` and `charset` are mutually exclusive")
else:
if not charset:
charset = self.charset
assert charset
chars = default_charsets[charset]
self.charset = charset
chars = to_unicode(chars, param="chars")
_ensure_unique(chars, param="chars")
self.chars = chars
# hand off to parent
super(WordGenerator, self).__init__(**kwds)
# log.debug("WordGenerator(): entropy/char=%r", self.entropy_per_symbol)
#=============================================================================
# informational helpers
#=============================================================================
@memoized_property
def symbol_count(self):
return len(self.chars)
#=============================================================================
# generation
#=============================================================================
def __next__(self):
# XXX: could do things like optionally ensure certain character groups
# (e.g. letters & punctuation) are included
return getrandstr(self.rng, self.chars, self.length)
#=============================================================================
# eoc
#=============================================================================
def genword(entropy=None, length=None, returns=None, **kwds):
"""Generate one or more random passwords.
This function uses :class:`random.SystemRandom` to generate
one or more passwords using various character sets.
The complexity of the password can be specified
by size, or by the desired amount of entropy.
Usage Example::
>>> # generate a random alphanumeric string with 48 bits of entropy (the default)
>>> from zdppy_password_hash import pwd
>>> pwd.genword()
'DnBHvDjMK6'
>>> # generate a random hexadecimal string with 52 bits of entropy
>>> pwd.genword(entropy=52, charset="hex")
'310f1a7ac793f'
:param entropy:
Strength of resulting password, measured in 'guessing entropy' bits.
An appropriate **length** value will be calculated
based on the requested entropy amount, and the size of the character set.
This can be a positive integer, or one of the following preset
strings: ``"weak"`` (24), ``"fair"`` (36),
``"strong"`` (48), and ``"secure"`` (60).
If neither this nor **length** is specified, **entropy** will default
to ``"strong"`` (48).
:param length:
Size of resulting password, measured in characters.
If omitted, the size is auto-calculated based on the **entropy** parameter.
If both **entropy** and **length** are specified,
the stronger value will be used.
:param returns:
Controls what this function returns:
* If ``None`` (the default), this function will generate a single password.
* If an integer, this function will return a list containing that many passwords.
* If the ``iter`` constant, will return an iterator that yields passwords.
:param chars:
Optionally specify custom string of characters to use when randomly
generating a password. This option cannot be combined with **charset**.
:param charset:
The predefined character set to draw from (if not specified by **chars**).
There are currently four presets available:
* ``"ascii_62"`` (the default) -- all digits and ascii upper & lowercase letters.
Provides ~5.95 bits of entropy per character.
* ``"ascii_50"`` -- subset which excludes visually similar characters
(``1IiLl0Oo5S8B``). Provides ~5.64 bits of entropy per character.
* ``"ascii_72"`` -- all digits and ascii upper & lowercase letters,
as well as some punctuation. Provides ~6.17 bits of entropy per character.
* ``"hex"`` -- Lower case hexadecimal. Provides 4 bits of entropy per character.
:returns:
:class:`!unicode` string containing randomly generated password;
or list of 1+ passwords if :samp:`returns={int}` is specified.
"""
gen = WordGenerator(length=length, entropy=entropy, **kwds)
return gen(returns)
#=============================================================================
# default wordsets
#=============================================================================
def _load_wordset(asset_path):
"""
load wordset from compressed datafile within package data.
file should be utf-8 encoded
:param asset_path:
string containing absolute path to wordset file,
or "python.module:relative/file/path".
:returns:
tuple of words, as loaded from specified words file.
"""
# open resource file, convert to tuple of words (strip blank lines & ws)
with _open_asset_path(asset_path, "utf-8") as fh:
gen = (word.strip() for word in fh)
words = tuple(word for word in gen if word)
# NOTE: works but not used
# # detect if file uses "<int> <word>" format, and strip numeric prefix
# def extract(row):
# idx, word = row.replace("\t", " ").split(" ", 1)
# if not idx.isdigit():
# raise ValueError("row is not dice index + word")
# return word
# try:
# extract(words[-1])
# except ValueError:
# pass
# else:
# words = tuple(extract(word) for word in words)
log.debug("loaded %d-element wordset from %r", len(words), asset_path)
return words
class WordsetDict(MutableMapping):
"""
Special mapping used to store dictionary of wordsets.
Different from a regular dict in that some wordsets
may be lazy-loaded from an asset path.
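Usage example (path & size are those of the bundled "eff_short" wordset)::
>>> ws = WordsetDict()
>>> ws.set_path("demo", "zdppy_password_hash:_data/wordsets/eff_short.txt")
>>> "demo" in ws  # membership test doesn't trigger a load
True
>>> len(ws["demo"])  # first access lazy-loads the file
1296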
"""
#: dict of key -> asset path
paths = None
#: dict of key -> value
_loaded = None
def __init__(self, *args, **kwds):
self.paths = {}
self._loaded = {}
super(WordsetDict, self).__init__(*args, **kwds)
def __getitem__(self, key):
try:
return self._loaded[key]
except KeyError:
pass
path = self.paths[key]
value = self._loaded[key] = _load_wordset(path)
return value
def set_path(self, key, path):
"""
set asset path to lazy-load wordset from.
"""
self.paths[key] = path
def __setitem__(self, key, value):
self._loaded[key] = value
def __delitem__(self, key):
# NOTE: must check _loaded directly -- "key in self" also matches
# unloaded path entries, which would then KeyError on _loaded.
if key in self._loaded:
del self._loaded[key]
self.paths.pop(key, None)
else:
del self.paths[key]
@property
def _keyset(self):
keys = set(self._loaded)
keys.update(self.paths)
return keys
def __iter__(self):
return iter(self._keyset)
def __len__(self):
return len(self._keyset)
# NOTE: speeds things up, and prevents __contains__ from lazy-loading
def __contains__(self, key):
return key in self._loaded or key in self.paths
#: dict of predefined word sets.
#: key is name of wordset, value should be sequence of words.
default_wordsets = WordsetDict()
# register the wordsets built into zdppy_password_hash
for name in "eff_long eff_short eff_prefixed bip39".split():
default_wordsets.set_path(name, "zdppy_password_hash:_data/wordsets/%s.txt" % name)
#=============================================================================
# passphrase generator
#=============================================================================
class PhraseGenerator(SequenceGenerator):
"""class which generates passphrases by randomly choosing
from a list of unique words.
:param wordset:
name of predefined wordset to draw from.
:param words:
custom sequence of unique words to draw from instead of ``wordset``.
:param sep:
separator to insert between words in output (defaults to ``" "``).
:param \\*\\*kwds:
all other keywords passed to the :class:`SequenceGenerator` parent class.
.. autoattribute:: wordset
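Usage example (outputs will vary; values shown are illustrative)::
>>> gen = PhraseGenerator(length=4)
>>> gen()  # doctest: +SKIP
'gangly robbing salt shove'
>>> PhraseGenerator(length=4, sep="-")()  # doctest: +SKIP
'gangly-robbing-salt-shove'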
"""
#=============================================================================
# instance attrs
#=============================================================================
#: predefined wordset to use
wordset = "eff_long"
#: list of words to draw from
words = None
#: separator to use when joining words
sep = " "
#=============================================================================
# init
#=============================================================================
def __init__(self, wordset=None, words=None, sep=None, **kwds):
# load wordset
if words is not None:
if wordset is not None:
raise TypeError("`words` and `wordset` are mutually exclusive")
else:
if wordset is None:
wordset = self.wordset
assert wordset
words = default_wordsets[wordset]
self.wordset = wordset
# init words
if not isinstance(words, _sequence_types):
words = tuple(words)
_ensure_unique(words, param="words")
self.words = words
# init separator
if sep is None:
sep = self.sep
sep = to_unicode(sep, param="sep")
self.sep = sep
# hand off to parent
super(PhraseGenerator, self).__init__(**kwds)
##log.debug("PhraseGenerator(): entropy/word=%r entropy/char=%r min_chars=%r",
## self.entropy_per_symbol, self.entropy_per_char, self.min_chars)
#=============================================================================
# informational helpers
#=============================================================================
@memoized_property
def symbol_count(self):
return len(self.words)
#=============================================================================
# generation
#=============================================================================
def __next__(self):
words = (self.rng.choice(self.words) for _ in irange(self.length))
return self.sep.join(words)
#=============================================================================
# eoc
#=============================================================================
def genphrase(entropy=None, length=None, returns=None, **kwds):
"""Generate one or more random password / passphrases.
This function uses :class:`random.SystemRandom` to generate
one or more passphrases, drawn from a configurable word set.
The complexity of the password can be specified
by size, or by the desired amount of entropy.
Usage Example::
>>> # generate random phrase with 48 bits of entropy
>>> from zdppy_password_hash import pwd
>>> pwd.genphrase()
'gangly robbing salt shove'
>>> # generate a random phrase with 52 bits of entropy
>>> # using a particular wordset
>>> pwd.genphrase(entropy=52, wordset="bip39")
'wheat dilemma reward rescue diary'
:param entropy:
Strength of resulting password, measured in 'guessing entropy' bits.
An appropriate **length** value will be calculated
based on the requested entropy amount, and the size of the word set.
This can be a positive integer, or one of the following preset
strings: ``"weak"`` (24), ``"fair"`` (36),
``"strong"`` (48), and ``"secure"`` (60).
If neither this nor **length** is specified, **entropy** will default
to ``"strong"`` (48).
:param length:
Length of resulting password, measured in words.
If omitted, the size is auto-calculated based on the **entropy** parameter.
If both **entropy** and **length** are specified,
the stronger value will be used.
:param returns:
Controls what this function returns:
* If ``None`` (the default), this function will generate a single password.
* If an integer, this function will return a list containing that many passwords.
* If the ``iter`` builtin, will return an iterator that yields passwords.
:param words:
Optionally specifies a list/set of words to use when randomly generating a passphrase.
This option cannot be combined with **wordset**.
:param wordset:
The predefined word set to draw from (if not specified by **words**).
There are currently four presets available:
``"eff_long"`` (the default)
Wordset containing 7776 English words of ~7 letters.
Constructed by the EFF, it offers ~12.9 bits of entropy per word.
This wordset (and the other ``"eff_"`` wordsets)
were `created by the EFF <https://www.eff.org/deeplinks/2016/07/new-wordlists-random-passphrases>`_
to aid in generating passwords. See their announcement page
for more details about the design & properties of these wordsets.
``"eff_short"``
Wordset containing 1296 English words of ~4.5 letters.
Constructed by the EFF, it offers ~10.3 bits of entropy per word.
``"eff_prefixed"``
Wordset containing 1296 English words of ~8 letters,
selected so that they each have a unique 3-character prefix.
Constructed by the EFF, it offers ~10.3 bits of entropy per word.
``"bip39"``
Wordset of 2048 English words of ~5 letters,
selected so that they each have a unique 4-character prefix.
Published as part of Bitcoin's `BIP 39 <https://github.com/bitcoin/bips/blob/master/bip-0039/english.txt>`_,
this wordset has exactly 11 bits of entropy per word.
This list offers words that are typically shorter than ``"eff_long"``
(at the cost of slightly less entropy); and much shorter than
``"eff_prefixed"`` (at the cost of a longer unique prefix).
:param sep:
Optional separator to use when joining words.
Defaults to ``" "`` (a space), but can be an empty string, a hyphen, etc.
:returns:
:class:`!unicode` string containing randomly generated passphrase;
or list of 1+ passphrases if :samp:`returns={int}` is specified;
or an iterator of passphrases if ``returns=iter`` is specified.
"""
gen = PhraseGenerator(entropy=entropy, length=length, **kwds)
return gen(returns)
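# Illustrative sketch (not part of the original module): how the **entropy**
# parameter maps to a word count. Assuming the default "eff_long" wordset
# (7776 words, ~12.9 bits/word), length = ceil(entropy / log2(len(words))):
#
#   >>> import math
#   >>> math.ceil(48 / math.log2(7776))   # "strong" preset -> 4 words
#   4
#   >>> math.ceil(56 / math.log2(7776))   # "secure" preset -> 5 words
#   5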
#=============================================================================
# strength measurement
#
# NOTE:
# for a little while, had rough draft of password strength measurement alg here.
# but not sure if there's value in yet another measurement algorithm,
# that's not just duplicating the effort of libraries like zxcvbn.
# may revive it later, but for now, leaving some refs to others out there:
# * NIST 800-63 has simple alg
# * zxcvbn (https://tech.dropbox.com/2012/04/zxcvbn-realistic-password-strength-estimation/)
# might also be good, and has an approach similar to the composite approach
# i was already thinking about, but much more well thought out.
# * passfault (https://github.com/c-a-m/passfault) looks thorough,
# but may have licensing issues, plus porting to python looks like very big job :(
# * give a look at running things through zlib - might be able to cheaply
# catch extra redundancies.
#=============================================================================
#=============================================================================
# eof
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/pwd.py | pwd.py |
#=============================================================================
# imports
#=============================================================================
# core
import logging; log = logging.getLogger(__name__)
import sys
# site
# pkg
from zdppy_password_hash.utils.decor import deprecated_method
# local
__all__ = [
"PasswordHash",
]
#=============================================================================
# 2/3 compatibility helpers
#=============================================================================
def recreate_with_metaclass(meta):
"""class decorator that re-creates class using metaclass"""
def builder(cls):
if meta is type(cls):
return cls
return meta(cls.__name__, cls.__bases__, cls.__dict__.copy())
return builder
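# Illustrative note (hedged): under py3-only code, the decorator above is
# equivalent to declaring the metaclass directly, e.g. these two forms match:
#
#   @recreate_with_metaclass(ABCMeta)
#   class Example(object):
#       pass
#
#   class Example(object, metaclass=ABCMeta):   # py3 spelling
#       pass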
#=============================================================================
# PasswordHash interface
#=============================================================================
from abc import ABCMeta, abstractmethod, abstractproperty
# TODO: make this actually use abstractproperty(),
# now that we dropped py25, 'abc' is always available.
# XXX: rename to PasswordHasher?
@recreate_with_metaclass(ABCMeta)
class PasswordHash(object):
"""This class describes an abstract interface which all password hashes
in Passlib adhere to. Under Python 2.6 and up, this is an actual
Abstract Base Class built using the :mod:`!abc` module.
See the Passlib docs for full documentation.
"""
#===================================================================
# class attributes
#===================================================================
#---------------------------------------------------------------
# general information
#---------------------------------------------------------------
##name
##setting_kwds
##context_kwds
#: flag which indicates this hasher matches a "disabled" hash
#: (e.g. unix_disabled, or django_disabled); and doesn't actually
#: depend on the provided password.
is_disabled = False
#: Should be None, or a positive integer indicating hash
#: doesn't support secrets larger than this value.
#: Whether hash throws error or silently truncates secret
#: depends on .truncate_error and .truncate_verify_reject flags below.
#: NOTE: calls may treat as boolean, since value will never be 0.
#: .. versionadded:: 1.7
#: .. TODO: zdppy_password_hash 1.8: deprecate/rename this attr to "max_secret_size"?
truncate_size = None
# NOTE: these next two default to the optimistic "ideal",
# most hashes in zdppy_password_hash have to default to False
# for backward compat and/or expected behavior with existing hashes.
#: If True, .hash() should throw a :exc:`~zdppy_password_hash.exc.PasswordSizeError` for
#: any secrets larger than .truncate_size. Many hashers default to False
#: for historical / compatibility purposes, indicating they will silently
#: truncate instead. All such hashers SHOULD support changing
#: the policy via ``.using(truncate_error=True)``.
#: .. versionadded:: 1.7
#: .. TODO: zdppy_password_hash 1.8: deprecate/rename this attr to "truncate_hash_error"?
truncate_error = True
#: If True, .verify() should reject secrets larger than .truncate_size.
#: Many hashers default to False for historical / compatibility purposes,
#: indicating they will match on the truncated portion instead.
#: .. versionadded:: 1.7.1
truncate_verify_reject = True
#---------------------------------------------------------------
# salt information -- if 'salt' in setting_kwds
#---------------------------------------------------------------
##min_salt_size
##max_salt_size
##default_salt_size
##salt_chars
##default_salt_chars
#---------------------------------------------------------------
# rounds information -- if 'rounds' in setting_kwds
#---------------------------------------------------------------
##min_rounds
##max_rounds
##default_rounds
##rounds_cost
#---------------------------------------------------------------
# encoding info -- if 'encoding' in context_kwds
#---------------------------------------------------------------
##default_encoding
#===================================================================
# primary methods
#===================================================================
@classmethod
@abstractmethod
def hash(cls, secret, # *
**setting_and_context_kwds): # pragma: no cover -- abstract method
r"""
Hash secret, returning result.
Should handle generating salt, etc, and should return string
containing identifier, salt & other configuration, as well as digest.
:param \\*\\*settings_kwds:
Pass in settings to customize configuration of resulting hash.
.. deprecated:: 1.7
Starting with Passlib 1.7, callers should no longer pass settings keywords
(e.g. ``rounds`` or ``salt``) directly to :meth:`!hash`; they should use the
``.using(**settings).hash(secret)`` construction instead.
Support will be removed in Passlib 2.0.
:param \\*\\*context_kwds:
Specific algorithms may require context-specific information (such as the user login).
"""
# FIXME: need stub for classes that define .encrypt() instead ...
# this should call .encrypt(), and check for recursion back to here.
raise NotImplementedError("must be implemented by subclass")
@deprecated_method(deprecated="1.7", removed="2.0", replacement=".hash()")
@classmethod
def encrypt(cls, *args, **kwds):
"""
Legacy alias for :meth:`hash`.
.. deprecated:: 1.7
This method was renamed to :meth:`!hash` in version 1.7.
This alias will be removed in version 2.0, and should only
be used for compatibility with Passlib 1.3 - 1.6.
"""
return cls.hash(*args, **kwds)
# XXX: could provide default implementation which hands value to
# hash(), and then does constant-time comparison on the result
# (after making sure both are the same string type)
@classmethod
@abstractmethod
def verify(cls, secret, hash, **context_kwds): # pragma: no cover -- abstract method
"""verify secret against hash, returns True/False"""
raise NotImplementedError("must be implemented by subclass")
#===================================================================
# configuration
#===================================================================
@classmethod
@abstractmethod
def using(cls, relaxed=False, **kwds):
"""
Return another hasher object (typically a subclass of the current one),
which integrates the configuration options specified by ``kwds``.
This should *always* return a new object, even if no configuration options are changed.
.. todo::
document which options are accepted.
:returns:
typically returns a subclass for most hasher implementations.
.. todo::
add this method to main documentation.
"""
raise NotImplementedError("must be implemented by subclass")
#===================================================================
# migration
#===================================================================
@classmethod
def needs_update(cls, hash, secret=None):
"""
check if hash's configuration is outside desired bounds,
or contains some other internal option which requires
updating the password hash.
:param hash:
hash string to examine
:param secret:
optional secret known to have verified against the provided hash.
(this is used by some hashes to detect legacy algorithm mistakes).
:return:
whether secret needs re-hashing.
.. versionadded:: 1.7
"""
# by default, always report that we don't need update
return False
#===================================================================
# additional methods
#===================================================================
@classmethod
@abstractmethod
def identify(cls, hash): # pragma: no cover -- abstract method
"""check if hash belongs to this scheme, returns True/False"""
raise NotImplementedError("must be implemented by subclass")
@deprecated_method(deprecated="1.7", removed="2.0")
@classmethod
def genconfig(cls, **setting_kwds): # pragma: no cover -- abstract method
"""
compile settings into a configuration string for genhash()
.. deprecated:: 1.7
As of 1.7, this method is deprecated, and slated for complete removal in Passlib 2.0.
For all known real-world uses, hashing a constant string
should provide equivalent functionality.
This deprecation may be reversed if a use-case presents itself in the mean time.
"""
# NOTE: this fallback runs full hash alg, w/ whatever cost param is passed along.
# implementations (esp ones w/ variable cost) will want to subclass this
# with a constant-time implementation that just renders a config string.
if cls.context_kwds:
raise NotImplementedError("must be implemented by subclass")
return cls.using(**setting_kwds).hash("")
@deprecated_method(deprecated="1.7", removed="2.0")
@classmethod
def genhash(cls, secret, config, **context):
"""
generate hash for secret, using settings from config/hash string
.. deprecated:: 1.7
As of 1.7, this method is deprecated, and slated for complete removal in Passlib 2.0.
This deprecation may be reversed if a use-case presents itself in the mean time.
"""
# XXX: if hashes reliably offered a .parse() method, could make a fallback for this.
raise NotImplementedError("must be implemented by subclass")
#===================================================================
# undocumented methods / attributes
#===================================================================
# the following entry points are used internally by zdppy_password_hash,
# and aren't documented as part of the exposed interface.
# they are subject to change between releases,
# but are documented here so there's a list of them *somewhere*.
#---------------------------------------------------------------
# extra metadata
#---------------------------------------------------------------
#: this attribute shouldn't be used by hashers themselves,
#: it's reserved for the CryptContext to track which hashers are deprecated.
#: Note the context will only set this on objects it owns (and generated by .using()),
#: and WONT set it on global objects.
#: [added in 1.7]
#: TODO: document this, or at least the use of testing for
#: 'CryptContext().handler().deprecated'
deprecated = False
#: optionally present if hasher corresponds to format built into Django.
#: this attribute (if not None) should be the Django 'algorithm' name.
#: also indicates to zdppy_password_hash.ext.django that (when installed in django),
#: django's native hasher should be used in preference to this one.
## django_name
#---------------------------------------------------------------
# checksum information - defined for many hashes
#---------------------------------------------------------------
## checksum_chars
## checksum_size
#---------------------------------------------------------------
# experimental methods
#---------------------------------------------------------------
##@classmethod
##def normhash(cls, hash):
## """helper to clean up non-canonic instances of hash.
## currently only provided by bcrypt() to fix an historical zdppy_password_hash issue.
## """
# experimental helper to parse hash into components.
##@classmethod
##def parsehash(cls, hash, checksum=True, sanitize=False):
## """helper to parse hash into components, returns dict"""
# experimental helper to estimate bitsize of different hashes;
# implemented for GenericHandler, but may currently be off for some hashes.
# want to expand this into a way to programmatically compare
# "strengths" of different hashes and hash algorithms.
# still needs some factor for estimating relative cost per round,
# ala the style of the scrypt whitepaper.
##@classmethod
##def bitsize(cls, **kwds):
## """returns dict mapping component -> bits contributed.
## components currently include checksum, salt, rounds.
## """
#===================================================================
# eoc
#===================================================================
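# Minimal sketch of a concrete implementation (hypothetical -- "toy_hash" and
# its unsalted sha256 scheme are NOT part of zdppy_password_hash, and skip
# constant-time comparison; shown only to illustrate the abstract contract):
#
#   import hashlib
#
#   class toy_hash(PasswordHash):
#       name = "toy_hash"
#       setting_kwds = ()
#       context_kwds = ()
#
#       @classmethod
#       def hash(cls, secret, **kwds):
#           return "$toy$" + hashlib.sha256(secret.encode("utf-8")).hexdigest()
#
#       @classmethod
#       def verify(cls, secret, hash, **context_kwds):
#           return cls.hash(secret) == hash
#
#       @classmethod
#       def using(cls, relaxed=False, **kwds):
#           return type(cls.__name__, (cls,), kwds)
#
#       @classmethod
#       def identify(cls, hash):
#           return hash.startswith("$toy$")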
class DisabledHash(PasswordHash):
"""
extended disabled-hash methods; only need be present if .is_disabled = True
"""
is_disabled = True
@classmethod
def disable(cls, hash=None):
"""
return string representing a 'disabled' hash;
optionally including previously enabled hash
(this is up to the individual scheme).
"""
# default behavior: ignore original hash, return standalone marker
return cls.hash("")
@classmethod
def enable(cls, hash):
"""
given a disabled-hash string,
extract previously-enabled hash if one is present,
otherwise raises ValueError
"""
# default behavior: no way to restore original hash
raise ValueError("cannot restore original hash")
#=============================================================================
# eof
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/ifc.py | ifc.py |
#=============================================================================
# exceptions
#=============================================================================
class UnknownBackendError(ValueError):
"""
Error raised if multi-backend handler doesn't recognize backend name.
Inherits from :exc:`ValueError`.
.. versionadded:: 1.7
"""
def __init__(self, hasher, backend):
self.hasher = hasher
self.backend = backend
message = "%s: unknown backend: %r" % (hasher.name, backend)
ValueError.__init__(self, message)
# XXX: add a PasslibRuntimeError as base for Missing/Internal/Security runtime errors?
class MissingBackendError(RuntimeError):
"""Error raised if multi-backend handler has no available backends;
or if specifically requested backend is not available.
:exc:`!MissingBackendError` derives
from :exc:`RuntimeError`, since it usually indicates
lack of an external library or OS feature.
This is primarily raised by handlers which depend on
external libraries (which is currently just
:class:`~zdppy_password_hash.hash.bcrypt`).
"""
class InternalBackendError(RuntimeError):
"""
Error raised if something unrecoverable goes wrong with a backend call;
such as ``crypt.crypt()`` returning a malformed hash.
.. versionadded:: 1.7.3
"""
class PasswordValueError(ValueError):
"""
Error raised if a password can't be hashed / verified for various reasons.
This exception derives from the builtin :exc:`!ValueError`.
May be thrown directly when password violates internal invariants of hasher
(e.g. some don't support NULL characters). Hashers may also throw more specific subclasses,
such as :exc:`!PasswordSizeError`.
.. versionadded:: 1.7.3
"""
pass
class PasswordSizeError(PasswordValueError):
"""
Error raised if a password exceeds the maximum size allowed
by Passlib (by default, 4096 characters); or if password exceeds
a hash-specific size limitation.
This exception derives from :exc:`PasswordValueError` (above).
Many password hash algorithms take proportionately larger amounts of time and/or
memory depending on the size of the password provided. This could present
a potential denial of service (DOS) situation if a maliciously large
password is provided to an application. Because of this, Passlib enforces
a maximum size limit, but one which should be *much* larger
than any legitimate password.
.. note::
Applications wishing to use a different limit should set the
``PASSLIB_MAX_PASSWORD_SIZE`` environmental variable before
Passlib is loaded. The value can be any large positive integer.
.. attribute:: max_size
indicates the maximum allowed size.
.. versionadded:: 1.6
"""
max_size = None
def __init__(self, max_size, msg=None):
self.max_size = max_size
if msg is None:
msg = "password exceeds maximum allowed size"
PasswordValueError.__init__(self, msg)
# this also prevents a glibc crypt segfault issue, detailed here ...
# http://www.openwall.com/lists/oss-security/2011/11/15/1
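# Illustrative (hedged) handling sketch -- application code catching the size
# limit during hashing ('hasher' and 'candidate' are placeholder names):
#
#   from zdppy_password_hash.exc import PasswordSizeError
#   try:
#       hasher.hash(candidate)
#   except PasswordSizeError as err:
#       print("password rejected, max size is %d" % err.max_size)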
class PasswordTruncateError(PasswordSizeError):
"""
Error raised if password would be truncated by hash.
This derives from :exc:`PasswordSizeError` (above).
Hashers such as :class:`~zdppy_password_hash.hash.bcrypt` can be configured to raise
this error by setting ``truncate_error=True``.
.. attribute:: max_size
indicates the maximum allowed size.
.. versionadded:: 1.7
"""
def __init__(self, cls, msg=None):
if msg is None:
msg = ("Password too long (%s truncates to %d characters)" %
(cls.name, cls.truncate_size))
PasswordSizeError.__init__(self, cls.truncate_size, msg)
class PasslibSecurityError(RuntimeError):
"""
Error raised if critical security issue is detected
(e.g. an attempt is made to use a vulnerable version of a bcrypt backend).
.. versionadded:: 1.6.3
"""
class TokenError(ValueError):
"""
Base error raised by :mod:`zdppy_password_hash.totp` when
a token can't be parsed / isn't valid / etc.
Derives from :exc:`!ValueError`.
Usually one of the more specific subclasses below will be raised:
* :class:`MalformedTokenError` -- invalid chars, too few digits
* :class:`InvalidTokenError` -- no match found
* :class:`UsedTokenError` -- match found, but token already used
.. versionadded:: 1.7
"""
#: default message to use if none provided -- subclasses may fill this in
_default_message = 'Token not acceptable'
def __init__(self, msg=None, *args, **kwds):
if msg is None:
msg = self._default_message
ValueError.__init__(self, msg, *args, **kwds)
class MalformedTokenError(TokenError):
"""
Error raised by :mod:`zdppy_password_hash.totp` when a token isn't formatted correctly
(contains invalid characters, wrong number of digits, etc)
"""
_default_message = "Unrecognized token"
class InvalidTokenError(TokenError):
"""
Error raised by :mod:`zdppy_password_hash.totp` when a token is formatted correctly,
but doesn't match any tokens within valid range.
"""
_default_message = "Token did not match"
class UsedTokenError(TokenError):
"""
Error raised by :mod:`zdppy_password_hash.totp` if a token is reused.
Derives from :exc:`TokenError`.
.. autoattribute:: expire_time
.. versionadded:: 1.7
"""
_default_message = "Token has already been used, please wait for another."
#: optional value indicating when current counter period will end,
#: and a new token can be generated.
expire_time = None
def __init__(self, *args, **kwds):
self.expire_time = kwds.pop("expire_time", None)
TokenError.__init__(self, *args, **kwds)
class UnknownHashError(ValueError):
"""
Error raised by :class:`~zdppy_password_hash.crypto.lookup_hash` if hash name is not recognized.
This exception derives from :exc:`!ValueError`.
As of version 1.7.3, this may also be raised if hash algorithm is known,
but has been disabled due to FIPS mode (message will include phrase "disabled for fips").
As of version 1.7.4, this may be raised if a :class:`~zdppy_password_hash.context.CryptContext`
is unable to identify the algorithm used by a password hash.
.. versionadded:: 1.7
.. versionchanged:: 1.7.3
added 'message' argument.
.. versionchanged:: 1.7.4
altered call signature.
"""
def __init__(self, message=None, value=None):
self.value = value
if message is None:
message = "unknown hash algorithm: %r" % value
self.message = message
ValueError.__init__(self, message, value)
def __str__(self):
return self.message
#=============================================================================
# warnings
#=============================================================================
class PasslibWarning(UserWarning):
"""base class for Passlib's user warnings,
derives from the builtin :exc:`UserWarning`.
.. versionadded:: 1.6
"""
# XXX: there's only one reference to this class, and it will go away in 2.0;
# so can probably remove this / roll it into PasslibHashWarning.
class PasslibConfigWarning(PasslibWarning):
"""Warning issued when non-fatal issue is found related to the configuration
of a :class:`~zdppy_password_hash.context.CryptContext` instance.
This occurs primarily in one of two cases:
* The CryptContext contains rounds limits which exceed the hard limits
imposed by the underlying algorithm.
* An explicit rounds value was provided which exceeds the limits
imposed by the CryptContext.
In both of these cases, the code will perform correctly & securely;
but the warning is issued as a sign the configuration may need updating.
.. versionadded:: 1.6
"""
class PasslibHashWarning(PasslibWarning):
"""Warning issued when non-fatal issue is found with parameters
or hash string passed to a zdppy_password_hash hash class.
This occurs primarily in one of two cases:
* A rounds value or other setting was explicitly provided which
exceeded the handler's limits (and has been clamped
by the :ref:`relaxed<relaxed-keyword>` flag).
* A malformed hash string was encountered which (while parsable)
should be re-encoded.
.. versionadded:: 1.6
"""
class PasslibRuntimeWarning(PasslibWarning):
"""Warning issued when something unexpected happens during runtime.
The fact that it's a warning instead of an error means Passlib
was able to correct for the issue, but that it's anomalous enough
that the developers would love to hear under what conditions it occurred.
.. versionadded:: 1.6
"""
class PasslibSecurityWarning(PasslibWarning):
"""Special warning issued when Passlib encounters something
that might affect security.
.. versionadded:: 1.6
"""
#=============================================================================
# error constructors
#
# note: these functions are used by the hashes in Passlib to raise common
# error messages. They are currently just functions which return ValueError,
# rather than subclasses of ValueError, since the specificity isn't needed
# yet; and who wants to import a bunch of error classes when catching
# ValueError will do?
#=============================================================================
def _get_name(handler):
return handler.name if handler else "<unnamed>"
#------------------------------------------------------------------------
# generic helpers
#------------------------------------------------------------------------
def type_name(value):
"""return pretty-printed string containing name of value's type"""
cls = value.__class__
if cls.__module__ and cls.__module__ not in ["__builtin__", "builtins"]:
return "%s.%s" % (cls.__module__, cls.__name__)
elif value is None:
return 'None'
else:
return cls.__name__
def ExpectedTypeError(value, expected, param):
"""error message when param was supposed to be one type, but found another"""
# NOTE: value is never displayed, since it may sometimes be a password.
name = type_name(value)
return TypeError("%s must be %s, not %s" % (param, expected, name))
def ExpectedStringError(value, param):
"""error message when param was supposed to be unicode or bytes"""
return ExpectedTypeError(value, "unicode or bytes", param)
#------------------------------------------------------------------------
# hash/verify parameter errors
#------------------------------------------------------------------------
def MissingDigestError(handler=None):
"""raised when verify() method gets passed config string instead of hash"""
name = _get_name(handler)
return ValueError("expected %s hash, got %s config string instead" %
(name, name))
def NullPasswordError(handler=None):
"""raised by OS crypt() supporting hashes, which forbid NULLs in password"""
name = _get_name(handler)
return PasswordValueError("%s does not allow NULL bytes in password" % name)
#------------------------------------------------------------------------
# errors when parsing hashes
#------------------------------------------------------------------------
def InvalidHashError(handler=None):
"""error raised if unrecognized hash provided to handler"""
return ValueError("not a valid %s hash" % _get_name(handler))
def MalformedHashError(handler=None, reason=None):
"""error raised if recognized-but-malformed hash provided to handler"""
text = "malformed %s hash" % _get_name(handler)
if reason:
text = "%s (%s)" % (text, reason)
return ValueError(text)
def ZeroPaddedRoundsError(handler=None):
"""error raised if hash was recognized but contained zero-padded rounds field"""
return MalformedHashError(handler, "zero-padded rounds")
#------------------------------------------------------------------------
# settings / hash component errors
#------------------------------------------------------------------------
def ChecksumSizeError(handler, raw=False):
"""error raised if hash was recognized, but checksum was wrong size"""
# TODO: if handler.use_defaults is set, this came from app-provided value,
# not from parsing a hash string, might want different error msg.
checksum_size = handler.checksum_size
unit = "bytes" if raw else "chars"
reason = "checksum must be exactly %d %s" % (checksum_size, unit)
return MalformedHashError(handler, reason)
#=============================================================================
# sensitive info helpers
#=============================================================================
#: global flag, set temporarily by UTs to allow debug_only_repr() to display sensitive values.
ENABLE_DEBUG_ONLY_REPR = False
def debug_only_repr(value, param="hash"):
"""
helper used to display sensitive data (hashes etc) within error messages.
currently returns placeholder text UNLESS unittests are running,
in which case the real value is displayed.
mainly useful to prevent hashes / secrets from being exposed in production tracebacks;
while still being visible from test failures.
NOTE: api subject to change, may formalize this more in the future.
"""
if ENABLE_DEBUG_ONLY_REPR or value is None or isinstance(value, bool):
return repr(value)
return "<%s %s value omitted>" % (param, type(value))
def CryptBackendError(handler, config, hash, # *
source="crypt.crypt()"):
"""
helper to generate standard message when ``crypt.crypt()`` returns invalid result.
takes care of automatically masking contents of config & hash outside of UTs.
"""
name = _get_name(handler)
msg = "%s returned invalid %s hash: config=%s hash=%s" % \
(source, name, debug_only_repr(config), debug_only_repr(hash))
raise InternalBackendError(msg)
#=============================================================================
# eof
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/exc.py | exc.py |
from binascii import hexlify, unhexlify
from hashlib import md5
import logging; log = logging.getLogger(__name__)
from warnings import warn
# site
# pkg
from zdppy_password_hash.utils import right_pad_string, to_unicode, repeat_string, to_bytes
from zdppy_password_hash.utils.binary import h64
from zdppy_password_hash.utils.compat import unicode, u, join_byte_values, \
join_byte_elems, iter_byte_values, uascii_to_str
import zdppy_password_hash.utils.handlers as uh
# local
__all__ = [
"cisco_pix",
"cisco_asa",
"cisco_type7",
]
#=============================================================================
# utils
#=============================================================================
#: dummy bytes used by spoil_digest var in cisco_pix._calc_checksum()
_DUMMY_BYTES = b'\xFF' * 32
#=============================================================================
# cisco pix firewall hash
#=============================================================================
class cisco_pix(uh.HasUserContext, uh.StaticHandler):
"""
This class implements the password hash used by older Cisco PIX firewalls,
and follows the :ref:`password-hash-api`.
It does a single round of hashing, and relies on the username
as the salt.
This class only allows passwords <= 16 bytes, anything larger
will result in a :exc:`~zdppy_password_hash.exc.PasswordSizeError` if passed to :meth:`~cisco_pix.hash`,
and be silently rejected if passed to :meth:`~cisco_pix.verify`.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.hash`,
:meth:`~zdppy_password_hash.ifc.PasswordHash.genhash`, and
:meth:`~zdppy_password_hash.ifc.PasswordHash.verify` methods
all support the following extra keyword:
:param str user:
String containing name of user account this password is associated with.
This is *required* in order to correctly hash passwords associated
with a user account on the Cisco device, as it is used to salt
the hash.
Conversely, this *must* be omitted or set to ``""`` in order to correctly
hash passwords which don't have an associated user account
(such as the "enable" password).
.. versionadded:: 1.6
.. versionchanged:: 1.7.1
Passwords > 16 bytes are now rejected / throw error instead of being silently truncated,
to match Cisco behavior. A number of :ref:`bugs <zdppy_password_hash-asa96-bug>` were fixed
which caused prior releases to generate unverifiable hashes in certain cases.
"""
#===================================================================
# class attrs
#===================================================================
#--------------------
# PasswordHash
#--------------------
name = "cisco_pix"
truncate_size = 16
# NOTE: these are the default policy for PasswordHash,
# but want to set them explicitly for now.
truncate_error = True
truncate_verify_reject = True
#--------------------
# GenericHandler
#--------------------
checksum_size = 16
checksum_chars = uh.HASH64_CHARS
#--------------------
# custom
#--------------------
#: control flag signalling "cisco_asa" mode, set by cisco_asa class
_is_asa = False
#===================================================================
# methods
#===================================================================
def _calc_checksum(self, secret):
"""
This function implements the "encrypted" hash format used by Cisco
PIX & ASA. Its behavior has been confirmed for ASA 9.6,
but is presumed correct for PIX & other ASA releases,
as it fits with known test vectors, and existing literature.
While nearly the same, the PIX & ASA hashes have slight differences,
so this function performs differently based on the _is_asa class flag.
Notable changes from PIX to ASA include the password size limit
increased from 16 -> 32, and other internal changes.
"""
# select PIX vs ASA mode
asa = self._is_asa
#
# encode secret
#
# per ASA 8.4 documentation,
# http://www.cisco.com/c/en/us/td/docs/security/asa/asa84/configuration/guide/asa_84_cli_config/ref_cli.html#Supported_Character_Sets,
# it supposedly uses UTF-8 -- though some double-encoding issues have
# been observed when trying to actually *set* a non-ascii password
# via ASDM, and access via SSH seems to strip 8-bit chars.
#
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
#
# check if password too large
#
# Per ASA 9.6 changes listed in
# http://www.cisco.com/c/en/us/td/docs/security/asa/roadmap/asa_new_features.html,
# prior releases had a maximum limit of 32 characters.
# Testing with an ASA 9.6 system bears this out --
# setting 32-char password for a user account,
# and logins will fail if any chars are appended.
# (ASA 9.6 added new PBKDF2-based hash algorithm,
# which supports larger passwords).
#
# Per PIX documentation
# http://www.cisco.com/en/US/docs/security/pix/pix50/configuration/guide/commands.html,
# it would not allow passwords > 16 chars.
#
# Thus, we unconditionally throw a password size error here,
# as nothing valid can come from a larger password.
# NOTE: assuming PIX has same behavior, but at 16 char limit.
#
spoil_digest = None
if len(secret) > self.truncate_size:
if self.use_defaults:
# called from hash()
msg = "Password too long (%s allows at most %d bytes)" % \
(self.name, self.truncate_size)
raise uh.exc.PasswordSizeError(self.truncate_size, msg=msg)
else:
# called from verify() --
# We don't want to throw error, or return early,
# as that would let attacker know too much. Instead, we set a
# flag to add some dummy data into the md5 digest, so that
# output won't match truncated version of secret, or anything
# else that's fixed and predictable.
spoil_digest = secret + _DUMMY_BYTES
#
# append user to secret
#
# Policy appears to be:
#
# * Nothing appended for enable password (user = "")
#
# * ASA: If user present, but secret is >= 28 chars, nothing appended.
#
# * 1-2 byte users not allowed.
# DEVIATION: we're letting them through, and repeating their
# chars ala 3-char user, to simplify testing.
# Could issue warning in the future though.
#
# * 3 byte user has first char repeated, to pad to 4.
# (observed under ASA 9.6, assuming true elsewhere)
#
# * 4 byte users are used directly.
#
# * 5+ byte users are truncated to 4 bytes.
#
user = self.user
if user:
if isinstance(user, unicode):
user = user.encode("utf-8")
if not asa or len(secret) < 28:
secret += repeat_string(user, 4)
#
# pad / truncate result to limit
#
# While PIX always pads to 16 bytes, ASA increases to 32 bytes IFF
# secret+user > 16 bytes. This makes PIX & ASA have different results
# where secret size in range(13,16), and user is present --
# PIX will truncate to 16, ASA will truncate to 32.
#
if asa and len(secret) > 16:
pad_size = 32
else:
pad_size = 16
secret = right_pad_string(secret, pad_size)
#
# md5 digest
#
if spoil_digest:
# make sure digest won't match truncated version of secret
secret += spoil_digest
digest = md5(secret).digest()
#
# drop every 4th byte
# NOTE: guessing this was done because it makes output exactly
# 16 bytes, which may have been a general 'char password[]'
# size limit under PIX
#
digest = join_byte_elems(c for i, c in enumerate(digest) if (i + 1) & 3)
#
# encode using Hash64
#
return h64.encode_bytes(digest).decode("ascii")
# NOTE: works, but needs UTs.
# @classmethod
# def same_as_pix(cls, secret, user=""):
# """
# test whether (secret + user) combination should
# have the same hash under PIX and ASA.
#
# mainly present to help unittests.
# """
# # see _calc_checksum() above for details of this logic.
# size = len(to_bytes(secret, "utf-8"))
# if user and size < 28:
# size += 4
# return size < 17
#===================================================================
# eoc
#===================================================================
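# Illustrative (hedged) usage -- matches the widely-published PIX "enable"
# password vector for "cisco" (no user account involved):
#
#   >>> cisco_pix.hash("cisco")
#   '2KFQnbNIdI.2KYOU'
#   >>> cisco_pix.verify("cisco", "2KFQnbNIdI.2KYOU")
#   True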
class cisco_asa(cisco_pix):
"""
This class implements the password hash used by Cisco ASA/PIX 7.0 and newer (2005).
Aside from a different internal algorithm, its use and format are identical
to the older :class:`cisco_pix` class.
For passwords less than 13 characters, this should be identical to :class:`!cisco_pix`,
but will generate a different hash for most larger inputs
(See the `Format & Algorithm`_ section for the details).
This class only allows passwords <= 32 bytes, anything larger
will result in a :exc:`~zdppy_password_hash.exc.PasswordSizeError` if passed to :meth:`~cisco_asa.hash`,
and be silently rejected if passed to :meth:`~cisco_asa.verify`.
.. versionadded:: 1.7
.. versionchanged:: 1.7.1
Passwords > 32 bytes are now rejected / throw error instead of being silently truncated,
to match Cisco behavior. A number of :ref:`bugs <zdppy_password_hash-asa96-bug>` were fixed
which caused prior releases to generate unverifiable hashes in certain cases.
"""
#===================================================================
# class attrs
#===================================================================
#--------------------
# PasswordHash
#--------------------
name = "cisco_asa"
#--------------------
# TruncateMixin
#--------------------
truncate_size = 32
#--------------------
# cisco_pix
#--------------------
_is_asa = True
#===================================================================
# eoc
#===================================================================
#=============================================================================
# type 7
#=============================================================================
class cisco_type7(uh.GenericHandler):
"""
This class implements the "Type 7" password encoding used by Cisco IOS,
and follows the :ref:`password-hash-api`.
It has a simple 4-5 bit salt, but is nonetheless a reversible encoding
instead of a real hash.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts the following optional keywords:
:type salt: int
:param salt:
This may be an optional salt integer drawn from ``range(0,16)``.
If omitted, one will be chosen at random.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~zdppy_password_hash.exc.PasslibHashWarning`
will be issued instead. Correctable errors include
``salt`` values that are out of range.
Note that while this class outputs digests in upper-case hexadecimal,
it will accept lower-case as well.
This class also provides the following additional method:
.. automethod:: decode
"""
#===================================================================
# class attrs
#===================================================================
#--------------------
# PasswordHash
#--------------------
name = "cisco_type7"
setting_kwds = ("salt",)
#--------------------
# GenericHandler
#--------------------
checksum_chars = uh.UPPER_HEX_CHARS
#--------------------
# HasSalt
#--------------------
# NOTE: encoding could handle max_salt_value=99, but since key is only 52
# chars in size, not sure what the appropriate behavior is for that edge case.
min_salt_value = 0
max_salt_value = 52
#===================================================================
# methods
#===================================================================
@classmethod
def using(cls, salt=None, **kwds):
subcls = super(cisco_type7, cls).using(**kwds)
if salt is not None:
salt = subcls._norm_salt(salt, relaxed=kwds.get("relaxed"))
subcls._generate_salt = staticmethod(lambda: salt)
return subcls
@classmethod
def from_string(cls, hash):
hash = to_unicode(hash, "ascii", "hash")
if len(hash) < 2:
raise uh.exc.InvalidHashError(cls)
salt = int(hash[:2]) # may throw ValueError
return cls(salt=salt, checksum=hash[2:].upper())
def __init__(self, salt=None, **kwds):
super(cisco_type7, self).__init__(**kwds)
if salt is not None:
salt = self._norm_salt(salt)
elif self.use_defaults:
salt = self._generate_salt()
assert self._norm_salt(salt) == salt, "generated invalid salt: %r" % (salt,)
else:
raise TypeError("no salt specified")
self.salt = salt
@classmethod
def _norm_salt(cls, salt, relaxed=False):
"""
validate & normalize salt value.
.. note::
the salt for this algorithm is an integer 0-52, not a string
"""
if not isinstance(salt, int):
raise uh.exc.ExpectedTypeError(salt, "integer", "salt")
if 0 <= salt <= cls.max_salt_value:
return salt
msg = "salt/offset must be in 0..52 range"
if relaxed:
warn(msg, uh.PasslibHashWarning)
return 0 if salt < 0 else cls.max_salt_value
else:
raise ValueError(msg)
@staticmethod
def _generate_salt():
return uh.rng.randint(0, 15)
def to_string(self):
return "%02d%s" % (self.salt, uascii_to_str(self.checksum))
def _calc_checksum(self, secret):
# XXX: no idea what unicode policy is, but all examples are
# 7-bit ascii compatible, so using UTF-8
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
return hexlify(self._cipher(secret, self.salt)).decode("ascii").upper()
@classmethod
def decode(cls, hash, encoding="utf-8"):
"""decode hash, returning original password.
:arg hash: encoded password
:param encoding: optional encoding to use (defaults to ``UTF-8``).
:returns: password as unicode
"""
self = cls.from_string(hash)
tmp = unhexlify(self.checksum.encode("ascii"))
raw = self._cipher(tmp, self.salt)
return raw.decode(encoding) if encoding else raw
# type7 uses an xor-based Vigenère variant, using the following secret key:
_key = u("dsfd;kfoA,.iyewrkldJKDHSUBsgvca69834ncxv9873254k;fg87")
@classmethod
def _cipher(cls, data, salt):
"""xor static key against data - encrypts & decrypts"""
key = cls._key
key_size = len(key)
return join_byte_values(
value ^ ord(key[(salt + idx) % key_size])
for idx, value in enumerate(iter_byte_values(data))
)
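# Illustrative (hedged) round-trip, using the widely-published type 7 test
# vector for "cisco" with salt/offset 2:
#
#   >>> cisco_type7.using(salt=2).hash("cisco")
#   '02050D480809'
#   >>> cisco_type7.decode("02050D480809")
#   'cisco'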
#=============================================================================
# eof
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/handlers/cisco.py | cisco.py |
#=============================================================================
# imports
#=============================================================================
# core
import re
import logging; log = logging.getLogger(__name__)
from warnings import warn
# site
# pkg
from zdppy_password_hash.utils import safe_crypt, test_crypt, to_unicode
from zdppy_password_hash.utils.binary import h64, h64big
from zdppy_password_hash.utils.compat import byte_elem_value, u, uascii_to_str, unicode, suppress_cause
from zdppy_password_hash.crypto.des import des_encrypt_int_block
import zdppy_password_hash.utils.handlers as uh
# local
__all__ = [
"des_crypt",
"bsdi_crypt",
"bigcrypt",
"crypt16",
]
#=============================================================================
# pure-python backend for des_crypt family
#=============================================================================
_BNULL = b'\x00'
def _crypt_secret_to_key(secret):
"""convert secret to 64-bit DES key.
this only uses the first 8 bytes of the secret,
and discards the high 8th bit of each of those bytes.
a null parity bit is inserted after every 7th bit of the output.
"""
# NOTE: this would set the parity bits correctly,
# but des_encrypt_int_block() would just ignore them...
##return sum(expand_7bit(byte_elem_value(c) & 0x7f) << (56-i*8)
## for i, c in enumerate(secret[:8]))
return sum((byte_elem_value(c) & 0x7f) << (57-i*8)
for i, c in enumerate(secret[:8]))
def _raw_des_crypt(secret, salt):
"""pure-python backed for des_crypt"""
assert len(salt) == 2
# NOTE: some OSes will accept non-HASH64 characters in the salt,
# but what value they assign these characters varies wildly,
# so just rejecting them outright.
# the same goes for single-character salts...
# some OSes duplicate the char, some insert a '.' char,
# and openbsd does (something) which creates an invalid hash.
salt_value = h64.decode_int12(salt)
# gotta do something - no official policy since this predates unicode
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
assert isinstance(secret, bytes)
# forbidding NULL char because underlying crypt() rejects them too.
if _BNULL in secret:
raise uh.exc.NullPasswordError(des_crypt)
# convert first 8 bytes of secret string into an integer
key_value = _crypt_secret_to_key(secret)
# run data through des using input of 0
result = des_encrypt_int_block(key_value, 0, salt_value, 25)
# run h64 encode on result
return h64big.encode_int64(result)
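# Illustrative (hedged) check of this backend against the known vector also
# used by des_crypt._load_backend_os_crypt() below
# ("test" + salt "ab" -> "abgOeLfPimXQo"):
#
#   >>> _raw_des_crypt(b"test", b"ab")
#   b'gOeLfPimXQo'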
def _bsdi_secret_to_key(secret):
"""convert secret to DES key used by bsdi_crypt"""
key_value = _crypt_secret_to_key(secret)
idx = 8
end = len(secret)
while idx < end:
next = idx + 8
tmp_value = _crypt_secret_to_key(secret[idx:next])
key_value = des_encrypt_int_block(key_value, key_value) ^ tmp_value
idx = next
return key_value
def _raw_bsdi_crypt(secret, rounds, salt):
"""pure-python backend for bsdi_crypt"""
# decode salt
salt_value = h64.decode_int24(salt)
# gotta do something - no official policy since this predates unicode
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
assert isinstance(secret, bytes)
# forbidding NULL char because underlying crypt() rejects them too.
if _BNULL in secret:
raise uh.exc.NullPasswordError(bsdi_crypt)
# convert secret string into an integer
key_value = _bsdi_secret_to_key(secret)
# run data through des using input of 0
result = des_encrypt_int_block(key_value, 0, salt_value, rounds)
# run h64 encode on result
return h64big.encode_int64(result)
#=============================================================================
# handlers
#=============================================================================
class des_crypt(uh.TruncateMixin, uh.HasManyBackends, uh.HasSalt, uh.GenericHandler):
"""This class implements the des-crypt password hash, and follows the :ref:`password-hash-api`.
It supports a fixed-length salt.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts the following optional keywords:
:type salt: str
:param salt:
Optional salt string.
If not specified, one will be autogenerated (this is recommended).
If specified, it must be 2 characters, drawn from the regexp range ``[./0-9A-Za-z]``.
:param bool truncate_error:
By default, des_crypt will silently truncate passwords larger than 8 bytes.
Setting ``truncate_error=True`` will cause :meth:`~zdppy_password_hash.ifc.PasswordHash.hash`
to raise a :exc:`~zdppy_password_hash.exc.PasswordTruncateError` instead.
.. versionadded:: 1.7
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~zdppy_password_hash.exc.PasslibHashWarning`
will be issued instead. Correctable errors include
``salt`` strings that are too long.
.. versionadded:: 1.6
"""
#===================================================================
# class attrs
#===================================================================
#--------------------
# PasswordHash
#--------------------
name = "des_crypt"
setting_kwds = ("salt", "truncate_error")
#--------------------
# GenericHandler
#--------------------
checksum_chars = uh.HASH64_CHARS
checksum_size = 11
#--------------------
# HasSalt
#--------------------
min_salt_size = max_salt_size = 2
salt_chars = uh.HASH64_CHARS
#--------------------
# TruncateMixin
#--------------------
truncate_size = 8
#===================================================================
# formatting
#===================================================================
# FORMAT: 2 chars of H64-encoded salt + 11 chars of H64-encoded checksum
_hash_regex = re.compile(u(r"""
^
(?P<salt>[./a-z0-9]{2})
(?P<chk>[./a-z0-9]{11})?
$"""), re.X|re.I)
@classmethod
def from_string(cls, hash):
hash = to_unicode(hash, "ascii", "hash")
salt, chk = hash[:2], hash[2:]
return cls(salt=salt, checksum=chk or None)
def to_string(self):
hash = u("%s%s") % (self.salt, self.checksum)
return uascii_to_str(hash)
#===================================================================
# digest calculation
#===================================================================
def _calc_checksum(self, secret):
# check for truncation (during .hash() calls only)
if self.use_defaults:
self._check_truncate_policy(secret)
return self._calc_checksum_backend(secret)
#===================================================================
# backend
#===================================================================
backends = ("os_crypt", "builtin")
#---------------------------------------------------------------
# os_crypt backend
#---------------------------------------------------------------
@classmethod
def _load_backend_os_crypt(cls):
if test_crypt("test", 'abgOeLfPimXQo'):
cls._set_calc_checksum_backend(cls._calc_checksum_os_crypt)
return True
else:
return False
def _calc_checksum_os_crypt(self, secret):
# NOTE: we let safe_crypt() encode unicode secret -> utf8;
# no official policy since des-crypt predates unicode
hash = safe_crypt(secret, self.salt)
if hash is None:
# py3's crypt.crypt() can't handle non-utf8 bytes.
# fallback to builtin alg, which is always available.
return self._calc_checksum_builtin(secret)
if not hash.startswith(self.salt) or len(hash) != 13:
raise uh.exc.CryptBackendError(self, self.salt, hash)
return hash[2:]
#---------------------------------------------------------------
# builtin backend
#---------------------------------------------------------------
@classmethod
def _load_backend_builtin(cls):
cls._set_calc_checksum_backend(cls._calc_checksum_builtin)
return True
def _calc_checksum_builtin(self, secret):
return _raw_des_crypt(secret, self.salt.encode("ascii")).decode("ascii")
#===================================================================
# eoc
#===================================================================
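# Illustrative (hedged) truncation-policy sketch, per the docstring above --
# only the first 8 bytes of the secret are significant:
#
#   >>> h = des_crypt.hash("password1")
#   >>> des_crypt.verify("password2", h)   # same first 8 bytes
#   True
#   >>> des_crypt.using(truncate_error=True).hash("password1")
#   Traceback (most recent call last):
#   ...
#   PasswordTruncateError: Password too long (des_crypt truncates to 8 characters)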
class bsdi_crypt(uh.HasManyBackends, uh.HasRounds, uh.HasSalt, uh.GenericHandler):
"""This class implements the BSDi-Crypt password hash, and follows the :ref:`password-hash-api`.
It supports a fixed-length salt, and a variable number of rounds.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts the following optional keywords:
:type salt: str
:param salt:
Optional salt string.
If not specified, one will be autogenerated (this is recommended).
If specified, it must be 4 characters, drawn from the regexp range ``[./0-9A-Za-z]``.
:type rounds: int
:param rounds:
Optional number of rounds to use.
Defaults to 5001, must be between 1 and 16777215, inclusive.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~zdppy_password_hash.exc.PasslibHashWarning`
will be issued instead. Correctable errors include ``rounds``
that are too small or too large, and ``salt`` strings that are too long.
.. versionadded:: 1.6
.. versionchanged:: 1.6
:meth:`hash` will now issue a warning if an even number of rounds is used
(see :ref:`bsdi-crypt-security-issues` regarding weak DES keys).
"""
#===================================================================
# class attrs
#===================================================================
#--GenericHandler--
name = "bsdi_crypt"
setting_kwds = ("salt", "rounds")
checksum_size = 11
checksum_chars = uh.HASH64_CHARS
#--HasSalt--
min_salt_size = max_salt_size = 4
salt_chars = uh.HASH64_CHARS
#--HasRounds--
default_rounds = 5001
min_rounds = 1
max_rounds = 16777215 # (1<<24)-1
rounds_cost = "linear"
# NOTE: OpenBSD login.conf reports 7250 as minimum allowed rounds,
# but that seems to be an OS policy, not an algorithm limitation.
#===================================================================
# parsing
#===================================================================
_hash_regex = re.compile(u(r"""
^
_
(?P<rounds>[./a-z0-9]{4})
(?P<salt>[./a-z0-9]{4})
(?P<chk>[./a-z0-9]{11})?
$"""), re.X|re.I)
@classmethod
def from_string(cls, hash):
hash = to_unicode(hash, "ascii", "hash")
m = cls._hash_regex.match(hash)
if not m:
raise uh.exc.InvalidHashError(cls)
rounds, salt, chk = m.group("rounds", "salt", "chk")
return cls(
rounds=h64.decode_int24(rounds.encode("ascii")),
salt=salt,
checksum=chk,
)
def to_string(self):
hash = u("_%s%s%s") % (h64.encode_int24(self.rounds).decode("ascii"),
self.salt, self.checksum)
return uascii_to_str(hash)
#===================================================================
# validation
#===================================================================
# NOTE: keeping this flag for admin/choose_rounds.py script.
# want to eventually expose rounds logic to that script in a better way.
_avoid_even_rounds = True
@classmethod
def using(cls, **kwds):
subcls = super(bsdi_crypt, cls).using(**kwds)
if not subcls.default_rounds & 1:
# issue warning if caller set an even 'rounds' value.
warn("bsdi_crypt rounds should be odd, as even rounds may reveal weak DES keys",
uh.exc.PasslibSecurityWarning)
return subcls
@classmethod
def _generate_rounds(cls):
rounds = super(bsdi_crypt, cls)._generate_rounds()
# ensure autogenerated rounds are always odd
# NOTE: doing this even for default_rounds so needs_update() doesn't get
# caught in a loop.
# FIXME: this technically might generate a rounds value 1 larger
# than the requested upper bound - but better to err on side of safety.
return rounds|1
#===================================================================
# migration
#===================================================================
def _calc_needs_update(self, **kwds):
# mark bsdi_crypt hashes as deprecated if they have even rounds.
if not self.rounds & 1:
return True
# hand off to base implementation
return super(bsdi_crypt, self)._calc_needs_update(**kwds)
#===================================================================
# backends
#===================================================================
backends = ("os_crypt", "builtin")
#---------------------------------------------------------------
# os_crypt backend
#---------------------------------------------------------------
@classmethod
def _load_backend_os_crypt(cls):
if test_crypt("test", '_/...lLDAxARksGCHin.'):
cls._set_calc_checksum_backend(cls._calc_checksum_os_crypt)
return True
else:
return False
def _calc_checksum_os_crypt(self, secret):
config = self.to_string()
hash = safe_crypt(secret, config)
if hash is None:
# py3's crypt.crypt() can't handle non-utf8 bytes.
# fallback to builtin alg, which is always available.
return self._calc_checksum_builtin(secret)
if not hash.startswith(config[:9]) or len(hash) != 20:
raise uh.exc.CryptBackendError(self, config, hash)
return hash[-11:]
#---------------------------------------------------------------
# builtin backend
#---------------------------------------------------------------
@classmethod
def _load_backend_builtin(cls):
cls._set_calc_checksum_backend(cls._calc_checksum_builtin)
return True
def _calc_checksum_builtin(self, secret):
return _raw_bsdi_crypt(secret, self.rounds, self.salt.encode("ascii")).decode("ascii")
#===================================================================
# eoc
#===================================================================
class bigcrypt(uh.HasSalt, uh.GenericHandler):
"""This class implements the BigCrypt password hash, and follows the :ref:`password-hash-api`.
It supports a fixed-length salt.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts the following optional keywords:
:type salt: str
:param salt:
Optional salt string.
If not specified, one will be autogenerated (this is recommended).
If specified, it must be 22 characters, drawn from the regexp range ``[./0-9A-Za-z]``.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~zdppy_password_hash.exc.PasslibHashWarning`
will be issued instead. Correctable errors include
``salt`` strings that are too long.
.. versionadded:: 1.6
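Usage example (a minimal sketch -- assumes this handler is exposed as
``zdppy_password_hash.hash.bigcrypt``, mirroring the module paths referenced above)::
    >>> from zdppy_password_hash.hash import bigcrypt
    >>> h = bigcrypt.hash("password")    # autogenerates a 2-char salt
    >>> bigcrypt.verify("password", h)
    True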
"""
#===================================================================
# class attrs
#===================================================================
#--GenericHandler--
name = "bigcrypt"
setting_kwds = ("salt",)
checksum_chars = uh.HASH64_CHARS
# NOTE: checksum must be a multiple of 11 chars long (one 11-char segment per 8-byte block)
#--HasSalt--
min_salt_size = max_salt_size = 2
salt_chars = uh.HASH64_CHARS
#===================================================================
# internal helpers
#===================================================================
_hash_regex = re.compile(u(r"""
^
(?P<salt>[./a-z0-9]{2})
(?P<chk>([./a-z0-9]{11})+)?
$"""), re.X|re.I)
@classmethod
def from_string(cls, hash):
hash = to_unicode(hash, "ascii", "hash")
m = cls._hash_regex.match(hash)
if not m:
raise uh.exc.InvalidHashError(cls)
salt, chk = m.group("salt", "chk")
return cls(salt=salt, checksum=chk)
def to_string(self):
hash = u("%s%s") % (self.salt, self.checksum)
return uascii_to_str(hash)
def _norm_checksum(self, checksum, relaxed=False):
checksum = super(bigcrypt, self)._norm_checksum(checksum, relaxed=relaxed)
if len(checksum) % 11:
raise uh.exc.InvalidHashError(self)
return checksum
#===================================================================
# backend
#===================================================================
def _calc_checksum(self, secret):
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
# hash first 8 bytes of secret using the configured salt
chk = _raw_des_crypt(secret, self.salt.encode("ascii"))
# hash each remaining 8-byte block, salting it with the first two
# chars of the previous 11-char checksum segment (chk[-11:-9])
idx = 8
end = len(secret)
while idx < end:
next_idx = idx + 8
chk += _raw_des_crypt(secret[idx:next_idx], chk[-11:-9])
idx = next_idx
return chk.decode("ascii")
#===================================================================
# eoc
#===================================================================
class crypt16(uh.TruncateMixin, uh.HasSalt, uh.GenericHandler):
"""This class implements the crypt16 password hash, and follows the :ref:`password-hash-api`.
It supports a fixed-length salt.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts the following optional keywords:
:type salt: str
:param salt:
Optional salt string.
If not specified, one will be autogenerated (this is recommended).
If specified, it must be 2 characters, drawn from the regexp range ``[./0-9A-Za-z]``.
:param bool truncate_error:
By default, crypt16 will silently truncate passwords larger than 16 bytes.
Setting ``truncate_error=True`` will cause :meth:`~zdppy_password_hash.ifc.PasswordHash.hash`
to raise a :exc:`~zdppy_password_hash.exc.PasswordTruncateError` instead.
.. versionadded:: 1.7
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~zdppy_password_hash.exc.PasslibHashWarning`
will be issued instead. Correctable errors include
``salt`` strings that are too long.
.. versionadded:: 1.6
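Usage example (a minimal sketch -- assumes this handler is exposed as
``zdppy_password_hash.hash.crypt16``)::
    >>> from zdppy_password_hash.hash import crypt16
    >>> h = crypt16.hash("0123456789abcdefXYZ")    # bytes past 16 are silently ignored
    >>> crypt16.verify("0123456789abcdef", h)      # so the truncated secret still verifies
    True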
"""
#===================================================================
# class attrs
#===================================================================
#--------------------
# PasswordHash
#--------------------
name = "crypt16"
setting_kwds = ("salt", "truncate_error")
#--------------------
# GenericHandler
#--------------------
checksum_size = 22
checksum_chars = uh.HASH64_CHARS
#--------------------
# HasSalt
#--------------------
min_salt_size = max_salt_size = 2
salt_chars = uh.HASH64_CHARS
#--------------------
# TruncateMixin
#--------------------
truncate_size = 16
#===================================================================
# internal helpers
#===================================================================
_hash_regex = re.compile(u(r"""
^
(?P<salt>[./a-z0-9]{2})
(?P<chk>[./a-z0-9]{22})?
$"""), re.X|re.I)
@classmethod
def from_string(cls, hash):
hash = to_unicode(hash, "ascii", "hash")
m = cls._hash_regex.match(hash)
if not m:
raise uh.exc.InvalidHashError(cls)
salt, chk = m.group("salt", "chk")
return cls(salt=salt, checksum=chk)
def to_string(self):
hash = u("%s%s") % (self.salt, self.checksum)
return uascii_to_str(hash)
#===================================================================
# backend
#===================================================================
def _calc_checksum(self, secret):
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
# check for truncation (during .hash() calls only)
if self.use_defaults:
self._check_truncate_policy(secret)
# parse salt value
try:
salt_value = h64.decode_int12(self.salt.encode("ascii"))
except ValueError: # pragma: no cover - caught by class
raise suppress_cause(ValueError("invalid chars in salt"))
# convert first 8 bytes of secret string into an integer
key1 = _crypt_secret_to_key(secret)
# run data through des using input of 0
result1 = des_encrypt_int_block(key1, 0, salt_value, 20)
# convert next 8 bytes of secret string into integer (key2=0 if secret is 8 bytes or shorter)
key2 = _crypt_secret_to_key(secret[8:16])
# run data through des using input of 0
result2 = des_encrypt_int_block(key2, 0, salt_value, 5)
# done
chk = h64big.encode_int64(result1) + h64big.encode_int64(result2)
return chk.decode("ascii")
#===================================================================
# eoc
#===================================================================
#=============================================================================
# eof
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/handlers/des_crypt.py | des_crypt.py |
from __future__ import with_statement, absolute_import
# core
import logging
log = logging.getLogger(__name__)
import re
import types
from warnings import warn
# site
_argon2_cffi = None # loaded below
_argon2pure = None # dynamically imported by _load_backend_argon2pure()
# pkg
from zdppy_password_hash import exc
from zdppy_password_hash.crypto.digest import MAX_UINT32
from zdppy_password_hash.utils import classproperty, to_bytes, render_bytes
from zdppy_password_hash.utils.binary import b64s_encode, b64s_decode
from zdppy_password_hash.utils.compat import u, unicode, bascii_to_str, uascii_to_str, PY2
import zdppy_password_hash.utils.handlers as uh
# local
__all__ = [
"argon2",
]
#=============================================================================
# helpers
#=============================================================================
# NOTE: when adding a new argon2 hash type, need to do the following:
# * add TYPE_XXX constant, and add to ALL_TYPES
# * make sure "_backend_type_map" constructors handle it correctly for all backends
# * make sure _hash_regex & _ident_regex (below) support type string.
# * add reference vectors for testing.
#: argon2 type constants -- subclasses handle mapping these to backend-specific type constants.
#: (should be lowercase, to match representation in hash string)
TYPE_I = u("i")
TYPE_D = u("d")
TYPE_ID = u("id") # new 2016-10-29; zdppy_password_hash 1.7.2 requires backends new enough for support
#: list of all known types; first (supported) type will be used as default.
ALL_TYPES = (TYPE_ID, TYPE_I, TYPE_D)
ALL_TYPES_SET = set(ALL_TYPES)
#=============================================================================
# import argon2 package (https://pypi.python.org/pypi/argon2_cffi)
#=============================================================================
# import cffi package
# NOTE: we try to do this even if caller is going to use argon2pure,
# so that we can always use the libargon2 default settings when possible.
_argon2_cffi_error = None
try:
import argon2 as _argon2_cffi
except ImportError:
_argon2_cffi = None
else:
if not hasattr(_argon2_cffi, "Type"):
# they have incompatible "argon2" package installed, instead of "argon2_cffi" package.
_argon2_cffi_error = (
"'argon2' module points to unsupported 'argon2' pypi package; "
"please install 'argon2-cffi' instead."
)
_argon2_cffi = None
elif not hasattr(_argon2_cffi, "low_level"):
# they have pre-v16 argon2_cffi package
_argon2_cffi_error = "'argon2-cffi' is too old, please update to argon2_cffi >= 18.2.0"
_argon2_cffi = None
# init default settings for our hasher class --
# if we have argon2_cffi >= 16.0, use their default hasher settings, otherwise use static default
if hasattr(_argon2_cffi, "PasswordHasher"):
# use cffi's default settings
_default_settings = _argon2_cffi.PasswordHasher()
_default_version = _argon2_cffi.low_level.ARGON2_VERSION
else:
# use fallback settings (for no backend, or argon2pure)
class _DummyCffiHasher:
"""
dummy object to use as source of defaults when argon2_cffi isn't present.
this tries to mimic the attributes of ``argon2.PasswordHasher()`` which the rest of
this module reads.
.. note:: values last synced w/ argon2 19.2 as of 2019-11-09
"""
time_cost = 2
memory_cost = 512
parallelism = 2
salt_len = 16
hash_len = 16
# NOTE: "type" attribute added in argon2_cffi v18.2; but currently not reading it
# type = _argon2_cffi.Type.ID
_default_settings = _DummyCffiHasher()
_default_version = 0x13 # v1.9
#=============================================================================
# handler
#=============================================================================
class _Argon2Common(uh.SubclassBackendMixin, uh.ParallelismMixin,
uh.HasRounds, uh.HasRawSalt, uh.HasRawChecksum,
uh.GenericHandler):
"""
Base class which implements brunt of Argon2 code.
This is then subclassed by the various backends,
to override w/ backend-specific methods.
When a backend is loaded, the bases of the 'argon2' class proper
are modified to prepend the correct backend-specific subclass.
"""
#===================================================================
# class attrs
#===================================================================
#------------------------
# PasswordHash
#------------------------
name = "argon2"
setting_kwds = ("salt",
"salt_size",
"salt_len", # 'salt_size' alias for compat w/ argon2 package
"rounds",
"time_cost", # 'rounds' alias for compat w/ argon2 package
"memory_cost",
"parallelism",
"digest_size",
"hash_len", # 'digest_size' alias for compat w/ argon2 package
"type", # the type of argon2 hash used
)
# TODO: could support the optional 'data' parameter,
# but need to research the uses, what a more descriptive name would be,
# and deal w/ fact that argon2_cffi 16.1 doesn't currently support it.
# (argon2_pure does though)
#------------------------
# GenericHandler
#------------------------
# NOTE: ident -- all argon2 hashes start with "$argon2<type>$"
# XXX: could programmatically generate "ident_values" string from ALL_TYPES above
checksum_size = _default_settings.hash_len
#: force parsing these kwds
_always_parse_settings = uh.GenericHandler._always_parse_settings + \
("type",)
#: exclude these kwds from parsehash() result (most are aliases for other keys)
_unparsed_settings = uh.GenericHandler._unparsed_settings + \
("salt_len", "time_cost", "hash_len", "digest_size")
#------------------------
# HasSalt
#------------------------
default_salt_size = _default_settings.salt_len
min_salt_size = 8
max_salt_size = MAX_UINT32
#------------------------
# HasRounds
# TODO: once rounds limit logic is factored out,
# make 'rounds' and 'cost' an alias for 'time_cost'
#------------------------
default_rounds = _default_settings.time_cost
min_rounds = 1
max_rounds = MAX_UINT32
rounds_cost = "linear"
#------------------------
# ParallelismMixin
#------------------------
max_parallelism = (1 << 24) - 1 # from argon2.h / ARGON2_MAX_LANES
#------------------------
# custom
#------------------------
#: max version support
#: NOTE: this is dependent on the backend, and initialized/modified by set_backend()
max_version = _default_version
#: minimum version before needs_update() marks the hash; if None, defaults to max_version
min_desired_version = None
#: minimum valid memory_cost
min_memory_cost = 8 # from argon2.h / ARGON2_MIN_MEMORY
#: maximum number of threads (-1=unlimited);
#: number of threads used by .hash() will be min(parallelism, max_threads)
max_threads = -1
#: global flag signalling argon2pure backend to use threads
#: rather than subprocesses.
pure_use_threads = False
#: internal helper used to store mapping of TYPE_XXX constants -> backend-specific type constants;
#: this is populated by _load_backend_mixin(); and used to detect which types are supported.
#: XXX: could expose keys as class-level .supported_types property?
_backend_type_map = {}
@classproperty
def type_values(cls):
"""
return tuple of types supported by this backend
.. versionadded:: 1.7.2
"""
cls.get_backend() # make sure backend is loaded
return tuple(cls._backend_type_map)
#===================================================================
# instance attrs
#===================================================================
#: argon2 hash type, one of ALL_TYPES -- class value controls the default
#: .. versionadded:: 1.7.2
type = TYPE_ID
#: parallelism setting -- class value controls the default
parallelism = _default_settings.parallelism
#: hash version (int)
#: NOTE: this is modified by set_backend()
version = _default_version
#: memory cost -- class value controls the default
memory_cost = _default_settings.memory_cost
@property
def type_d(self):
"""
flag indicating a Type D hash
.. deprecated:: 1.7.2; will be removed in zdppy_password_hash 2.0
"""
return self.type == TYPE_D
#: optional secret data
data = None
#===================================================================
# variant constructor
#===================================================================
@classmethod
def using(cls, type=None, memory_cost=None, salt_len=None, time_cost=None, digest_size=None,
checksum_size=None, hash_len=None, max_threads=None, **kwds):
# support aliases which match argon2 naming convention
if time_cost is not None:
if "rounds" in kwds:
raise TypeError("'time_cost' and 'rounds' are mutually exclusive")
kwds['rounds'] = time_cost
if salt_len is not None:
if "salt_size" in kwds:
raise TypeError("'salt_len' and 'salt_size' are mutually exclusive")
kwds['salt_size'] = salt_len
if hash_len is not None:
if digest_size is not None:
raise TypeError("'hash_len' and 'digest_size' are mutually exclusive")
digest_size = hash_len
if checksum_size is not None:
if digest_size is not None:
raise TypeError("'checksum_size' and 'digest_size' are mutually exclusive")
digest_size = checksum_size
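# e.g. argon2.using(time_cost=3) now behaves exactly like argon2.using(rounds=3);
# passing both aliases at once raises TypeError (per the checks above).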
# create variant
subcls = super(_Argon2Common, cls).using(**kwds)
# set type
if type is not None:
subcls.type = subcls._norm_type(type)
# set checksum size
relaxed = kwds.get("relaxed")
if digest_size is not None:
if isinstance(digest_size, uh.native_string_types):
digest_size = int(digest_size)
# NOTE: this isn't *really* a digest-size minimum, but we want to enforce a secure minimum anyway.
subcls.checksum_size = uh.norm_integer(subcls, digest_size, min=16, max=MAX_UINT32,
param="digest_size", relaxed=relaxed)
# set memory cost
if memory_cost is not None:
if isinstance(memory_cost, uh.native_string_types):
memory_cost = int(memory_cost)
subcls.memory_cost = subcls._norm_memory_cost(memory_cost, relaxed=relaxed)
# validate constraints
subcls._validate_constraints(subcls.memory_cost, subcls.parallelism)
# set max threads
if max_threads is not None:
if isinstance(max_threads, uh.native_string_types):
max_threads = int(max_threads)
if max_threads < 1 and max_threads != -1:
raise ValueError("max_threads (%d) must be -1 (unlimited), or at least 1." %
(max_threads,))
subcls.max_threads = max_threads
return subcls
@classmethod
def _validate_constraints(cls, memory_cost, parallelism):
# NOTE: this is used by class & instance, hence passing in via arguments.
# could switch and make this a hybrid method.
min_memory_cost = 8 * parallelism
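# e.g. parallelism=4 requires memory_cost >= 32 (KiB)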
if memory_cost < min_memory_cost:
raise ValueError("%s: memory_cost (%d) is too low, must be at least "
"8 * parallelism (8 * %d = %d)" %
(cls.name, memory_cost,
parallelism, min_memory_cost))
#===================================================================
# public api
#===================================================================
#: shorter version of _hash_regex, used to quickly identify hashes
_ident_regex = re.compile(r"^\$argon2[a-z]+\$")
@classmethod
def identify(cls, hash):
hash = uh.to_unicode_for_identify(hash)
return cls._ident_regex.match(hash) is not None
# hash(), verify(), genhash() -- implemented by backend subclass
#===================================================================
# hash parsing / rendering
#===================================================================
# info taken from source of decode_string() function in
# <https://github.com/P-H-C/phc-winner-argon2/blob/master/src/encoding.c>
#
# hash format:
# $argon2<T>[$v=<num>]$m=<num>,t=<num>,p=<num>[,keyid=<bin>][,data=<bin>][$<bin>[$<bin>]]
#
# NOTE: as of 2016-6-17, the official source (above) lists the "keyid" param in the comments,
# but the actual source of decode_string & encode_string doesn't mention it at all.
# we're supporting parsing it, but throw NotImplementedError if encountered.
#
# sample hashes:
# v1.0: '$argon2i$m=512,t=2,p=2$5VtWOO3cGWYQHEMaYGbsfQ$AcmqasQgW/wI6wAHAMk4aQ'
# v1.3: '$argon2i$v=19$m=512,t=2,p=2$5VtWOO3cGWYQHEMaYGbsfQ$AcmqasQgW/wI6wAHAMk4aQ'
#: regex to parse argon hash
_hash_regex = re.compile(br"""
^
\$argon2(?P<type>[a-z]+)\$
(?:
v=(?P<version>\d+)
\$
)?
m=(?P<memory_cost>\d+)
,
t=(?P<time_cost>\d+)
,
p=(?P<parallelism>\d+)
(?:
,keyid=(?P<keyid>[^,$]+)
)?
(?:
,data=(?P<data>[^,$]+)
)?
(?:
\$
(?P<salt>[^$]+)
(?:
\$
(?P<digest>.+)
)?
)?
$
""", re.X)
@classmethod
def from_string(cls, hash):
# NOTE: assuming hash will be unicode, or use ascii-compatible encoding.
# TODO: switch to working w/ str or unicode
if isinstance(hash, unicode):
hash = hash.encode("utf-8")
if not isinstance(hash, bytes):
raise exc.ExpectedStringError(hash, "hash")
m = cls._hash_regex.match(hash)
if not m:
raise exc.MalformedHashError(cls)
type, version, memory_cost, time_cost, parallelism, keyid, data, salt, digest = \
m.group("type", "version", "memory_cost", "time_cost", "parallelism",
"keyid", "data", "salt", "digest")
if keyid:
raise NotImplementedError("argon2 'keyid' parameter not supported")
return cls(
type=type.decode("ascii"),
version=int(version) if version else 0x10,
memory_cost=int(memory_cost),
rounds=int(time_cost),
parallelism=int(parallelism),
salt=b64s_decode(salt) if salt else None,
data=b64s_decode(data) if data else None,
checksum=b64s_decode(digest) if digest else None,
)
def to_string(self):
version = self.version
if version == 0x10:
vstr = ""
else:
vstr = "v=%d$" % version
data = self.data
if data:
kdstr = ",data=" + bascii_to_str(b64s_encode(self.data))
else:
kdstr = ""
# NOTE: 'keyid' param currently not supported
return "$argon2%s$%sm=%d,t=%d,p=%d%s$%s$%s" % (
uascii_to_str(self.type),
vstr,
self.memory_cost,
self.rounds,
self.parallelism,
kdstr,
bascii_to_str(b64s_encode(self.salt)),
bascii_to_str(b64s_encode(self.checksum)),
)
#===================================================================
# init
#===================================================================
def __init__(self, type=None, type_d=False, version=None, memory_cost=None, data=None, **kwds):
# handle deprecated kwds
if type_d:
warn('argon2 `type_d=True` keyword is deprecated, and will be removed in zdppy_password_hash 2.0; '
'please use ``type="d"`` instead')
assert type is None
type = TYPE_D
# TODO: factor out variable checksum size support into a mixin.
# set checksum size to specific value before _norm_checksum() is called
checksum = kwds.get("checksum")
if checksum is not None:
self.checksum_size = len(checksum)
# call parent
super(_Argon2Common, self).__init__(**kwds)
# init type
if type is None:
assert uh.validate_default_value(self, self.type, self._norm_type, param="type")
else:
self.type = self._norm_type(type)
# init version
if version is None:
assert uh.validate_default_value(self, self.version, self._norm_version,
param="version")
else:
self.version = self._norm_version(version)
# init memory cost
if memory_cost is None:
assert uh.validate_default_value(self, self.memory_cost, self._norm_memory_cost,
param="memory_cost")
else:
self.memory_cost = self._norm_memory_cost(memory_cost)
# init data
if data is None:
assert self.data is None
else:
if not isinstance(data, bytes):
raise uh.exc.ExpectedTypeError(data, "bytes", "data")
self.data = data
#-------------------------------------------------------------------
# parameter guards
#-------------------------------------------------------------------
@classmethod
def _norm_type(cls, value):
# type check
if not isinstance(value, unicode):
if PY2 and isinstance(value, bytes):
value = value.decode('ascii')
else:
raise uh.exc.ExpectedTypeError(value, "str", "type")
# check if type is valid
if value in ALL_TYPES_SET:
return value
# translate from uppercase
temp = value.lower()
if temp in ALL_TYPES_SET:
return temp
# failure!
raise ValueError("unknown argon2 hash type: %r" % (value,))
@classmethod
def _norm_version(cls, version):
if not isinstance(version, uh.int_types):
raise uh.exc.ExpectedTypeError(version, "integer", "version")
# minimum valid version
if version < 0x13 and version != 0x10:
raise ValueError("invalid argon2 hash version: %d" % (version,))
# check this isn't past backend's max version
backend = cls.get_backend()
if version > cls.max_version:
raise ValueError("%s: hash version 0x%X not supported by %r backend "
"(max version is 0x%X); try updating or switching backends" %
(cls.name, version, backend, cls.max_version))
return version
@classmethod
def _norm_memory_cost(cls, memory_cost, relaxed=False):
return uh.norm_integer(cls, memory_cost, min=cls.min_memory_cost,
param="memory_cost", relaxed=relaxed)
#===================================================================
# digest calculation
#===================================================================
# NOTE: _calc_checksum implemented by backend subclass
@classmethod
def _get_backend_type(cls, value):
"""
helper to resolve backend constant from type
"""
try:
return cls._backend_type_map[value]
except KeyError:
pass
# XXX: pick better error class?
msg = "unsupported argon2 hash (type %r not supported by %s backend)" % \
(value, cls.get_backend())
raise ValueError(msg)
#===================================================================
# hash migration
#===================================================================
def _calc_needs_update(self, **kwds):
cls = type(self)
if self.type != cls.type:
return True
minver = cls.min_desired_version
if minver is None or minver > cls.max_version:
minver = cls.max_version
if self.version < minver:
# version is too old.
return True
if self.memory_cost != cls.memory_cost:
return True
if self.checksum_size != cls.checksum_size:
return True
return super(_Argon2Common, self)._calc_needs_update(**kwds)
#===================================================================
# backend loading
#===================================================================
_no_backend_suggestion = " -- recommend you install one (e.g. 'pip install argon2_cffi')"
@classmethod
def _finalize_backend_mixin(mixin_cls, name, dryrun):
"""
helper called from backend mixin classes' _load_backend_mixin() --
invoked after backend imports have been loaded, and performs
feature detection & testing common to all backends.
"""
# check argon2 version
max_version = mixin_cls.max_version
assert isinstance(max_version, int) and max_version >= 0x10
if max_version < 0x13:
warn("%r doesn't support argon2 v1.3, and should be upgraded" % name,
uh.exc.PasslibSecurityWarning)
# prefer best available type
for type in ALL_TYPES:
if type in mixin_cls._backend_type_map:
mixin_cls.type = type
break
else:
warn("%r lacks support for all known hash types" % name, uh.exc.PasslibRuntimeWarning)
# NOTE: class will just throw "unsupported argon2 hash" error if they try to use it...
mixin_cls.type = TYPE_ID
return True
@classmethod
def _adapt_backend_error(cls, err, hash=None, self=None):
"""
internal helper invoked when backend has hash/verification error;
used to adapt to zdppy_password_hash message.
"""
backend = cls.get_backend()
# parse hash to throw error if format was invalid, parameter out of range, etc.
if self is None and hash is not None:
self = cls.from_string(hash)
# check constraints on parsed object
# XXX: could move this to __init__, but not needed by needs_update calls
if self is not None:
self._validate_constraints(self.memory_cost, self.parallelism)
# as of argon2_cffi 16.1, hash_secret() lacks support for the 'data' parameter, so genhash() will end up here.
# as of argon2_cffi 16.2, support was removed from verify_secret() as well.
if backend == "argon2_cffi" and self.data is not None:
raise NotImplementedError("argon2_cffi backend doesn't support the 'data' parameter")
# fallback to reporting a malformed hash
text = str(err)
if text not in [
"Decoding failed" # argon2_cffi's default message
]:
reason = "%s reported: %s: hash=%r" % (backend, text, hash)
else:
reason = repr(hash)
raise exc.MalformedHashError(cls, reason=reason)
#===================================================================
# eoc
#===================================================================
#-----------------------------------------------------------------------
# stub backend
#-----------------------------------------------------------------------
class _NoBackend(_Argon2Common):
"""
mixin used before any backend has been loaded.
contains stubs that force loading of one of the available backends.
"""
#===================================================================
# primary methods
#===================================================================
@classmethod
def hash(cls, secret):
cls._stub_requires_backend()
return cls.hash(secret)
@classmethod
def verify(cls, secret, hash):
cls._stub_requires_backend()
return cls.verify(secret, hash)
@uh.deprecated_method(deprecated="1.7", removed="2.0")
@classmethod
def genhash(cls, secret, config):
cls._stub_requires_backend()
return cls.genhash(secret, config)
#===================================================================
# digest calculation
#===================================================================
def _calc_checksum(self, secret):
# NOTE: since argon2_cffi takes care of rendering hash,
# _calc_checksum() is only used by the argon2pure backend.
self._stub_requires_backend()
# NOTE: have to use super() here so that we don't recursively
# call subclass's wrapped _calc_checksum
return super(argon2, self)._calc_checksum(secret)
#===================================================================
# eoc
#===================================================================
#-----------------------------------------------------------------------
# argon2_cffi backend
#-----------------------------------------------------------------------
class _CffiBackend(_Argon2Common):
"""
argon2_cffi backend
"""
#===================================================================
# backend loading
#===================================================================
@classmethod
def _load_backend_mixin(mixin_cls, name, dryrun):
# make sure we write info to base class's __dict__, not that of a subclass
assert mixin_cls is _CffiBackend
# we automatically import this at top, so just grab info
if _argon2_cffi is None:
if _argon2_cffi_error:
raise exc.PasslibSecurityError(_argon2_cffi_error)
return False
max_version = _argon2_cffi.low_level.ARGON2_VERSION
log.debug("detected 'argon2_cffi' backend, version %r, with support for 0x%x argon2 hashes",
_argon2_cffi.__version__, max_version)
# build type map
TypeEnum = _argon2_cffi.Type
type_map = {}
for type in ALL_TYPES:
try:
type_map[type] = getattr(TypeEnum, type.upper())
except AttributeError:
# TYPE_ID support not added until v18.2
assert type not in (TYPE_I, TYPE_D), "unexpected missing type: %r" % type
mixin_cls._backend_type_map = type_map
# set version info, and run common setup
mixin_cls.version = mixin_cls.max_version = max_version
return mixin_cls._finalize_backend_mixin(name, dryrun)
#===================================================================
# primary methods
#===================================================================
@classmethod
def hash(cls, secret):
# TODO: add in 'encoding' support once that's finalized in 1.8 / 1.9.
uh.validate_secret(secret)
secret = to_bytes(secret, "utf-8")
# XXX: doesn't seem to be a way to make this honor max_threads
try:
return bascii_to_str(_argon2_cffi.low_level.hash_secret(
type=cls._get_backend_type(cls.type),
memory_cost=cls.memory_cost,
time_cost=cls.default_rounds,
parallelism=cls.parallelism,
salt=to_bytes(cls._generate_salt()),
hash_len=cls.checksum_size,
secret=secret,
))
except _argon2_cffi.exceptions.HashingError as err:
raise cls._adapt_backend_error(err)
#: helper for verify() method below -- maps prefixes to type constants
_byte_ident_map = dict((render_bytes(b"$argon2%s$", type.encode("ascii")), type)
for type in ALL_TYPES)
@classmethod
def verify(cls, secret, hash):
# TODO: add in 'encoding' support once that's finalized in 1.8 / 1.9.
uh.validate_secret(secret)
secret = to_bytes(secret, "utf-8")
hash = to_bytes(hash, "ascii")
# read type from start of hash
# NOTE: don't care about malformed strings, lowlevel will throw error for us
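# e.g. hash=b"$argon2id$v=19$..." -> hash[:1+hash.find(b"$", 1)] == b"$argon2id$" -> TYPE_ID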
type = cls._byte_ident_map.get(hash[:1+hash.find(b"$", 1)], TYPE_I)
type_code = cls._get_backend_type(type)
# XXX: doesn't seem to be a way to make this honor max_threads
try:
result = _argon2_cffi.low_level.verify_secret(hash, secret, type_code)
assert result is True
return True
except _argon2_cffi.exceptions.VerifyMismatchError:
return False
except _argon2_cffi.exceptions.VerificationError as err:
raise cls._adapt_backend_error(err, hash=hash)
# NOTE: deprecated, will be removed in 2.0
@classmethod
def genhash(cls, secret, config):
# TODO: add in 'encoding' support once that's finalized in 1.8 / 1.9.
uh.validate_secret(secret)
secret = to_bytes(secret, "utf-8")
self = cls.from_string(config)
# XXX: doesn't seem to be a way to make this honor max_threads
try:
result = bascii_to_str(_argon2_cffi.low_level.hash_secret(
type=cls._get_backend_type(self.type),
memory_cost=self.memory_cost,
time_cost=self.rounds,
parallelism=self.parallelism,
salt=to_bytes(self.salt),
hash_len=self.checksum_size,
secret=secret,
version=self.version,
))
except _argon2_cffi.exceptions.HashingError as err:
raise cls._adapt_backend_error(err, hash=config)
if self.version == 0x10:
# workaround: argon2 0x13 always returns "v=" segment, even for 0x10 hashes
result = result.replace("$v=16$", "$")
return result
#===================================================================
# digest calculation
#===================================================================
def _calc_checksum(self, secret):
raise AssertionError("shouldn't be called under argon2_cffi backend")
#===================================================================
# eoc
#===================================================================
#-----------------------------------------------------------------------
# argon2pure backend
#-----------------------------------------------------------------------
class _PureBackend(_Argon2Common):
"""
argon2pure backend
"""
#===================================================================
# backend loading
#===================================================================
@classmethod
def _load_backend_mixin(mixin_cls, name, dryrun):
# make sure we write info to base class's __dict__, not that of a subclass
assert mixin_cls is _PureBackend
# import argon2pure
global _argon2pure
try:
import argon2pure as _argon2pure
except ImportError:
return False
# get default / max supported version -- added in v1.2.2
try:
from argon2pure import ARGON2_DEFAULT_VERSION as max_version
except ImportError:
log.warning("detected 'argon2pure' backend, but package is too old "
"(zdppy_password_hash requires argon2pure >= 1.2.3)")
return False
log.debug("detected 'argon2pure' backend, with support for 0x%x argon2 hashes",
max_version)
if not dryrun:
warn("Using argon2pure backend, which is 100x+ slower than is required "
"for adequate security. Installing argon2_cffi (via 'pip install argon2_cffi') "
"is strongly recommended", exc.PasslibSecurityWarning)
# build type map
type_map = {}
for type in ALL_TYPES:
try:
type_map[type] = getattr(_argon2pure, "ARGON2" + type.upper())
except AttributeError:
# TYPE_ID support not added until v1.3
assert type not in (TYPE_I, TYPE_D), "unexpected missing type: %r" % type
mixin_cls._backend_type_map = type_map
mixin_cls.version = mixin_cls.max_version = max_version
return mixin_cls._finalize_backend_mixin(name, dryrun)
#===================================================================
# primary methods
#===================================================================
# NOTE: this backend uses default .hash() & .verify() implementations.
#===================================================================
# digest calculation
#===================================================================
def _calc_checksum(self, secret):
# TODO: add in 'encoding' support once that's finalized in 1.8 / 1.9.
uh.validate_secret(secret)
secret = to_bytes(secret, "utf-8")
kwds = dict(
password=secret,
salt=self.salt,
time_cost=self.rounds,
memory_cost=self.memory_cost,
parallelism=self.parallelism,
tag_length=self.checksum_size,
type_code=self._get_backend_type(self.type),
version=self.version,
)
if self.max_threads > 0:
kwds['threads'] = self.max_threads
if self.pure_use_threads:
kwds['use_threads'] = True
if self.data:
kwds['associated_data'] = self.data
# NOTE: should return raw bytes
# NOTE: this may raise _argon2pure.Argon2ParameterError,
# but if it does, there's a bug in our own parameter-checking code.
try:
return _argon2pure.argon2(**kwds)
except _argon2pure.Argon2Error as err:
raise self._adapt_backend_error(err, self=self)
#===================================================================
# eoc
#===================================================================
class argon2(_NoBackend, _Argon2Common):
"""
This class implements the Argon2 password hash [#argon2-home]_, and follows the :ref:`password-hash-api`.
Argon2 supports a variable-length salt, variable time & memory cost,
and a number of other configurable parameters.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts the following optional keywords:
:type type: str
:param type:
Specify the type of argon2 hash to generate.
Can be one of "ID", "I", "D".
This defaults to "ID" if supported by the backend, otherwise "I".
:type salt: str
:param salt:
Optional salt string.
If specified, the length must be at least 8 bytes.
If not specified, one will be auto-generated (this is recommended).
:type salt_size: int
:param salt_size:
Optional number of bytes to use when autogenerating new salts.
:type rounds: int
:param rounds:
Optional number of rounds to use.
This corresponds linearly to the amount of time hashing will take.
:type time_cost: int
:param time_cost:
An alias for **rounds**, for compatibility with the underlying argon2 library.
:param int memory_cost:
Defines the memory usage in kibibytes.
This corresponds linearly to the amount of memory hashing will take.
:param int parallelism:
Defines the parallelization factor.
*NOTE: this will affect the resulting hash value.*
:param int digest_size:
Length of the digest in bytes.
:param int max_threads:
Maximum number of threads that will be used.
-1 means unlimited; otherwise hashing will use ``min(parallelism, max_threads)`` threads.
.. note::
This option is currently only honored by the argon2pure backend.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~zdppy_password_hash.exc.PasslibHashWarning`
will be issued instead. Correctable errors include ``rounds``
that are too small or too large, and ``salt`` strings that are too long.
.. versionchanged:: 1.7.2
Added the "type" keyword, and support for type "D" and "ID" hashes.
(Prior versions could verify type "D" hashes, but not generate them).
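Usage example (a minimal sketch -- assumes one of the backends listed below is
installed, and that this handler is exposed as ``zdppy_password_hash.hash.argon2``)::
    >>> from zdppy_password_hash.hash import argon2
    >>> custom = argon2.using(type="id", rounds=3, memory_cost=1024, parallelism=2)
    >>> h = custom.hash("password")
    >>> argon2.verify("password", h)
    True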
.. todo::
* Support configurable threading limits.
"""
#=============================================================================
# backend
#=============================================================================
# NOTE: the brunt of the argon2 class is implemented in _Argon2Common.
# there is then a subclass for each backend (e.g. _PureBackend);
# these are dynamically prepended to this class's bases
# in order to load the appropriate backend.
#: list of potential backends
backends = ("argon2_cffi", "argon2pure")
#: flag that this class's bases should be modified by SubclassBackendMixin
_backend_mixin_target = True
#: map of backend -> mixin class, used by _get_backend_loader()
_backend_mixin_map = {
None: _NoBackend,
"argon2_cffi": _CffiBackend,
"argon2pure": _PureBackend,
}
#=============================================================================
#
#=============================================================================
#=============================================================================
# eof
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/handlers/argon2.py | argon2.py |
from __future__ import with_statement, absolute_import
# core
from base64 import b64encode
from hashlib import sha256
import os
import re
import logging; log = logging.getLogger(__name__)
from warnings import warn
# site
_bcrypt = None # dynamically imported by _load_backend_bcrypt()
_pybcrypt = None # dynamically imported by _load_backend_pybcrypt()
_bcryptor = None # dynamically imported by _load_backend_bcryptor()
# pkg
_builtin_bcrypt = None # dynamically imported by _load_backend_builtin()
from zdppy_password_hash.crypto.digest import compile_hmac
from zdppy_password_hash.exc import PasslibHashWarning, PasslibSecurityWarning, PasslibSecurityError
from zdppy_password_hash.utils import safe_crypt, repeat_string, to_bytes, parse_version, \
rng, getrandstr, test_crypt, to_unicode, \
utf8_truncate, utf8_repeat_string, crypt_accepts_bytes
from zdppy_password_hash.utils.binary import bcrypt64
from zdppy_password_hash.utils.compat import get_unbound_method_function
from zdppy_password_hash.utils.compat import u, uascii_to_str, unicode, str_to_uascii, PY3, error_from
import zdppy_password_hash.utils.handlers as uh
# local
__all__ = [
"bcrypt",
]
#=============================================================================
# support funcs & constants
#=============================================================================
IDENT_2 = u("$2$")
IDENT_2A = u("$2a$")
IDENT_2X = u("$2x$")
IDENT_2Y = u("$2y$")
IDENT_2B = u("$2b$")
_BNULL = b'\x00'
# reference hash of "test", used in various self-checks
TEST_HASH_2A = "$2a$04$5BJqKfqMQvV7nS.yUguNcueVirQqDBGaLXSqj.rs.pZPlNR0UX/HK"
def _detect_pybcrypt():
"""
internal helper which tries to distinguish pybcrypt vs bcrypt.
:returns:
True if cext-based py-bcrypt,
False if ffi-based bcrypt,
None if 'bcrypt' module not found.
.. versionchanged:: 1.6.3
Now assuming bcrypt installed, unless py-bcrypt explicitly detected.
Previous releases assumed py-bcrypt by default.
Making this change since py-bcrypt is (apparently) unmaintained and static,
whereas bcrypt is being actively maintained, and its internal structure may shift.
"""
# NOTE: this is also used by the unittests.
# check for module.
try:
import bcrypt
except ImportError:
# XXX: this is ignoring case where py-bcrypt's "bcrypt._bcrypt" C Ext fails to import;
# would need to inspect actual ImportError message to catch that.
return None
# py-bcrypt has a "._bcrypt.__version__" attribute (confirmed for v0.1 - 0.4),
# which bcrypt lacks (confirmed for v1.0 - 2.0)
# "._bcrypt" alone isn't sufficient, since bcrypt 2.0 now has that attribute.
try:
from bcrypt._bcrypt import __version__
except ImportError:
return False
return True
#=============================================================================
# backend mixins
#=============================================================================
class _BcryptCommon(uh.SubclassBackendMixin, uh.TruncateMixin, uh.HasManyIdents,
uh.HasRounds, uh.HasSalt, uh.GenericHandler):
"""
Base class which implements brunt of BCrypt code.
This is then subclassed by the various backends,
to override w/ backend-specific methods.
When a backend is loaded, the bases of the 'bcrypt' class proper
are modified to prepend the correct backend-specific subclass.
"""
#===================================================================
# class attrs
#===================================================================
#--------------------
# PasswordHash
#--------------------
name = "bcrypt"
setting_kwds = ("salt", "rounds", "ident", "truncate_error")
#--------------------
# GenericHandler
#--------------------
checksum_size = 31
checksum_chars = bcrypt64.charmap
#--------------------
# HasManyIdents
#--------------------
default_ident = IDENT_2B
ident_values = (IDENT_2, IDENT_2A, IDENT_2X, IDENT_2Y, IDENT_2B)
ident_aliases = {u("2"): IDENT_2, u("2a"): IDENT_2A, u("2y"): IDENT_2Y,
u("2b"): IDENT_2B}
#--------------------
# HasSalt
#--------------------
min_salt_size = max_salt_size = 22
salt_chars = bcrypt64.charmap
# NOTE: 22nd salt char must be in restricted set of ``final_salt_chars``, not full set above.
final_salt_chars = ".Oeu" # bcrypt64._padinfo2[1]
#--------------------
# HasRounds
#--------------------
default_rounds = 12 # current zdppy_password_hash default
min_rounds = 4 # minimum from bcrypt specification
max_rounds = 31 # 32-bit integer limit (since real_rounds=1<<rounds)
rounds_cost = "log2"
#--------------------
# TruncateMixin
#--------------------
truncate_size = 72
#--------------------
# custom
#--------------------
# backend workaround detection flags
# NOTE: these are only set on the backend mixin classes
_workrounds_initialized = False
_has_2a_wraparound_bug = False
_lacks_20_support = False
_lacks_2y_support = False
_lacks_2b_support = False
_fallback_ident = IDENT_2A
_require_valid_utf8_bytes = False
#===================================================================
# formatting
#===================================================================
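# NOTE: hash layout is "<ident><2-digit cost>$<22 char salt><31 char digest>", e.g.
# TEST_HASH_2A above splits into ident="$2a$", cost="04",
# salt="5BJqKfqMQvV7nS.yUguNcu", and digest="eVirQqDBGaLXSqj.rs.pZPlNR0UX/HK".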
@classmethod
def from_string(cls, hash):
ident, tail = cls._parse_ident(hash)
if ident == IDENT_2X:
raise ValueError("crypt_blowfish's buggy '2x' hashes are not "
"currently supported")
rounds_str, data = tail.split(u("$"))
rounds = int(rounds_str)
if rounds_str != u('%02d') % (rounds,):
raise uh.exc.MalformedHashError(cls, "malformed cost field")
salt, chk = data[:22], data[22:]
return cls(
rounds=rounds,
salt=salt,
checksum=chk or None,
ident=ident,
)
def to_string(self):
hash = u("%s%02d$%s%s") % (self.ident, self.rounds, self.salt, self.checksum)
return uascii_to_str(hash)
# NOTE: this should be kept separate from to_string()
# so that bcrypt_sha256() can still use it, while overriding to_string()
def _get_config(self, ident):
"""internal helper to prepare config string for backends"""
config = u("%s%02d$%s") % (ident, self.rounds, self.salt)
return uascii_to_str(config)
#===================================================================
# migration
#===================================================================
@classmethod
def needs_update(cls, hash, **kwds):
# NOTE: can't convert this to use _calc_needs_update() helper,
# since _norm_hash() will correct salt padding before we can read it here.
# check for incorrect padding bits (zdppy_password_hash issue 25)
if isinstance(hash, bytes):
hash = hash.decode("ascii")
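# NOTE: for "$2a$NN$..." hashes the salt occupies hash[7:29], so hash[28] is the
# 22nd (final) salt char -- the one whose unused padding bits must decode to zero.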
if hash.startswith(IDENT_2A) and hash[28] not in cls.final_salt_chars:
return True
# TODO: try to detect incorrect 8bit/wraparound hashes using kwds.get("secret")
# hand off to base implementation, so HasRounds can check rounds value.
return super(_BcryptCommon, cls).needs_update(hash, **kwds)
#===================================================================
# specialized salt generation - fixes zdppy_password_hash issue 25
#===================================================================
@classmethod
def normhash(cls, hash):
"""helper to normalize hash, correcting any bcrypt padding bits"""
if cls.identify(hash):
return cls.from_string(hash).to_string()
else:
return hash
@classmethod
def _generate_salt(cls):
# generate random salt as normal,
# but repair last char so the padding bits always decode to zero.
salt = super(_BcryptCommon, cls)._generate_salt()
return bcrypt64.repair_unused(salt)
@classmethod
def _norm_salt(cls, salt, **kwds):
salt = super(_BcryptCommon, cls)._norm_salt(salt, **kwds)
assert salt is not None, "HasSalt didn't generate new salt!"
changed, salt = bcrypt64.check_repair_unused(salt)
if changed:
# FIXME: if salt was provided by user, this message won't be
# correct. not sure if we want to throw error, or use different warning.
warn(
"encountered a bcrypt salt with incorrectly set padding bits; "
"you may want to use bcrypt.normhash() "
"to fix this; this will be an error under Passlib 2.0",
PasslibHashWarning)
return salt
def _norm_checksum(self, checksum, relaxed=False):
checksum = super(_BcryptCommon, self)._norm_checksum(checksum, relaxed=relaxed)
changed, checksum = bcrypt64.check_repair_unused(checksum)
if changed:
warn(
"encountered a bcrypt hash with incorrectly set padding bits; "
"you may want to use bcrypt.normhash() "
"to fix this; this will be an error under Passlib 2.0",
PasslibHashWarning)
return checksum
#===================================================================
# backend configuration
# NOTE: backends are defined in terms of mixin classes,
# which are dynamically inserted into the bases of the 'bcrypt' class
# via the machinery in 'SubclassBackendMixin'.
# this lets us load in a backend-specific implementation
# of _calc_checksum() and similar methods.
#===================================================================
# NOTE: backend config is located down in <bcrypt> class
# NOTE: set_backend() will execute the ._load_backend_mixin()
# of the matching mixin class, which will handle backend detection
# appended to HasManyBackends' "no backends available" error message
_no_backend_suggestion = " -- recommend you install one (e.g. 'pip install bcrypt')"
@classmethod
def _finalize_backend_mixin(mixin_cls, backend, dryrun):
"""
helper called from backend mixin classes' _load_backend_mixin() --
invoked after backend imports have been loaded, and performs
feature detection & testing common to all backends.
"""
#----------------------------------------------------------------
# setup helpers
#----------------------------------------------------------------
assert mixin_cls is bcrypt._backend_mixin_map[backend], \
"_configure_workarounds() invoked from wrong class"
if mixin_cls._workrounds_initialized:
return True
verify = mixin_cls.verify
err_types = (ValueError, uh.exc.MissingBackendError)
if _bcryptor:
err_types += (_bcryptor.engine.SaltError,)
def safe_verify(secret, hash):
"""verify() wrapper which traps 'unknown identifier' errors"""
try:
return verify(secret, hash)
except err_types:
# backends without support for given ident will throw various
# errors about unrecognized version:
# os_crypt -- internal code below throws
# - PasswordValueError if there's encoding issue w/ password.
# - InternalBackendError if crypt fails for unknown reason
# (trapped below so we can debug it)
# pybcrypt, bcrypt -- raises ValueError
# bcryptor -- raises bcryptor.engine.SaltError
return NotImplemented
except uh.exc.InternalBackendError:
# _calc_checksum() code may also throw CryptBackendError
# if correct hash isn't returned (e.g. 2y hash converted to 2b,
# such as happens with bcrypt 3.0.0)
log.debug("trapped unexpected response from %r backend: verify(%r, %r):",
backend, secret, hash, exc_info=True)
return NotImplemented
def assert_lacks_8bit_bug(ident):
"""
helper to check for crypt_blowfish 8-bit bug (fixed in 2y/2b);
even though it's not known to be present in any of zdppy_password_hash's backends.
this is treated as FATAL, because it can easily result in seriously malformed hashes,
and we can't correct for it ourselves.
test cases from <http://cvsweb.openwall.com/cgi/cvsweb.cgi/Owl/packages/glibc/crypt_blowfish/wrapper.c.diff?r1=1.9;r2=1.10>
reference hash is the incorrectly generated $2x$ hash taken from above url
"""
# NOTE: zdppy_password_hash 1.7.2 and earlier used the commented-out LATIN-1 test vector to detect
# this bug; but python3's crypt.crypt() only supports unicode inputs (and
# always encodes them as UTF8 before passing to crypt); so zdppy_password_hash 1.7.3
# switched to the UTF8-compatible test vector below. This one's bug_hash value
# ("$2x$...rcAS") was drawn from the same openwall source (above); and the correct
# hash ("$2a$...X6eu") was generated by passing the raw bytes to python2's
# crypt.crypt() using OpenBSD 6.7 (hash confirmed as same for $2a$ & $2b$).
# LATIN-1 test vector
# secret = b"\xA3"
# bug_hash = ident.encode("ascii") + b"05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e"
# correct_hash = ident.encode("ascii") + b"05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq"
# UTF-8 test vector
secret = b"\xd1\x91" # aka "\u0451"
bug_hash = ident.encode("ascii") + b"05$6bNw2HLQYeqHYyBfLMsv/OiwqTymGIGzFsA4hOTWebfehXHNprcAS"
correct_hash = ident.encode("ascii") + b"05$6bNw2HLQYeqHYyBfLMsv/OUcZd0LKP39b87nBw3.S2tVZSqiQX6eu"
if verify(secret, bug_hash):
# NOTE: this should only EVER be observed in (broken) 2a and (backward-compat) 2x hashes
# generated by crypt_blowfish library. 2y/2b hashes should not have the bug
# (but we check w/ them anyways).
raise PasslibSecurityError(
"zdppy_password_hash.hash.bcrypt: Your installation of the %r backend is vulnerable to "
"the crypt_blowfish 8-bit bug (CVE-2011-2483) under %r hashes, "
"and should be upgraded or replaced with another backend" % (backend, ident))
# it doesn't have the 8-bit bug, but make sure it *does* verify against the correct
# hash, or we're in some weird third case!
if not verify(secret, correct_hash):
raise RuntimeError("%s backend failed to verify %s 8bit hash" % (backend, ident))
def detect_wrap_bug(ident):
"""
check for bsd wraparound bug (fixed in 2b)
this is treated as a warning, because it's rare in the field,
and pybcrypt (as of 2015-7-21) is unpatched, but some people may be stuck with it.
test cases from <http://www.openwall.com/lists/oss-security/2012/01/02/4>
NOTE: reference hash is of password "0"*72
NOTE: if in future we need to deliberately create hashes which have this bug,
can use something like 'hashpw(repeat_string(secret[:((1+len(secret)) % 256) or 1]), 72)'
"""
# check if it exhibits wraparound bug
secret = (b"0123456789"*26)[:255]
bug_hash = ident.encode("ascii") + b"04$R1lJ2gkNaoPGdafE.H.16.nVyh2niHsGJhayOHLMiXlI45o8/DU.6"
if verify(secret, bug_hash):
return True
# if it doesn't have wraparound bug, make sure it *does* handle things
# correctly -- or we're in some weird third case.
correct_hash = ident.encode("ascii") + b"04$R1lJ2gkNaoPGdafE.H.16.1MKHPvmKwryeulRe225LKProWYwt9Oi"
if not verify(secret, correct_hash):
raise RuntimeError("%s backend failed to verify %s wraparound hash" % (backend, ident))
return False
def assert_lacks_wrap_bug(ident):
if not detect_wrap_bug(ident):
return
# should only see in 2a, later idents should NEVER exhibit this bug:
# * 2y implementations should have been free of it
# * 2b was what (supposedly) fixed it
raise RuntimeError("%s backend unexpectedly has wraparound bug for %s" % (backend, ident))
#----------------------------------------------------------------
# check for old 20 support
#----------------------------------------------------------------
test_hash_20 = b"$2$04$5BJqKfqMQvV7nS.yUguNcuRfMMOXK0xPWavM7pOzjEi5ze5T1k8/S"
result = safe_verify("test", test_hash_20)
if result is NotImplemented:
mixin_cls._lacks_20_support = True
log.debug("%r backend lacks $2$ support, enabling workaround", backend)
elif not result:
raise RuntimeError("%s incorrectly rejected $2$ hash" % backend)
#----------------------------------------------------------------
# check for 2a support
#----------------------------------------------------------------
result = safe_verify("test", TEST_HASH_2A)
if result is NotImplemented:
# 2a support is required, and should always be present
raise RuntimeError("%s lacks support for $2a$ hashes" % backend)
elif not result:
raise RuntimeError("%s incorrectly rejected $2a$ hash" % backend)
else:
assert_lacks_8bit_bug(IDENT_2A)
if detect_wrap_bug(IDENT_2A):
if backend == "os_crypt":
# don't make this a warning for os crypt (e.g. openbsd);
# they'll have proper 2b implementation which will be used for new hashes.
# so even if we didn't have a workaround, this bug wouldn't be a concern.
log.debug("%r backend has $2a$ bsd wraparound bug, enabling workaround", backend)
else:
# installed library has the bug -- want to let users know,
# so they can upgrade it to something better (e.g. bcrypt cffi library)
warn("zdppy_password_hash.hash.bcrypt: Your installation of the %r backend is vulnerable to "
"the bsd wraparound bug, "
"and should be upgraded or replaced with another backend "
"(enabling workaround for now)." % backend,
uh.exc.PasslibSecurityWarning)
mixin_cls._has_2a_wraparound_bug = True
#----------------------------------------------------------------
# check for 2y support
#----------------------------------------------------------------
test_hash_2y = TEST_HASH_2A.replace("2a", "2y")
result = safe_verify("test", test_hash_2y)
if result is NotImplemented:
mixin_cls._lacks_2y_support = True
log.debug("%r backend lacks $2y$ support, enabling workaround", backend)
elif not result:
raise RuntimeError("%s incorrectly rejected $2y$ hash" % backend)
else:
# NOTE: Not using this as fallback candidate,
# lacks wide enough support across implementations.
assert_lacks_8bit_bug(IDENT_2Y)
assert_lacks_wrap_bug(IDENT_2Y)
#----------------------------------------------------------------
# TODO: check for 2x support
#----------------------------------------------------------------
#----------------------------------------------------------------
# check for 2b support
#----------------------------------------------------------------
test_hash_2b = TEST_HASH_2A.replace("2a", "2b")
result = safe_verify("test", test_hash_2b)
if result is NotImplemented:
mixin_cls._lacks_2b_support = True
log.debug("%r backend lacks $2b$ support, enabling workaround", backend)
elif not result:
raise RuntimeError("%s incorrectly rejected $2b$ hash" % backend)
else:
mixin_cls._fallback_ident = IDENT_2B
assert_lacks_8bit_bug(IDENT_2B)
assert_lacks_wrap_bug(IDENT_2B)
# set flag so we don't have to run this again
mixin_cls._workrounds_initialized = True
return True
#===================================================================
# digest calculation
#===================================================================
# _calc_checksum() defined by backends
def _prepare_digest_args(self, secret):
"""
common helper for backends to implement _calc_checksum().
takes in secret, returns (secret, ident) pair,
"""
return self._norm_digest_args(secret, self.ident, new=self.use_defaults)
@classmethod
def _norm_digest_args(cls, secret, ident, new=False):
# make sure secret is unicode
require_valid_utf8_bytes = cls._require_valid_utf8_bytes
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
elif require_valid_utf8_bytes:
# if backend requires utf8 bytes (os_crypt);
# make sure input actually is utf8, or don't bother enabling utf-8 specific helpers.
try:
secret.decode("utf-8")
except UnicodeDecodeError:
# XXX: could just throw PasswordValueError here, backend will just do that
# when _calc_digest() is actually called.
require_valid_utf8_bytes = False
# check max secret size
uh.validate_secret(secret)
# check for truncation (during .hash() calls only)
if new:
cls._check_truncate_policy(secret)
# NOTE: especially important to forbid NULLs for bcrypt, since many
# backends (bcryptor, bcrypt) happily accept them, and then
# silently truncate the password at first NULL they encounter!
if _BNULL in secret:
raise uh.exc.NullPasswordError(cls)
# TODO: figure out way to skip these tests when not needed...
# protect from wraparound bug by truncating secret before handing it to the backend.
# bcrypt only uses first 72 bytes anyways.
# NOTE: not needed for 2y/2b, but might use 2a as fallback for them.
if cls._has_2a_wraparound_bug and len(secret) >= 255:
if require_valid_utf8_bytes:
# backend requires valid utf8 bytes, so truncate secret to nearest valid segment.
# want to do this in constant time to not give away info about secret.
# NOTE: this only works because bcrypt will ignore everything past
# secret[71], so padding to include a full utf8 sequence
# won't break anything about the final output.
secret = utf8_truncate(secret, 72)
else:
secret = secret[:72]
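# NOTE: illustrative sketch of the utf8-safe truncation idea -- this is
# NOT the actual utf8_truncate() implementation, just the rough concept
# (py3-style bytes indexing assumed): extend the cut point past any utf8
# continuation bytes (0b10xxxxxx) so the result ends on a codepoint boundary:
##
## def utf8_truncate_sketch(data, index):
##     end = index
##     while end < len(data) and (data[end] & 0xC0) == 0x80:
##         end += 1
##     return data[:end]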
# special case handling for variants (ordered most common first)
if ident == IDENT_2A:
# nothing needs to be done.
pass
elif ident == IDENT_2B:
if cls._lacks_2b_support:
# handle $2b$ hash format even if backend is too old.
# have it generate a 2A/2Y digest, then return it as a 2B hash.
# 2a-only backend could potentially exhibit wraparound bug --
# but we work around that issue above.
ident = cls._fallback_ident
elif ident == IDENT_2Y:
if cls._lacks_2y_support:
# handle $2y$ hash format (not supported by BSDs, being phased out on others)
# have it generate a 2A/2B digest, then return it as a 2Y hash.
ident = cls._fallback_ident
elif ident == IDENT_2:
if cls._lacks_20_support:
# handle legacy $2$ format (not supported by most backends except BSD os_crypt)
# we can fake $2$ behavior using the 2A/2Y/2B algorithm
# by repeating the password until it's at least 72 chars in length.
if secret:
if require_valid_utf8_bytes:
# NOTE: this only works because bcrypt will ignore everything past
# secret[71], so padding to include a full utf8 sequence
# won't break anything about the final output.
secret = utf8_repeat_string(secret, 72)
else:
secret = repeat_string(secret, 72)
ident = cls._fallback_ident
elif ident == IDENT_2X:
# NOTE: shouldn't get here.
# XXX: could check if backend does actually offer 'support'
raise RuntimeError("$2x$ hashes not currently supported by zdppy_password_hash")
else:
raise AssertionError("unexpected ident value: %r" % ident)
return secret, ident
#-----------------------------------------------------------------------
# stub backend
#-----------------------------------------------------------------------
class _NoBackend(_BcryptCommon):
"""
mixin used before any backend has been loaded.
contains stubs that force loading of one of the available backends.
"""
#===================================================================
# digest calculation
#===================================================================
def _calc_checksum(self, secret):
self._stub_requires_backend()
# NOTE: have to use super() here so that we don't recursively
# call subclass's wrapped _calc_checksum, e.g. bcrypt_sha256._calc_checksum
return super(bcrypt, self)._calc_checksum(secret)
#===================================================================
# eoc
#===================================================================
#-----------------------------------------------------------------------
# bcrypt backend
#-----------------------------------------------------------------------
class _BcryptBackend(_BcryptCommon):
"""
backend which uses 'bcrypt' package
"""
@classmethod
def _load_backend_mixin(mixin_cls, name, dryrun):
# try to import bcrypt
global _bcrypt
if _detect_pybcrypt():
# pybcrypt was installed instead
return False
try:
import bcrypt as _bcrypt
except ImportError: # pragma: no cover
return False
try:
version = _bcrypt.__about__.__version__
except:
log.warning("(trapped) error reading bcrypt version", exc_info=True)
version = '<unknown>'
log.debug("detected 'bcrypt' backend, version %r", version)
return mixin_cls._finalize_backend_mixin(name, dryrun)
# # TODO: would like to implement verify() directly,
# # to skip the need for parsing hash strings.
# # the method below has a few edge cases where it chokes, though.
# @classmethod
# def verify(cls, secret, hash):
# if isinstance(hash, unicode):
# hash = hash.encode("ascii")
# ident = hash[:hash.index(b"$", 1)+1].decode("ascii")
# if ident not in cls.ident_values:
# raise uh.exc.InvalidHashError(cls)
# secret, eff_ident = cls._norm_digest_args(secret, ident)
# if eff_ident != ident:
# # lacks support for original ident, replace w/ new one.
# hash = eff_ident.encode("ascii") + hash[len(ident):]
# result = _bcrypt.hashpw(secret, hash)
# assert result.startswith(eff_ident)
# return consteq(result, hash)
def _calc_checksum(self, secret):
# bcrypt behavior:
# secret must be bytes
# config must be ascii bytes
# returns ascii bytes
secret, ident = self._prepare_digest_args(secret)
config = self._get_config(ident)
if isinstance(config, unicode):
config = config.encode("ascii")
hash = _bcrypt.hashpw(secret, config)
assert isinstance(hash, bytes)
if not hash.startswith(config) or len(hash) != len(config)+31:
raise uh.exc.CryptBackendError(self, config, hash, source="`bcrypt` package")
return hash[-31:].decode("ascii")
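# NOTE: for reference, a full modular-crypt bcrypt hash breaks down as
# "$2b$12$" (ident + rounds, i.e. the config prefix) followed by a
# 22 char salt and a 31 char digest -- which is why the checksum is
# recovered from hash[-31:] above.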
#-----------------------------------------------------------------------
# bcryptor backend
#-----------------------------------------------------------------------
class _BcryptorBackend(_BcryptCommon):
"""
backend which uses 'bcryptor' package
"""
@classmethod
def _load_backend_mixin(mixin_cls, name, dryrun):
# try to import bcryptor
global _bcryptor
try:
import bcryptor as _bcryptor
except ImportError: # pragma: no cover
return False
# deprecated as of 1.7.2
if not dryrun:
warn("Support for `bcryptor` is deprecated, and will be removed in Passlib 1.8; "
"Please use `pip install bcrypt` instead", DeprecationWarning)
return mixin_cls._finalize_backend_mixin(name, dryrun)
def _calc_checksum(self, secret):
# bcryptor behavior:
# py2: unicode secret/hash encoded as ascii bytes before use,
# bytes taken as-is; returns ascii bytes.
# py3: not supported
secret, ident = self._prepare_digest_args(secret)
config = self._get_config(ident)
hash = _bcryptor.engine.Engine(False).hash_key(secret, config)
if not hash.startswith(config) or len(hash) != len(config) + 31:
raise uh.exc.CryptBackendError(self, config, hash, source="bcryptor library")
return str_to_uascii(hash[-31:])
#-----------------------------------------------------------------------
# pybcrypt backend
#-----------------------------------------------------------------------
class _PyBcryptBackend(_BcryptCommon):
"""
backend which uses 'pybcrypt' package
"""
#: classwide thread lock used for pybcrypt < 0.3
_calc_lock = None
@classmethod
def _load_backend_mixin(mixin_cls, name, dryrun):
# try to import pybcrypt
global _pybcrypt
if not _detect_pybcrypt():
# not installed, or bcrypt installed instead
return False
try:
import bcrypt as _pybcrypt
except ImportError: # pragma: no cover
# XXX: should we raise AssertionError here? (if get here, _detect_pybcrypt() is broken)
return False
# deprecated as of 1.7.2
if not dryrun:
warn("Support for `py-bcrypt` is deprecated, and will be removed in Passlib 1.8; "
"Please use `pip install bcrypt` instead", DeprecationWarning)
# determine pybcrypt version
try:
version = _pybcrypt._bcrypt.__version__
except:
log.warning("(trapped) error reading pybcrypt version", exc_info=True)
version = "<unknown>"
log.debug("detected 'pybcrypt' backend, version %r", version)
# return calc function based on version
vinfo = parse_version(version) or (0, 0)
if vinfo < (0, 3):
warn("py-bcrypt %s has a major security vulnerability, "
"you should upgrade to py-bcrypt 0.3 immediately."
% version, uh.exc.PasslibSecurityWarning)
if mixin_cls._calc_lock is None:
import threading
mixin_cls._calc_lock = threading.Lock()
mixin_cls._calc_checksum = get_unbound_method_function(mixin_cls._calc_checksum_threadsafe)
return mixin_cls._finalize_backend_mixin(name, dryrun)
def _calc_checksum_threadsafe(self, secret):
# as workaround for pybcrypt < 0.3's concurrency issue,
# we wrap everything in a thread lock. as long as bcrypt is only
# used through zdppy_password_hash, this should be safe.
with self._calc_lock:
return self._calc_checksum_raw(secret)
def _calc_checksum_raw(self, secret):
# py-bcrypt behavior:
# py2: unicode secret/hash encoded as ascii bytes before use,
# bytes taken as-is; returns ascii bytes.
# py3: unicode secret encoded as utf-8 bytes,
# hash encoded as ascii bytes, returns ascii unicode.
secret, ident = self._prepare_digest_args(secret)
config = self._get_config(ident)
hash = _pybcrypt.hashpw(secret, config)
if not hash.startswith(config) or len(hash) != len(config) + 31:
raise uh.exc.CryptBackendError(self, config, hash, source="pybcrypt library")
return str_to_uascii(hash[-31:])
_calc_checksum = _calc_checksum_raw
#-----------------------------------------------------------------------
# os crypt backend
#-----------------------------------------------------------------------
class _OsCryptBackend(_BcryptCommon):
"""
backend which uses :func:`crypt.crypt`
"""
#: set flag to ensure _prepare_digest_args() doesn't create invalid utf8 string
#: when truncating bytes.
_require_valid_utf8_bytes = not crypt_accepts_bytes
@classmethod
def _load_backend_mixin(mixin_cls, name, dryrun):
if not test_crypt("test", TEST_HASH_2A):
return False
return mixin_cls._finalize_backend_mixin(name, dryrun)
def _calc_checksum(self, secret):
#
# run secret through crypt.crypt().
# if everything goes right, we'll get back a properly formed bcrypt hash.
#
secret, ident = self._prepare_digest_args(secret)
config = self._get_config(ident)
hash = safe_crypt(secret, config)
if hash is not None:
if not hash.startswith(config) or len(hash) != len(config) + 31:
raise uh.exc.CryptBackendError(self, config, hash)
return hash[-31:]
#
# Check if this failed due to non-UTF8 bytes
# In detail: under py3, crypt.crypt() requires unicode inputs, which are then encoded to
# utf8 before being passed to the os crypt() call. this is done according to the "s" format
# specifier for PyArg_ParseTuple (https://docs.python.org/3/c-api/arg.html).
# There appears to be no way to get around that in order to pass raw bytes; so we just
# throw an error here to let the user know they need to use another backend
# if they want raw bytes support.
#
# XXX: maybe just let safe_crypt() throw UnicodeDecodeError under zdppy_password_hash 2.0,
# and then catch it above? maybe have safe_crypt ALWAYS throw error
# instead of returning None? (would save re-detecting what went wrong)
# XXX: isn't secret ALWAYS bytes at this point?
#
if PY3 and isinstance(secret, bytes):
try:
secret.decode("utf-8")
except UnicodeDecodeError:
raise error_from(uh.exc.PasswordValueError(
"python3 crypt.crypt() ony supports bytes passwords using UTF8; "
"zdppy_password_hash recommends running `pip install bcrypt` for general bcrypt support.",
), None)
#
# else crypt() call failed for unknown reason.
#
# NOTE: getting here should be considered a bug in zdppy_password_hash --
# if os_crypt backend detection said there's support,
# and we've already checked all known reasons above;
# want them to file bug so we can figure out what happened.
# in the meantime, users can avoid this by installing the bcrypt-cffi backend,
# which won't have these (or the utf8) edge cases.
#
# XXX: throw something more specific, like an "InternalBackendError"?
# NOTE: if do change this error, need to update test_81_crypt_fallback() expectations
# about what will be thrown; as well as safe_verify() above.
#
debug_only_repr = uh.exc.debug_only_repr
raise uh.exc.InternalBackendError(
"crypt.crypt() failed for unknown reason; "
"zdppy_password_hash recommends running `pip install bcrypt` for general bcrypt support."
# for debugging UTs --
"(config=%s, secret=%s)" % (debug_only_repr(config), debug_only_repr(secret)),
)
#-----------------------------------------------------------------------
# builtin backend
#-----------------------------------------------------------------------
class _BuiltinBackend(_BcryptCommon):
"""
backend which uses zdppy_password_hash's pure-python implementation
"""
@classmethod
def _load_backend_mixin(mixin_cls, name, dryrun):
from zdppy_password_hash.utils import as_bool
if not as_bool(os.environ.get("PASSLIB_BUILTIN_BCRYPT")):
log.debug("bcrypt 'builtin' backend not enabled via $PASSLIB_BUILTIN_BCRYPT")
return False
global _builtin_bcrypt
from zdppy_password_hash.crypto._blowfish import raw_bcrypt as _builtin_bcrypt
return mixin_cls._finalize_backend_mixin(name, dryrun)
def _calc_checksum(self, secret):
secret, ident = self._prepare_digest_args(secret)
chk = _builtin_bcrypt(secret, ident[1:-1],
self.salt.encode("ascii"), self.rounds)
return chk.decode("ascii")
#=============================================================================
# handler
#=============================================================================
class bcrypt(_NoBackend, _BcryptCommon):
"""This class implements the BCrypt password hash, and follows the :ref:`password-hash-api`.
It supports a fixed-length salt, and a variable number of rounds.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts the following optional keywords:
:type salt: str
:param salt:
Optional salt string.
If not specified, one will be autogenerated (this is recommended).
If specified, it must be 22 characters, drawn from the regexp range ``[./0-9A-Za-z]``.
:type rounds: int
:param rounds:
Optional number of rounds to use.
Defaults to 12, must be between 4 and 31, inclusive.
This value is logarithmic, the actual number of iterations used will be :samp:`2**{rounds}`
-- increasing the rounds by +1 will double the amount of time taken.
:type ident: str
:param ident:
Specifies which version of the BCrypt algorithm will be used when creating a new hash.
Typically this option is not needed, as the default (``"2b"``) is usually the correct choice.
If specified, it must be one of the following:
* ``"2"`` - the first revision of BCrypt, which suffers from a minor security flaw and is generally not used anymore.
* ``"2a"`` - some implementations suffered from rare security flaws, replaced by 2b.
* ``"2y"`` - format specific to the *crypt_blowfish* BCrypt implementation,
identical to ``"2b"`` in all but name.
* ``"2b"`` - latest revision of the official BCrypt algorithm, current default.
:param bool truncate_error:
By default, BCrypt will silently truncate passwords larger than 72 bytes.
Setting ``truncate_error=True`` will cause :meth:`~zdppy_password_hash.ifc.PasswordHash.hash`
to raise a :exc:`~zdppy_password_hash.exc.PasswordTruncateError` instead.
.. versionadded:: 1.7
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~zdppy_password_hash.exc.PasslibHashWarning`
will be issued instead. Correctable errors include ``rounds``
that are too small or too large, and ``salt`` strings that are too long.
.. versionadded:: 1.6
.. versionchanged:: 1.6
This class now supports ``"2y"`` hashes, and recognizes
(but does not support) the broken ``"2x"`` hashes.
(see the :ref:`crypt_blowfish bug <crypt-blowfish-bug>`
for details).
.. versionchanged:: 1.6
Added a pure-python backend.
.. versionchanged:: 1.6.3
Added support for ``"2b"`` variant.
.. versionchanged:: 1.7
Now defaults to ``"2b"`` variant.
"""
#=============================================================================
# backend
#=============================================================================
# NOTE: the brunt of the bcrypt class is implemented in _BcryptCommon.
# there is then a subclass for each backend (e.g. _PyBcryptBackend);
# these are dynamically prepended to this class's bases
# in order to load the appropriate backend.
#: list of potential backends
backends = ("bcrypt", "pybcrypt", "bcryptor", "os_crypt", "builtin")
#: flag that this class's bases should be modified by SubclassBackendMixin
_backend_mixin_target = True
#: map of backend -> mixin class, used by _get_backend_loader()
_backend_mixin_map = {
None: _NoBackend,
"bcrypt": _BcryptBackend,
"pybcrypt": _PyBcryptBackend,
"bcryptor": _BcryptorBackend,
"os_crypt": _OsCryptBackend,
"builtin": _BuiltinBackend,
}
#=============================================================================
# eoc
#=============================================================================
#=============================================================================
# variants
#=============================================================================
_UDOLLAR = u("$")
# XXX: it might be better to have all the bcrypt variants share a common base class,
# and have the (django_)bcrypt_sha256 wrappers just proxy bcrypt instead of subclassing it.
class _wrapped_bcrypt(bcrypt):
"""
abstracts out some bits bcrypt_sha256 & django_bcrypt_sha256 share.
- bypass backend-loading wrappers for hash() etc
- disable truncation support, sha256 wrappers don't need it.
"""
setting_kwds = tuple(elem for elem in bcrypt.setting_kwds if elem not in ["truncate_error"])
truncate_size = None
# XXX: these will be needed if any bcrypt backends directly implement this...
# @classmethod
# def hash(cls, secret, **kwds):
# # bypass bcrypt backend overriding this method
# # XXX: would wrapping bcrypt make this easier than subclassing it?
# return super(_BcryptCommon, cls).hash(secret, **kwds)
#
# @classmethod
# def verify(cls, secret, hash):
# # bypass bcrypt backend overriding this method
# return super(_BcryptCommon, cls).verify(secret, hash)
#
# @classmethod
# def genhash(cls, secret, hash):
# # bypass bcrypt backend overriding this method
# return super(_BcryptCommon, cls).genhash(secret, hash)
@classmethod
def _check_truncate_policy(cls, secret):
# disable check performed by bcrypt(), since this doesn't truncate passwords.
pass
#=============================================================================
# bcrypt sha256 wrapper
#=============================================================================
class bcrypt_sha256(_wrapped_bcrypt):
"""
This class implements a composition of BCrypt + HMAC_SHA256,
and follows the :ref:`password-hash-api`.
It supports a fixed-length salt, and a variable number of rounds.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.hash` and :meth:`~zdppy_password_hash.ifc.PasswordHash.genconfig` methods accept
all the same optional keywords as the base :class:`bcrypt` hash.
.. versionadded:: 1.6.2
.. versionchanged:: 1.7
Now defaults to ``"2b"`` bcrypt variant; though supports older hashes
generated using the ``"2a"`` bcrypt variant.
.. versionchanged:: 1.7.3
For increased security, updated to use HMAC-SHA256 instead of plain SHA256.
Now only supports the ``"2b"`` bcrypt variant. Hash format updated to "v=2".
"""
#===================================================================
# class attrs
#===================================================================
#--------------------
# PasswordHash
#--------------------
name = "bcrypt_sha256"
#--------------------
# GenericHandler
#--------------------
# this is locked at 2b for now (with 2a allowed only for legacy v1 format)
ident_values = (IDENT_2A, IDENT_2B)
# clone bcrypt's ident aliases so they can be used here as well...
ident_aliases = (lambda ident_values: dict(item for item in bcrypt.ident_aliases.items()
if item[1] in ident_values))(ident_values)
default_ident = IDENT_2B
#--------------------
# class specific
#--------------------
_supported_versions = set([1, 2])
#===================================================================
# instance attrs
#===================================================================
#: wrapper version.
#: v1 -- used prior to zdppy_password_hash 1.7.3; performs ``bcrypt(sha256(secret), salt, cost)``
#: v2 -- new in zdppy_password_hash 1.7.3; performs ``bcrypt(sha256_hmac(salt, secret), salt, cost)``
version = 2
#===================================================================
# configuration
#===================================================================
@classmethod
def using(cls, version=None, **kwds):
subcls = super(bcrypt_sha256, cls).using(**kwds)
if version is not None:
subcls.version = subcls._norm_version(version)
ident = subcls.default_ident
if subcls.version > 1 and ident != IDENT_2B:
raise ValueError("bcrypt %r hashes not allowed for version %r" %
(ident, subcls.version))
return subcls
#===================================================================
# formatting
#===================================================================
# sample hash:
# $bcrypt-sha256$2a,6$/3OeRpbOf8/l6nPPRdZPp.$nRiyYqPobEZGdNRBWihQhiFDh1ws1tu
# $bcrypt-sha256$ -- prefix/identifier
# 2a -- bcrypt variant
# , -- field separator
# 6 -- bcrypt work factor
# $ -- section separator
# /3OeRpbOf8/l6nPPRdZPp. -- salt
# $ -- section separator
# nRiyYqPobEZGdNRBWihQhiFDh1ws1tu -- digest
# XXX: we can't use .ident attr due to bcrypt code using it.
# working around that via prefix.
prefix = u('$bcrypt-sha256$')
#: current version 2 hash format
_v2_hash_re = re.compile(r"""(?x)
^
[$]bcrypt-sha256[$]
v=(?P<version>\d+),
t=(?P<type>2b),
r=(?P<rounds>\d{1,2})
[$](?P<salt>[^$]{22})
(?:[$](?P<digest>[^$]{31}))?
$
""")
#: old version 1 hash format
_v1_hash_re = re.compile(r"""(?x)
^
[$]bcrypt-sha256[$]
(?P<type>2[ab]),
(?P<rounds>\d{1,2})
[$](?P<salt>[^$]{22})
(?:[$](?P<digest>[^$]{31}))?
$
""")
@classmethod
def identify(cls, hash):
hash = uh.to_unicode_for_identify(hash)
if not hash:
return False
return hash.startswith(cls.prefix)
@classmethod
def from_string(cls, hash):
hash = to_unicode(hash, "ascii", "hash")
if not hash.startswith(cls.prefix):
raise uh.exc.InvalidHashError(cls)
m = cls._v2_hash_re.match(hash)
if m:
version = int(m.group("version"))
if version < 2:
raise uh.exc.MalformedHashError(cls)
else:
m = cls._v1_hash_re.match(hash)
if m:
version = 1
else:
raise uh.exc.MalformedHashError(cls)
rounds = m.group("rounds")
if rounds.startswith(uh._UZERO) and rounds != uh._UZERO:
raise uh.exc.ZeroPaddedRoundsError(cls)
return cls(
version=version,
ident=m.group("type"),
rounds=int(rounds),
salt=m.group("salt"),
checksum=m.group("digest"),
)
_v2_template = u("$bcrypt-sha256$v=2,t=%s,r=%d$%s$%s")
_v1_template = u("$bcrypt-sha256$%s,%d$%s$%s")
def to_string(self):
if self.version == 1:
template = self._v1_template
else:
template = self._v2_template
hash = template % (self.ident.strip(_UDOLLAR), self.rounds, self.salt, self.checksum)
return uascii_to_str(hash)
#===================================================================
# init
#===================================================================
def __init__(self, version=None, **kwds):
if version is not None:
self.version = self._norm_version(version)
super(bcrypt_sha256, self).__init__(**kwds)
#===================================================================
# version
#===================================================================
@classmethod
def _norm_version(cls, version):
if version not in cls._supported_versions:
raise ValueError("%s: unknown or unsupported version: %r" % (cls.name, version))
return version
#===================================================================
# checksum
#===================================================================
def _calc_checksum(self, secret):
# NOTE: can't use digest directly, since bcrypt stops at first NULL.
# NOTE: bcrypt doesn't fully mix entropy for bytes 55-72 of password
# (XXX: citation needed), so we don't want key to be > 55 bytes.
# thus, have to use base64 (44 bytes) rather than hex (64 bytes).
# XXX: it's since come out that bytes 55-72 may be ok, so a later revision
# of bcrypt_sha256 may switch to hex encoding, since it's simpler to implement elsewhere.
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
if self.version == 1:
# version 1 -- old version just ran secret through sha256(),
# though this could be vulnerable to a breach attack
# (c.f. issue 114); which is why v2 switched to hmac wrapper.
digest = sha256(secret).digest()
else:
# version 2 -- running secret through HMAC keyed off salt.
# this prevents known secret -> sha256 password tables from being
# used to test against a bcrypt_sha256 hash.
# keying off salt (instead of constant string) should minimize chances of this
# colliding with existing table of hmac digest lookups as well.
# NOTE: salt in this case is the "bcrypt64"-encoded value, not the raw salt bytes,
# to make things easier for parallel implementations of this hash --
# saving them the trouble of implementing a "bcrypt64" decoder.
salt = self.salt
if salt[-1] not in self.final_salt_chars:
# forbidding salts with padding bits set, because bcrypt implementations
# won't consistently hash them the same. since we control this format,
# just prevent these from even getting used.
raise ValueError("invalid salt string")
digest = compile_hmac("sha256", salt.encode("ascii"))(secret)
# NOTE: output of b64encode() uses "+/" altchars, "=" padding chars,
# and no leading/trailing whitespace.
key = b64encode(digest)
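# NOTE: for clarity, the v2 key derivation above is equivalent to this
# stdlib-only sketch (compile_hmac() is just a prebound hmac wrapper):
##
## import hashlib, hmac
## from base64 import b64encode
## key = b64encode(hmac.new(salt.encode("ascii"), secret,
##                          hashlib.sha256).digest())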
# hand result off to normal bcrypt algorithm
return super(bcrypt_sha256, self)._calc_checksum(key)
#===================================================================
# other
#===================================================================
def _calc_needs_update(self, **kwds):
if self.version < type(self).version:
return True
return super(bcrypt_sha256, self)._calc_needs_update(**kwds)
#===================================================================
# eoc
#===================================================================
#=============================================================================
# eof
#=============================================================================
# (end of file: zdppy_password_hash/handlers/bcrypt.py)
#=============================================================================
# imports
#=============================================================================
# core
from hashlib import md5
import logging; log = logging.getLogger(__name__)
# site
# pkg
from zdppy_password_hash.utils import safe_crypt, test_crypt, repeat_string
from zdppy_password_hash.utils.binary import h64
from zdppy_password_hash.utils.compat import unicode, u
import zdppy_password_hash.utils.handlers as uh
# local
__all__ = [
"md5_crypt",
"apr_md5_crypt",
]
#=============================================================================
# pure-python backend
#=============================================================================
_BNULL = b"\x00"
_MD5_MAGIC = b"$1$"
_APR_MAGIC = b"$apr1$"
# pre-calculated offsets used to speed up C digest stage (see notes below).
# sequence generated using the following:
##perms_order = "p,pp,ps,psp,sp,spp".split(",")
##def offset(i):
## key = (("p" if i % 2 else "") + ("s" if i % 3 else "") +
## ("p" if i % 7 else "") + ("" if i % 2 else "p"))
## return perms_order.index(key)
##_c_digest_offsets = [(offset(i), offset(i+1)) for i in range(0,42,2)]
_c_digest_offsets = (
(0, 3), (5, 1), (5, 3), (1, 2), (5, 1), (5, 3), (1, 3),
(4, 1), (5, 3), (1, 3), (5, 0), (5, 3), (1, 3), (5, 1),
(4, 3), (1, 3), (5, 1), (5, 2), (1, 3), (5, 1), (5, 3),
)
# map used to transpose bytes when encoding final digest
_transpose_map = (12, 6, 0, 13, 7, 1, 14, 8, 2, 15, 9, 3, 5, 10, 4, 11)
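# NOTE: encode_transposed_bytes() emits source[offset] for each offset above --
# i.e. output byte 0 comes from digest[12], byte 1 from digest[6], etc. --
# reproducing the byte order used by the original md5-crypt implementation.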
def _raw_md5_crypt(pwd, salt, use_apr=False):
"""perform raw md5-crypt calculation
this function provides a pure-python implementation of the internals
for the MD5-Crypt algorithms; it doesn't handle any of the
parsing/validation of the hash strings themselves.
:arg pwd: password chars/bytes to hash
:arg salt: salt chars to use
:arg use_apr: use apache variant
:returns:
encoded checksum chars
"""
# NOTE: regarding 'apr' format:
# really, apache? you had to invent a whole new "$apr1$" format,
# when all you did was change the ident incorporated into the hash?
# would love to find webpage explaining why just using a portable
# implementation of $1$ wasn't sufficient. *nothing else* was changed.
#===================================================================
# init & validate inputs
#===================================================================
# validate secret
# XXX: not sure what official unicode policy is, using this as default
if isinstance(pwd, unicode):
pwd = pwd.encode("utf-8")
assert isinstance(pwd, bytes), "pwd not unicode or bytes"
if _BNULL in pwd:
raise uh.exc.NullPasswordError(md5_crypt)
pwd_len = len(pwd)
# validate salt - should have been taken care of by caller
assert isinstance(salt, unicode), "salt not unicode"
salt = salt.encode("ascii")
assert len(salt) < 9, "salt too large"
# NOTE: spec says salts larger than 8 bytes should be truncated,
# instead of causing an error. this function assumes that's been
# taken care of by the handler class.
# load APR specific constants
if use_apr:
magic = _APR_MAGIC
else:
magic = _MD5_MAGIC
#===================================================================
# digest B - used as subinput to digest A
#===================================================================
db = md5(pwd + salt + pwd).digest()
#===================================================================
# digest A - used to initialize first round of digest C
#===================================================================
# start out with pwd + magic + salt
a_ctx = md5(pwd + magic + salt)
a_ctx_update = a_ctx.update
# add pwd_len bytes of db, repeating db as many times as needed.
a_ctx_update(repeat_string(db, pwd_len))
# add null chars & first char of password
# NOTE: this may have historically been a bug,
# where they meant to use db[0] instead of _BNULL,
# but the original code memclear'ed db,
# and now all implementations have to use this.
i = pwd_len
evenchar = pwd[:1]
while i:
a_ctx_update(_BNULL if i & 1 else evenchar)
i >>= 1
# finish A
da = a_ctx.digest()
#===================================================================
# digest C - for 1000 rounds, combine A, S, and P
# digests in various ways; in order to burn CPU time.
#===================================================================
# NOTE: the original MD5-Crypt implementation performs the C digest
# calculation using the following loop:
#
##dc = da
##i = 0
##while i < rounds:
## tmp_ctx = md5(pwd if i & 1 else dc)
## if i % 3:
## tmp_ctx.update(salt)
## if i % 7:
## tmp_ctx.update(pwd)
## tmp_ctx.update(dc if i & 1 else pwd)
## dc = tmp_ctx.digest()
## i += 1
#
# The code Passlib uses (below) implements an equivalent algorithm,
# it's just been heavily optimized to pre-calculate a large number
# of things beforehand. It works off of a couple of observations
# about the original algorithm:
#
# 1. each round is a combination of 'dc', 'salt', and 'pwd'; and the exact
# combination is determined by whether 'i' is a multiple of 2, 3, and/or 7.
# 2. since lcm(2,3,7)==42, the series of combinations will repeat
# every 42 rounds.
# 3. even rounds 0-40 consist of 'hash(dc + round-specific-constant)';
# while odd rounds 1-41 consist of hash(round-specific-constant + dc)
#
# Using these observations, the following code...
# * calculates the round-specific combination of salt & pwd for each round 0-41
# * runs through as many 42-round blocks as possible (23)
# * runs through as many pairs of rounds as needed for remaining rounds (17)
# * this results in the 42*23 + 2*17 = 1000 rounds required by md5_crypt.
#
# this cuts out a lot of the control overhead incurred when running the
# original loop 1000 times in python, resulting in ~20% increase in
# speed under CPython (though still 2x slower than glibc crypt)
# prepare the 6 combinations of pwd & salt which are needed
# (order of 'perms' must match how _c_digest_offsets was generated)
pwd_pwd = pwd+pwd
pwd_salt = pwd+salt
perms = [pwd, pwd_pwd, pwd_salt, pwd_salt+pwd, salt+pwd, salt+pwd_pwd]
# build up list of even-round & odd-round constants,
# and store in 21-element list as (even,odd) pairs.
data = [ (perms[even], perms[odd]) for even, odd in _c_digest_offsets]
# perform 23 blocks of 42 rounds each (for a total of 966 rounds)
dc = da
blocks = 23
while blocks:
for even, odd in data:
dc = md5(odd + md5(dc + even).digest()).digest()
blocks -= 1
# perform 17 more pairs of rounds (34 more rounds, for a total of 1000)
for even, odd in data[:17]:
dc = md5(odd + md5(dc + even).digest()).digest()
#===================================================================
# encode digest using appropriate transpose map
#===================================================================
return h64.encode_transposed_bytes(dc, _transpose_map).decode("ascii")
#=============================================================================
# handler
#=============================================================================
class _MD5_Common(uh.HasSalt, uh.GenericHandler):
"""common code for md5_crypt and apr_md5_crypt"""
#===================================================================
# class attrs
#===================================================================
# name - set in subclass
setting_kwds = ("salt", "salt_size")
# ident - set in subclass
checksum_size = 22
checksum_chars = uh.HASH64_CHARS
max_salt_size = 8
salt_chars = uh.HASH64_CHARS
#===================================================================
# methods
#===================================================================
@classmethod
def from_string(cls, hash):
salt, chk = uh.parse_mc2(hash, cls.ident, handler=cls)
return cls(salt=salt, checksum=chk)
def to_string(self):
return uh.render_mc2(self.ident, self.salt, self.checksum)
# _calc_checksum() - provided by subclass
#===================================================================
# eoc
#===================================================================
class md5_crypt(uh.HasManyBackends, _MD5_Common):
"""This class implements the MD5-Crypt password hash, and follows the :ref:`password-hash-api`.
It supports a variable-length salt.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts the following optional keywords:
:type salt: str
:param salt:
Optional salt string.
If not specified, one will be autogenerated (this is recommended).
If specified, it must be 0-8 characters, drawn from the regexp range ``[./0-9A-Za-z]``.
:type salt_size: int
:param salt_size:
Optional number of characters to use when autogenerating new salts.
Defaults to 8, but can be any value between 0 and 8.
(This is mainly needed when generating Cisco-compatible hashes,
which require ``salt_size=4``).
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~zdppy_password_hash.exc.PasslibHashWarning`
will be issued instead. Correctable errors include
``salt`` strings that are too long.
.. versionadded:: 1.6
"""
#===================================================================
# class attrs
#===================================================================
name = "md5_crypt"
ident = u("$1$")
#===================================================================
# methods
#===================================================================
# FIXME: can't find definitive policy on how md5-crypt handles non-ascii.
# all backends currently coerce -> utf-8
backends = ("os_crypt", "builtin")
#---------------------------------------------------------------
# os_crypt backend
#---------------------------------------------------------------
@classmethod
def _load_backend_os_crypt(cls):
if test_crypt("test", '$1$test$pi/xDtU5WFVRqYS6BMU8X/'):
cls._set_calc_checksum_backend(cls._calc_checksum_os_crypt)
return True
else:
return False
def _calc_checksum_os_crypt(self, secret):
config = self.ident + self.salt
hash = safe_crypt(secret, config)
if hash is None:
# py3's crypt.crypt() can't handle non-utf8 bytes.
# fallback to builtin alg, which is always available.
return self._calc_checksum_builtin(secret)
if not hash.startswith(config) or len(hash) != len(config) + 23:
raise uh.exc.CryptBackendError(self, config, hash)
return hash[-22:]
#---------------------------------------------------------------
# builtin backend
#---------------------------------------------------------------
@classmethod
def _load_backend_builtin(cls):
cls._set_calc_checksum_backend(cls._calc_checksum_builtin)
return True
def _calc_checksum_builtin(self, secret):
return _raw_md5_crypt(secret, self.salt)
#===================================================================
# eoc
#===================================================================
class apr_md5_crypt(_MD5_Common):
"""This class implements the Apr-MD5-Crypt password hash, and follows the :ref:`password-hash-api`.
It supports a variable-length salt.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts the following optional keywords:
:type salt: str
:param salt:
Optional salt string.
If not specified, one will be autogenerated (this is recommended).
If specified, it must be 0-8 characters, drawn from the regexp range ``[./0-9A-Za-z]``.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~zdppy_password_hash.exc.PasslibHashWarning`
will be issued instead. Correctable errors include
``salt`` strings that are too long.
.. versionadded:: 1.6
"""
#===================================================================
# class attrs
#===================================================================
name = "apr_md5_crypt"
ident = u("$apr1$")
#===================================================================
# methods
#===================================================================
def _calc_checksum(self, secret):
return _raw_md5_crypt(secret, self.salt, use_apr=True)
#===================================================================
# eoc
#===================================================================
#=============================================================================
# eof
#=============================================================================
# (end of file: zdppy_password_hash/handlers/md5_crypt.py)
from hashlib import md5
import logging; log = logging.getLogger(__name__)
# site
# pkg
from zdppy_password_hash.utils.binary import h64
from zdppy_password_hash.utils.compat import u, uascii_to_str, unicode
import zdppy_password_hash.utils.handlers as uh
# local
__all__ = [
"phpass",
]
#=============================================================================
# phpass
#=============================================================================
class phpass(uh.HasManyIdents, uh.HasRounds, uh.HasSalt, uh.GenericHandler):
"""This class implements the PHPass Portable Hash, and follows the :ref:`password-hash-api`.
It supports a fixed-length salt, and a variable number of rounds.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts the following optional keywords:
:type salt: str
:param salt:
Optional salt string.
If not specified, one will be autogenerated (this is recommended).
If specified, it must be 8 characters, drawn from the regexp range ``[./0-9A-Za-z]``.
:type rounds: int
:param rounds:
Optional number of rounds to use.
Defaults to 19, must be between 7 and 30, inclusive.
This value is logarithmic, the actual number of iterations used will be :samp:`2**{rounds}`.
:type ident: str
:param ident:
phpBB3 uses ``H`` instead of ``P`` for its identifier,
this may be set to ``H`` in order to generate phpBB3 compatible hashes.
it defaults to ``P``.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~zdppy_password_hash.exc.PasslibHashWarning`
will be issued instead. Correctable errors include ``rounds``
that are too small or too large, and ``salt`` strings that are too long.
.. versionadded:: 1.6
"""
#===================================================================
# class attrs
#===================================================================
#--GenericHandler--
name = "phpass"
setting_kwds = ("salt", "rounds", "ident")
checksum_chars = uh.HASH64_CHARS
#--HasSalt--
min_salt_size = max_salt_size = 8
salt_chars = uh.HASH64_CHARS
#--HasRounds--
default_rounds = 19
min_rounds = 7
max_rounds = 30
rounds_cost = "log2"
#--HasManyIdents--
default_ident = u("$P$")
ident_values = (u("$P$"), u("$H$"))
ident_aliases = {u("P"):u("$P$"), u("H"):u("$H$")}
#===================================================================
# formatting
#===================================================================
# sample hash: $P$9IQRaTwmfeRo7ud9Fh4E2PdI0S3r.L0
#   $P$                    -- ident
#   9                      -- rounds (h64-encoded log2 value)
#   IQRaTwmf               -- 8 char salt
#   eRo7ud9Fh4E2PdI0S3r.L0 -- 22 char checksum
@classmethod
def from_string(cls, hash):
ident, data = cls._parse_ident(hash)
rounds, salt, chk = data[0], data[1:9], data[9:]
return cls(
ident=ident,
rounds=h64.decode_int6(rounds.encode("ascii")),
salt=salt,
checksum=chk or None,
)
def to_string(self):
hash = u("%s%s%s%s") % (self.ident,
h64.encode_int6(self.rounds).decode("ascii"),
self.salt,
self.checksum or u(''))
return uascii_to_str(hash)
#===================================================================
# backend
#===================================================================
def _calc_checksum(self, secret):
# FIXME: can't find definitive policy on how phpass handles non-ascii.
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
real_rounds = 1<<self.rounds
result = md5(self.salt.encode("ascii") + secret).digest()
r = 0
while r < real_rounds:
result = md5(result + secret).digest()
r += 1
return h64.encode_bytes(result).decode("ascii")
#===================================================================
# eoc
#===================================================================
#=============================================================================
# eof
#=============================================================================
# (end of file: zdppy_password_hash/handlers/phpass.py)
# core
import logging; log = logging.getLogger(__name__)
# site
# pkg
from zdppy_password_hash.utils import safe_crypt, test_crypt
from zdppy_password_hash.utils.binary import h64
from zdppy_password_hash.utils.compat import u, unicode, irange
from zdppy_password_hash.crypto.digest import compile_hmac
import zdppy_password_hash.utils.handlers as uh
# local
__all__ = [
]
#=============================================================================
# sha1-crypt
#=============================================================================
_BNULL = b'\x00'
class sha1_crypt(uh.HasManyBackends, uh.HasRounds, uh.HasSalt, uh.GenericHandler):
"""This class implements the SHA1-Crypt password hash, and follows the :ref:`password-hash-api`.
It supports a variable-length salt, and a variable number of rounds.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts the following optional keywords:
:type salt: str
:param salt:
Optional salt string.
If not specified, an 8 character one will be autogenerated (this is recommended).
If specified, it must be 0-64 characters, drawn from the regexp range ``[./0-9A-Za-z]``.
:type salt_size: int
:param salt_size:
Optional number of bytes to use when autogenerating new salts.
Defaults to 8 bytes, but can be any value between 0 and 64.
:type rounds: int
:param rounds:
Optional number of rounds to use.
Defaults to 480000, must be between 1 and 4294967295, inclusive.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~zdppy_password_hash.exc.PasslibHashWarning`
will be issued instead. Correctable errors include ``rounds``
that are too small or too large, and ``salt`` strings that are too long.
.. versionadded:: 1.6
"""
#===================================================================
# class attrs
#===================================================================
#--GenericHandler--
name = "sha1_crypt"
setting_kwds = ("salt", "salt_size", "rounds")
ident = u("$sha1$")
checksum_size = 28
checksum_chars = uh.HASH64_CHARS
#--HasSalt--
default_salt_size = 8
max_salt_size = 64
salt_chars = uh.HASH64_CHARS
#--HasRounds--
default_rounds = 480000 # current zdppy_password_hash default
min_rounds = 1 # really, this should be higher.
max_rounds = 4294967295 # 32-bit integer limit
rounds_cost = "linear"
#===================================================================
# formatting
#===================================================================
@classmethod
def from_string(cls, hash):
rounds, salt, chk = uh.parse_mc3(hash, cls.ident, handler=cls)
return cls(rounds=rounds, salt=salt, checksum=chk)
def to_string(self, config=False):
chk = None if config else self.checksum
return uh.render_mc3(self.ident, self.rounds, self.salt, chk)
#===================================================================
# backend
#===================================================================
backends = ("os_crypt", "builtin")
#---------------------------------------------------------------
# os_crypt backend
#---------------------------------------------------------------
@classmethod
def _load_backend_os_crypt(cls):
if test_crypt("test", '$sha1$1$Wq3GL2Vp$C8U25GvfHS8qGHim'
'ExLaiSFlGkAe'):
cls._set_calc_checksum_backend(cls._calc_checksum_os_crypt)
return True
else:
return False
def _calc_checksum_os_crypt(self, secret):
config = self.to_string(config=True)
hash = safe_crypt(secret, config)
if hash is None:
# py3's crypt.crypt() can't handle non-utf8 bytes.
# fallback to builtin alg, which is always available.
return self._calc_checksum_builtin(secret)
if not hash.startswith(config) or len(hash) != len(config) + 29:
raise uh.exc.CryptBackendError(self, config, hash)
return hash[-28:]
#---------------------------------------------------------------
# builtin backend
#---------------------------------------------------------------
@classmethod
def _load_backend_builtin(cls):
cls._set_calc_checksum_backend(cls._calc_checksum_builtin)
return True
def _calc_checksum_builtin(self, secret):
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
if _BNULL in secret:
raise uh.exc.NullPasswordError(self)
rounds = self.rounds
# NOTE: this seed value is NOT the same as the config string
result = (u("%s$sha1$%s") % (self.salt, rounds)).encode("ascii")
# NOTE: this algorithm is essentially PBKDF1, modified to use HMAC.
keyed_hmac = compile_hmac("sha1", secret)
for _ in irange(rounds):
result = keyed_hmac(result)
return h64.encode_transposed_bytes(result, self._chk_offsets).decode("ascii")
_chk_offsets = [
2,1,0,
5,4,3,
8,7,6,
11,10,9,
14,13,12,
17,16,15,
0,19,18,
]
#===================================================================
# eoc
#===================================================================
#=============================================================================
# eof
#=============================================================================
# (end of file: zdppy_password_hash/handlers/sha1_crypt.py)
#=============================================================================
# imports
#=============================================================================
# core
import sys
import logging; log = logging.getLogger(__name__)
from warnings import warn
# site
# pkg
from zdppy_password_hash.utils import to_native_str, str_consteq
from zdppy_password_hash.utils.compat import unicode, u, unicode_or_bytes_types
import zdppy_password_hash.utils.handlers as uh
# local
__all__ = [
"unix_disabled",
"unix_fallback",
"plaintext",
]
#=============================================================================
# handler
#=============================================================================
class unix_fallback(uh.ifc.DisabledHash, uh.StaticHandler):
"""This class provides the fallback behavior for unix shadow files, and follows the :ref:`password-hash-api`.
This class does not implement a hash, but instead provides fallback
behavior as found in /etc/shadow on most unix variants.
If used, should be the last scheme in the context.
* this class will positively identify all hash strings.
* for security, passwords will always hash to ``!``.
* it rejects all passwords if the hash is NOT an empty string (``!`` or ``*`` are frequently used).
* by default it rejects all passwords if the hash is an empty string,
but if ``enable_wildcard=True`` is passed to verify(),
all passwords will be allowed through if the hash is an empty string.
.. deprecated:: 1.6
This has been deprecated due to its "wildcard" feature,
and will be removed in Passlib 1.8. Use :class:`unix_disabled` instead.
"""
name = "unix_fallback"
context_kwds = ("enable_wildcard",)
@classmethod
def identify(cls, hash):
if isinstance(hash, unicode_or_bytes_types):
return True
else:
raise uh.exc.ExpectedStringError(hash, "hash")
def __init__(self, enable_wildcard=False, **kwds):
warn("'unix_fallback' is deprecated, "
"and will be removed in Passlib 1.8; "
"please use 'unix_disabled' instead.",
DeprecationWarning)
super(unix_fallback, self).__init__(**kwds)
self.enable_wildcard = enable_wildcard
def _calc_checksum(self, secret):
if self.checksum:
# NOTE: hash will generally be "!", but we want to preserve
# it in case it's something else, like "*".
return self.checksum
else:
return u("!")
@classmethod
def verify(cls, secret, hash, enable_wildcard=False):
uh.validate_secret(secret)
if not isinstance(hash, unicode_or_bytes_types):
raise uh.exc.ExpectedStringError(hash, "hash")
elif hash:
return False
else:
return enable_wildcard
_MARKER_CHARS = u("*!")
_MARKER_BYTES = b"*!"
class unix_disabled(uh.ifc.DisabledHash, uh.MinimalHandler):
"""This class provides disabled password behavior for unix shadow files,
and follows the :ref:`password-hash-api`.
This class does not implement a hash, but instead matches the "disabled account"
strings found in ``/etc/shadow`` on most Unix variants. "encrypting" a password
will simply return the disabled account marker. It will reject all passwords,
no matter the hash string. The :meth:`~zdppy_password_hash.ifc.PasswordHash.hash`
method supports one optional keyword:
:type marker: str
:param marker:
Optional marker string which overrides the platform default
used to indicate a disabled account.
If not specified, this will default to ``"*"`` on BSD systems,
and use the Linux default ``"!"`` for all other platforms.
(:attr:`!unix_disabled.default_marker` will contain the default value)
.. versionadded:: 1.6
This class was added as a replacement for the now-deprecated
:class:`unix_fallback` class, which had some undesirable features.
"""
name = "unix_disabled"
setting_kwds = ("marker",)
context_kwds = ()
_disable_prefixes = tuple(str(_MARKER_CHARS))
# TODO: rename attr to 'marker'...
if 'bsd' in sys.platform: # pragma: no cover -- runtime detection
default_marker = u("*")
else:
# use the linux default for other systems
# (glibc also supports adding old hash after the marker
# so it can be restored later).
default_marker = u("!")
@classmethod
def using(cls, marker=None, **kwds):
subcls = super(unix_disabled, cls).using(**kwds)
if marker is not None:
if not cls.identify(marker):
raise ValueError("invalid marker: %r" % marker)
subcls.default_marker = marker
return subcls
@classmethod
def identify(cls, hash):
# NOTE: technically, anything in the /etc/shadow password field
# which isn't valid crypt() output counts as "disabled".
# but that's rather ambiguous, and it's hard to predict what
# valid output is for unknown crypt() implementations.
# so to be on the safe side, we only match things *known*
# to be disabled field indicators, and will add others
# as they are found. things beginning w/ "$" should *never* match.
#
# things currently matched:
# * linux uses "!"
# * bsd uses "*"
# * linux may use "!" + hash to disable but preserve original hash
# * linux counts empty string as "any password";
# this code recognizes it, but treats it the same as "!"
if isinstance(hash, unicode):
start = _MARKER_CHARS
elif isinstance(hash, bytes):
start = _MARKER_BYTES
else:
raise uh.exc.ExpectedStringError(hash, "hash")
return not hash or hash[0] in start
@classmethod
def verify(cls, secret, hash):
uh.validate_secret(secret)
if not cls.identify(hash): # handles typecheck
raise uh.exc.InvalidHashError(cls)
return False
@classmethod
def hash(cls, secret, **kwds):
if kwds:
uh.warn_hash_settings_deprecation(cls, kwds)
return cls.using(**kwds).hash(secret)
uh.validate_secret(secret)
marker = cls.default_marker
assert marker and cls.identify(marker)
return to_native_str(marker, param="marker")
@uh.deprecated_method(deprecated="1.7", removed="2.0")
@classmethod
def genhash(cls, secret, config, marker=None):
if not cls.identify(config):
raise uh.exc.InvalidHashError(cls)
elif config:
# preserve the existing str, since it might contain a disabled password hash ("!" + hash)
uh.validate_secret(secret)
return to_native_str(config, param="config")
else:
if marker is not None:
cls = cls.using(marker=marker)
return cls.hash(secret)
@classmethod
def disable(cls, hash=None):
out = cls.hash("")
if hash is not None:
hash = to_native_str(hash, param="hash")
if cls.identify(hash):
# extract original hash, so that we normalize marker
hash = cls.enable(hash)
if hash:
out += hash
return out
@classmethod
def enable(cls, hash):
hash = to_native_str(hash, param="hash")
for prefix in cls._disable_prefixes:
if hash.startswith(prefix):
orig = hash[len(prefix):]
if orig:
return orig
else:
raise ValueError("cannot restore original hash")
raise uh.exc.InvalidHashError(cls)
class plaintext(uh.MinimalHandler):
"""This class stores passwords in plaintext, and follows the :ref:`password-hash-api`.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.hash`, :meth:`~zdppy_password_hash.ifc.PasswordHash.genhash`, and :meth:`~zdppy_password_hash.ifc.PasswordHash.verify` methods all support the
following additional contextual keyword:
:type encoding: str
:param encoding:
This controls the character encoding to use (defaults to ``utf-8``).
This encoding will be used to encode :class:`!unicode` passwords
under Python 2, and decode :class:`!bytes` hashes under Python 3.
.. versionchanged:: 1.6
The ``encoding`` keyword was added.
"""
# NOTE: this is subclassed by ldap_plaintext
name = "plaintext"
setting_kwds = ()
context_kwds = ("encoding",)
default_encoding = "utf-8"
@classmethod
def identify(cls, hash):
if isinstance(hash, unicode_or_bytes_types):
return True
else:
raise uh.exc.ExpectedStringError(hash, "hash")
@classmethod
def hash(cls, secret, encoding=None):
uh.validate_secret(secret)
if not encoding:
encoding = cls.default_encoding
return to_native_str(secret, encoding, "secret")
@classmethod
def verify(cls, secret, hash, encoding=None):
if not encoding:
encoding = cls.default_encoding
hash = to_native_str(hash, encoding, "hash")
if not cls.identify(hash):
raise uh.exc.InvalidHashError(cls)
return str_consteq(cls.hash(secret, encoding), hash)
@uh.deprecated_method(deprecated="1.7", removed="2.0")
@classmethod
def genconfig(cls):
return cls.hash("")
@uh.deprecated_method(deprecated="1.7", removed="2.0")
@classmethod
def genhash(cls, secret, config, encoding=None):
# NOTE: 'config' is ignored, as this hash has no salting / etc
if not cls.identify(config):
raise uh.exc.InvalidHashError(cls)
return cls.hash(secret, encoding=encoding)
#=============================================================================
# eof
#=============================================================================
# (end of file: zdppy_password_hash/handlers/misc.py)
from binascii import hexlify, unhexlify
from hashlib import sha1
import re
import logging; log = logging.getLogger(__name__)
from warnings import warn
# site
# pkg
from zdppy_password_hash.utils import consteq
from zdppy_password_hash.utils.compat import bascii_to_str, unicode, u
import zdppy_password_hash.utils.handlers as uh
# local
__all__ = [
"mssql2000",
"mssql2005",
]
#=============================================================================
# mssql 2000
#=============================================================================
def _raw_mssql(secret, salt):
assert isinstance(secret, unicode)
assert isinstance(salt, bytes)
return sha1(secret.encode("utf-16-le") + salt).digest()
BIDENT = b"0x0100"
##BIDENT2 = b("\x01\x00")
UIDENT = u("0x0100")
def _ident_mssql(hash, csize, bsize):
"""common identify for mssql 2000/2005"""
if isinstance(hash, unicode):
if len(hash) == csize and hash.startswith(UIDENT):
return True
elif isinstance(hash, bytes):
if len(hash) == csize and hash.startswith(BIDENT):
return True
##elif len(hash) == bsize and hash.startswith(BIDENT2): # raw bytes
## return True
else:
raise uh.exc.ExpectedStringError(hash, "hash")
return False
def _parse_mssql(hash, csize, bsize, handler):
"""common parser for mssql 2000/2005; returns 4 byte salt + checksum"""
if isinstance(hash, unicode):
if len(hash) == csize and hash.startswith(UIDENT):
try:
return unhexlify(hash[6:].encode("utf-8"))
except TypeError: # raised when a bad char is found
pass
elif isinstance(hash, bytes):
# assumes ascii-compat encoding
assert isinstance(hash, bytes)
if len(hash) == csize and hash.startswith(BIDENT):
try:
return unhexlify(hash[6:])
except TypeError: # raised when a bad char is found
pass
##elif len(hash) == bsize and hash.startswith(BIDENT2): # raw bytes
## return hash[2:]
else:
raise uh.exc.ExpectedStringError(hash, "hash")
raise uh.exc.InvalidHashError(handler)
class mssql2000(uh.HasRawSalt, uh.HasRawChecksum, uh.GenericHandler):
"""This class implements the password hash used by MS-SQL 2000, and follows the :ref:`password-hash-api`.
It supports a fixed-length salt.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts the following optional keywords:
:type salt: bytes
:param salt:
Optional salt string.
If not specified, one will be autogenerated (this is recommended).
If specified, it must be 4 bytes in length.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~zdppy_password_hash.exc.PasslibHashWarning`
will be issued instead. Correctable errors include
``salt`` strings that are too long.
"""
#===================================================================
# algorithm information
#===================================================================
name = "mssql2000"
setting_kwds = ("salt",)
checksum_size = 40
min_salt_size = max_salt_size = 4
#===================================================================
# formatting
#===================================================================
# 0100 - 2 byte identifier
# 4 byte salt
# 20 byte checksum
# 20 byte checksum
# = 46 bytes
# encoded '0x' + 92 chars = 94
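# illustrative layout (placeholder fields, not a real hash):
#   "0x0100" + 8 hex chars (salt) + 40 hex chars (digest) + 40 hex chars (upper-case digest)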
@classmethod
def identify(cls, hash):
return _ident_mssql(hash, 94, 46)
@classmethod
def from_string(cls, hash):
data = _parse_mssql(hash, 94, 46, cls)
return cls(salt=data[:4], checksum=data[4:])
def to_string(self):
raw = self.salt + self.checksum
# NOTE: a raw-bytes variant would be BIDENT2 + raw; we return the hex-encoded form.
return "0x0100" + bascii_to_str(hexlify(raw).upper())
def _calc_checksum(self, secret):
if isinstance(secret, bytes):
secret = secret.decode("utf-8")
salt = self.salt
return _raw_mssql(secret, salt) + _raw_mssql(secret.upper(), salt)
@classmethod
def verify(cls, secret, hash):
# NOTE: we only compare against the upper-case digest (the second half
# of the checksum), making verification case-insensitive.
# XXX: add 'full' just to verify both checksums?
uh.validate_secret(secret)
self = cls.from_string(hash)
chk = self.checksum
if chk is None:
raise uh.exc.MissingDigestError(cls)
if isinstance(secret, bytes):
secret = secret.decode("utf-8")
result = _raw_mssql(secret.upper(), self.salt)
return consteq(result, chk[20:])
#=============================================================================
# handler
#=============================================================================
class mssql2005(uh.HasRawSalt, uh.HasRawChecksum, uh.GenericHandler):
"""This class implements the password hash used by MS-SQL 2005, and follows the :ref:`password-hash-api`.
It supports a fixed-length salt.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts the following optional keywords:
:type salt: bytes
:param salt:
Optional salt string.
If not specified, one will be autogenerated (this is recommended).
If specified, it must be 4 bytes in length.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~zdppy_password_hash.exc.PasslibHashWarning`
will be issued instead. Correctable errors include
``salt`` strings that are too long.
"""
#===================================================================
# algorithm information
#===================================================================
name = "mssql2005"
setting_kwds = ("salt",)
checksum_size = 20
min_salt_size = max_salt_size = 4
#===================================================================
# formatting
#===================================================================
# 0x0100 - 2 byte identifier
# 4 byte salt
# 20 byte checksum
# = 26 bytes
# encoded '0x' + 52 chars = 54
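# illustrative layout (placeholder fields, not a real hash):
#   "0x0100" + 8 hex chars (salt) + 40 hex chars (digest)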
@classmethod
def identify(cls, hash):
return _ident_mssql(hash, 54, 26)
@classmethod
def from_string(cls, hash):
data = _parse_mssql(hash, 54, 26, cls)
return cls(salt=data[:4], checksum=data[4:])
def to_string(self):
raw = self.salt + self.checksum
# NOTE: a raw-bytes variant would be BIDENT2 + raw; we return the hex-encoded form.
return "0x0100" + bascii_to_str(hexlify(raw).upper())
def _calc_checksum(self, secret):
if isinstance(secret, bytes):
secret = secret.decode("utf-8")
return _raw_mssql(secret, self.salt)
#===================================================================
# eoc
#===================================================================
#=============================================================================
# eof
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/handlers/mssql.py | mssql.py |
#=============================================================================
# imports
#=============================================================================
# core
from hashlib import md5
import logging; log = logging.getLogger(__name__)
# site
# pkg
from zdppy_password_hash.utils import to_bytes
from zdppy_password_hash.utils.compat import str_to_uascii, unicode, u
import zdppy_password_hash.utils.handlers as uh
# local
__all__ = [
"postgres_md5",
]
#=============================================================================
# handler
#=============================================================================
class postgres_md5(uh.HasUserContext, uh.StaticHandler):
"""This class implements the Postgres MD5 Password hash, and follows the :ref:`password-hash-api`.
It does a single round of hashing, and relies on the username as the salt.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.hash`, :meth:`~zdppy_password_hash.ifc.PasswordHash.genhash`, and :meth:`~zdppy_password_hash.ifc.PasswordHash.verify` methods all require the
following additional contextual keywords:
:type user: str
:param user: name of postgres user account this password is associated with.
"""
#===================================================================
# algorithm information
#===================================================================
name = "postgres_md5"
_hash_prefix = u("md5")
checksum_chars = uh.HEX_CHARS
checksum_size = 32
#===================================================================
# primary interface
#===================================================================
def _calc_checksum(self, secret):
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
user = to_bytes(self.user, "utf-8", param="user")
return str_to_uascii(md5(secret + user).hexdigest())
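# equivalent construction (a sketch of the stored form; the "md5" prefix
# is prepended by StaticHandler via _hash_prefix):
#
#   "md5" + md5(password_bytes + username_bytes).hexdigest()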
#===================================================================
# eoc
#===================================================================
#=============================================================================
# eof
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/handlers/postgres.py | postgres.py |
#=============================================================================
# imports
#=============================================================================
# core
import hashlib
import logging; log = logging.getLogger(__name__)
# site
# pkg
from zdppy_password_hash.utils import safe_crypt, test_crypt, \
repeat_string, to_unicode
from zdppy_password_hash.utils.binary import h64
from zdppy_password_hash.utils.compat import byte_elem_value, u, \
uascii_to_str, unicode
import zdppy_password_hash.utils.handlers as uh
# local
__all__ = [
"sha512_crypt",
"sha256_crypt",
]
#=============================================================================
# pure-python backend, used by both sha256_crypt & sha512_crypt
# when crypt.crypt() backend is not available.
#=============================================================================
_BNULL = b'\x00'
# pre-calculated offsets used to speed up C digest stage (see notes below).
# sequence generated using the following:
##perms_order = "p,pp,ps,psp,sp,spp".split(",")
##def offset(i):
## key = (("p" if i % 2 else "") + ("s" if i % 3 else "") +
## ("p" if i % 7 else "") + ("" if i % 2 else "p"))
## return perms_order.index(key)
##_c_digest_offsets = [(offset(i), offset(i+1)) for i in range(0,42,2)]
_c_digest_offsets = (
(0, 3), (5, 1), (5, 3), (1, 2), (5, 1), (5, 3), (1, 3),
(4, 1), (5, 3), (1, 3), (5, 0), (5, 3), (1, 3), (5, 1),
(4, 3), (1, 3), (5, 1), (5, 2), (1, 3), (5, 1), (5, 3),
)
# map used to transpose bytes when encoding final sha256_crypt digest
_256_transpose_map = (
20, 10, 0, 11, 1, 21, 2, 22, 12, 23, 13, 3, 14, 4, 24, 5,
25, 15, 26, 16, 6, 17, 7, 27, 8, 28, 18, 29, 19, 9, 30, 31,
)
# map used to transpose bytes when encoding final sha512_crypt digest
_512_transpose_map = (
42, 21, 0, 1, 43, 22, 23, 2, 44, 45, 24, 3, 4, 46, 25, 26,
5, 47, 48, 27, 6, 7, 49, 28, 29, 8, 50, 51, 30, 9, 10, 52,
31, 32, 11, 53, 54, 33, 12, 13, 55, 34, 35, 14, 56, 57, 36, 15,
16, 58, 37, 38, 17, 59, 60, 39, 18, 19, 61, 40, 41, 20, 62, 63,
)
def _raw_sha2_crypt(pwd, salt, rounds, use_512=False):
"""perform raw sha256-crypt / sha512-crypt
this function provides a pure-python implementation of the internals
for the SHA256-Crypt and SHA512-Crypt algorithms; it doesn't
handle any of the parsing/validation of the hash strings themselves.
:arg pwd: password chars/bytes to hash
:arg salt: salt chars to use
:arg rounds: linear rounds cost
:arg use_512: use sha512-crypt instead of sha256-crypt mode
:returns:
encoded checksum chars
"""
#===================================================================
# init & validate inputs
#===================================================================
# NOTE: the setup portion of this algorithm scales ~linearly in time
# with the size of the password, making it vulnerable to a DOS from
# unreasonably large inputs. the following code has some optimizations
# which would make things even worse, using O(pwd_len**2) memory
# when calculating digest P.
#
# to mitigate these two issues: 1) this code switches to a
# O(pwd_len)-memory algorithm for passwords that are much larger
# than average, and 2) Passlib enforces a library-wide max limit on
# the size of passwords it will allow, to prevent this algorithm and
# others from being DOSed in this way (see zdppy_password_hash.exc.PasswordSizeError
# for details).
# validate secret
if isinstance(pwd, unicode):
# XXX: not sure what official unicode policy is, using this as default
pwd = pwd.encode("utf-8")
assert isinstance(pwd, bytes)
if _BNULL in pwd:
raise uh.exc.NullPasswordError(sha512_crypt if use_512 else sha256_crypt)
pwd_len = len(pwd)
# validate rounds
assert 1000 <= rounds <= 999999999, "invalid rounds"
# NOTE: spec says out-of-range rounds should be clipped, instead of
# causing an error. this function assumes that's been taken care of
# by the handler class.
# validate salt
assert isinstance(salt, unicode), "salt not unicode"
salt = salt.encode("ascii")
salt_len = len(salt)
assert salt_len < 17, "salt too large"
# NOTE: spec says salts larger than 16 bytes should be truncated,
# instead of causing an error. this function assumes that's been
# taken care of by the handler class.
# load sha256/512 specific constants
if use_512:
hash_const = hashlib.sha512
transpose_map = _512_transpose_map
else:
hash_const = hashlib.sha256
transpose_map = _256_transpose_map
#===================================================================
# digest B - used as subinput to digest A
#===================================================================
db = hash_const(pwd + salt + pwd).digest()
#===================================================================
# digest A - used to initialize first round of digest C
#===================================================================
# start out with pwd + salt
a_ctx = hash_const(pwd + salt)
a_ctx_update = a_ctx.update
# add pwd_len bytes of b, repeating b as many times as needed.
a_ctx_update(repeat_string(db, pwd_len))
# for each bit in pwd_len: add b if it's 1, or pwd if it's 0
i = pwd_len
while i:
a_ctx_update(db if i & 1 else pwd)
i >>= 1
# finish A
da = a_ctx.digest()
#===================================================================
# digest P from password - used instead of password itself
# when calculating digest C.
#===================================================================
if pwd_len < 96:
# this method is faster under python, but uses O(pwd_len**2) memory;
# so we don't use it for larger passwords to avoid a potential DOS.
dp = repeat_string(hash_const(pwd * pwd_len).digest(), pwd_len)
else:
# this method is slower under python, but uses a fixed amount of memory.
tmp_ctx = hash_const(pwd)
tmp_ctx_update = tmp_ctx.update
i = pwd_len-1
while i:
tmp_ctx_update(pwd)
i -= 1
dp = repeat_string(tmp_ctx.digest(), pwd_len)
assert len(dp) == pwd_len
#===================================================================
# digest S - used instead of salt itself when calculating digest C
#===================================================================
ds = hash_const(salt * (16 + byte_elem_value(da[0]))).digest()[:salt_len]
assert len(ds) == salt_len, "salt_len somehow > hash_len!"
#===================================================================
# digest C - for a variable number of rounds, combine A, S, and P
# digests in various ways, in order to burn CPU time.
#===================================================================
# NOTE: the original SHA256/512-Crypt specification performs the C digest
# calculation using the following loop:
#
##dc = da
##i = 0
##while i < rounds:
## tmp_ctx = hash_const(dp if i & 1 else dc)
## if i % 3:
## tmp_ctx.update(ds)
## if i % 7:
## tmp_ctx.update(dp)
## tmp_ctx.update(dc if i & 1 else dp)
## dc = tmp_ctx.digest()
## i += 1
#
# The code Passlib uses (below) implements an equivalent algorithm,
# it's just been heavily optimized to pre-calculate a large number
# of things beforehand. It works off of a couple of observations
# about the original algorithm:
#
# 1. each round is a combination of 'dc', 'ds', and 'dp'; determined
# by whether 'i' is a multiple of 2, 3, and/or 7.
# 2. since lcm(2,3,7)==42, the series of combinations will repeat
# every 42 rounds.
# 3. even rounds 0-40 consist of 'hash(dc + round-specific-constant)';
# while odd rounds 1-41 consist of hash(round-specific-constant + dc)
#
# Using these observations, the following code...
# * calculates the round-specific combination of ds & dp for each round 0-41
# * runs through as many 42-round blocks as possible
# * runs through as many pairs of rounds as possible for remaining rounds
# * performs one last round if the total number of rounds is odd.
#
# this cuts out a lot of the control overhead incurred when running the
# original loop 40,000+ times in python, resulting in a ~20% increase in
# speed under CPython (though still 2x slower than glibc crypt)
# prepare the 6 combinations of ds & dp which are needed
# (order of 'perms' must match how _c_digest_offsets was generated)
dp_dp = dp+dp
dp_ds = dp+ds
perms = [dp, dp_dp, dp_ds, dp_ds+dp, ds+dp, ds+dp_dp]
# build up list of even-round & odd-round constants,
# and store in 21-element list as (even,odd) pairs.
data = [ (perms[even], perms[odd]) for even, odd in _c_digest_offsets]
# perform as many full 42-round blocks as possible
dc = da
blocks, tail = divmod(rounds, 42)
while blocks:
for even, odd in data:
dc = hash_const(odd + hash_const(dc + even).digest()).digest()
blocks -= 1
# perform any leftover rounds
if tail:
# perform any pairs of rounds
pairs = tail>>1
for even, odd in data[:pairs]:
dc = hash_const(odd + hash_const(dc + even).digest()).digest()
# if rounds was odd, do one last round (since we started at 0,
# last round will be an even-numbered round)
if tail & 1:
dc = hash_const(dc + data[pairs][0]).digest()
#===================================================================
# encode digest using appropriate transpose map
#===================================================================
return h64.encode_transposed_bytes(dc, transpose_map).decode("ascii")
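# example usage (a sketch; rounds=1000 is the spec minimum, demo-sized only):
#
#   chk = _raw_sha2_crypt(b"test", u("test"), 1000)           # 43-char sha256-crypt checksum
#   chk512 = _raw_sha2_crypt(b"test", u("test"), 1000, True)  # 86-char sha512-crypt checksum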
#=============================================================================
# handlers
#=============================================================================
_UROUNDS = u("rounds=")
_UDOLLAR = u("$")
_UZERO = u("0")
class _SHA2_Common(uh.HasManyBackends, uh.HasRounds, uh.HasSalt,
uh.GenericHandler):
"""class containing common code shared by sha256_crypt & sha512_crypt"""
#===================================================================
# class attrs
#===================================================================
# name - set by subclass
setting_kwds = ("salt", "rounds", "implicit_rounds", "salt_size")
# ident - set by subclass
checksum_chars = uh.HASH64_CHARS
# checksum_size - set by subclass
max_salt_size = 16
salt_chars = uh.HASH64_CHARS
min_rounds = 1000 # bounds set by spec
max_rounds = 999999999 # bounds set by spec
rounds_cost = "linear"
_cdb_use_512 = False # flag for _calc_digest_builtin()
_rounds_prefix = None # ident + _UROUNDS
#===================================================================
# methods
#===================================================================
implicit_rounds = False
def __init__(self, implicit_rounds=None, **kwds):
super(_SHA2_Common, self).__init__(**kwds)
# if user calls hash() w/ 5000 rounds, default to compact form.
if implicit_rounds is None:
implicit_rounds = (self.use_defaults and self.rounds == 5000)
self.implicit_rounds = implicit_rounds
def _parse_salt(self, salt):
# required per SHA2-crypt spec -- truncate config salts rather than throwing error
return self._norm_salt(salt, relaxed=self.checksum is None)
def _parse_rounds(self, rounds):
# required per SHA2-crypt spec -- clip config rounds rather than throwing error
return self._norm_rounds(rounds, relaxed=self.checksum is None)
@classmethod
def from_string(cls, hash):
# basic format this parses -
# $5$[rounds=<rounds>$]<salt>[$<checksum>]
# TODO: this *could* use uh.parse_mc3(), except that the rounds
# portion has a slightly different grammar.
# convert to unicode, check for ident prefix, split on dollar signs.
hash = to_unicode(hash, "ascii", "hash")
ident = cls.ident
if not hash.startswith(ident):
raise uh.exc.InvalidHashError(cls)
assert len(ident) == 3
parts = hash[3:].split(_UDOLLAR)
# extract rounds value
if parts[0].startswith(_UROUNDS):
assert len(_UROUNDS) == 7
rounds = parts.pop(0)[7:]
if rounds.startswith(_UZERO) and rounds != _UZERO:
raise uh.exc.ZeroPaddedRoundsError(cls)
rounds = int(rounds)
implicit_rounds = False
else:
rounds = 5000
implicit_rounds = True
# rest should be salt and checksum
if len(parts) == 2:
salt, chk = parts
elif len(parts) == 1:
salt = parts[0]
chk = None
else:
raise uh.exc.MalformedHashError(cls)
# return new object
return cls(
rounds=rounds,
salt=salt,
checksum=chk or None,
implicit_rounds=implicit_rounds,
)
def to_string(self):
if self.rounds == 5000 and self.implicit_rounds:
hash = u("%s%s$%s") % (self.ident, self.salt,
self.checksum or u(''))
else:
hash = u("%srounds=%d$%s$%s") % (self.ident, self.rounds,
self.salt, self.checksum or u(''))
return uascii_to_str(hash)
#===================================================================
# backends
#===================================================================
backends = ("os_crypt", "builtin")
#---------------------------------------------------------------
# os_crypt backend
#---------------------------------------------------------------
#: test hash for OS detection -- provided by subclass
_test_hash = None
@classmethod
def _load_backend_os_crypt(cls):
if test_crypt(*cls._test_hash):
cls._set_calc_checksum_backend(cls._calc_checksum_os_crypt)
return True
else:
return False
def _calc_checksum_os_crypt(self, secret):
config = self.to_string()
hash = safe_crypt(secret, config)
if hash is None:
# py3's crypt.crypt() can't handle non-utf8 bytes.
# fallback to builtin alg, which is always available.
return self._calc_checksum_builtin(secret)
# NOTE: avoiding full parsing routine via from_string().checksum,
# and just extracting the bit we need.
cs = self.checksum_size
if not hash.startswith(self.ident) or hash[-cs-1] != _UDOLLAR:
raise uh.exc.CryptBackendError(self, config, hash)
return hash[-cs:]
#---------------------------------------------------------------
# builtin backend
#---------------------------------------------------------------
@classmethod
def _load_backend_builtin(cls):
cls._set_calc_checksum_backend(cls._calc_checksum_builtin)
return True
def _calc_checksum_builtin(self, secret):
return _raw_sha2_crypt(secret, self.salt, self.rounds,
self._cdb_use_512)
#===================================================================
# eoc
#===================================================================
class sha256_crypt(_SHA2_Common):
"""This class implements the SHA256-Crypt password hash, and follows the :ref:`password-hash-api`.
It supports a variable-length salt, and a variable number of rounds.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts the following optional keywords:
:type salt: str
:param salt:
Optional salt string.
If not specified, one will be autogenerated (this is recommended).
If specified, it must be 0-16 characters, drawn from the regexp range ``[./0-9A-Za-z]``.
:type rounds: int
:param rounds:
Optional number of rounds to use.
Defaults to 535000, must be between 1000 and 999999999, inclusive.
.. note::
per the official specification, when the rounds parameter is set to 5000,
it may be omitted from the hash string.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~zdppy_password_hash.exc.PasslibHashWarning`
will be issued instead. Correctable errors include ``rounds``
that are too small or too large, and ``salt`` strings that are too long.
.. versionadded:: 1.6
..
commented out, currently only supported by :meth:`hash`, and not via :meth:`using`:
:type implicit_rounds: bool
:param implicit_rounds:
this is an internal option which generally doesn't need to be touched.
this flag determines whether the hash should omit the rounds parameter
when encoding it to a string; this is only permitted by the spec for rounds=5000,
and the flag is ignored otherwise. the spec requires the two different
encodings be preserved as they are, instead of normalizing them.
"""
#===================================================================
# class attrs
#===================================================================
name = "sha256_crypt"
ident = u("$5$")
checksum_size = 43
# NOTE: using 25/75 weighting of builtin & os_crypt backends
default_rounds = 535000
#===================================================================
# backends
#===================================================================
_test_hash = ("test", "$5$rounds=1000$test$QmQADEXMG8POI5W"
"Dsaeho0P36yK3Tcrgboabng6bkb/")
#===================================================================
# eoc
#===================================================================
#=============================================================================
# sha 512 crypt
#=============================================================================
class sha512_crypt(_SHA2_Common):
"""This class implements the SHA512-Crypt password hash, and follows the :ref:`password-hash-api`.
It supports a variable-length salt, and a variable number of rounds.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts the following optional keywords:
:type salt: str
:param salt:
Optional salt string.
If not specified, one will be autogenerated (this is recommended).
If specified, it must be 0-16 characters, drawn from the regexp range ``[./0-9A-Za-z]``.
:type rounds: int
:param rounds:
Optional number of rounds to use.
Defaults to 656000, must be between 1000 and 999999999, inclusive.
.. note::
per the official specification, when the rounds parameter is set to 5000,
it may be omitted from the hash string.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~zdppy_password_hash.exc.PasslibHashWarning`
will be issued instead. Correctable errors include ``rounds``
that are too small or too large, and ``salt`` strings that are too long.
.. versionadded:: 1.6
..
commented out, currently only supported by :meth:`hash`, and not via :meth:`using`:
:type implicit_rounds: bool
:param implicit_rounds:
this is an internal option which generally doesn't need to be touched.
this flag determines whether the hash should omit the rounds parameter
when encoding it to a string; this is only permitted by the spec for rounds=5000,
and the flag is ignored otherwise. the spec requires the two different
encodings be preserved as they are, instead of normalizing them.
"""
#===================================================================
# class attrs
#===================================================================
name = "sha512_crypt"
ident = u("$6$")
checksum_size = 86
_cdb_use_512 = True
# NOTE: using 25/75 weighting of builtin & os_crypt backends
default_rounds = 656000
#===================================================================
# backend
#===================================================================
_test_hash = ("test", "$6$rounds=1000$test$2M/Lx6Mtobqj"
"Ljobw0Wmo4Q5OFx5nVLJvmgseatA6oMn"
"yWeBdRDx4DU.1H3eGmse6pgsOgDisWBG"
"I5c7TZauS0")
#===================================================================
# eoc
#===================================================================
#=============================================================================
# eof
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/handlers/sha2_crypt.py | sha2_crypt.py |
#=============================================================================
# imports
#=============================================================================
# core
from base64 import b64encode
from binascii import hexlify
from hashlib import md5, sha1, sha256
import logging; log = logging.getLogger(__name__)
# site
# pkg
from zdppy_password_hash.handlers.bcrypt import _wrapped_bcrypt
from zdppy_password_hash.hash import argon2, bcrypt, pbkdf2_sha1, pbkdf2_sha256
from zdppy_password_hash.utils import to_unicode, rng, getrandstr
from zdppy_password_hash.utils.binary import BASE64_CHARS
from zdppy_password_hash.utils.compat import str_to_uascii, uascii_to_str, unicode, u
from zdppy_password_hash.crypto.digest import pbkdf2_hmac
import zdppy_password_hash.utils.handlers as uh
# local
__all__ = [
"django_salted_sha1",
"django_salted_md5",
"django_bcrypt",
"django_pbkdf2_sha1",
"django_pbkdf2_sha256",
"django_argon2",
"django_des_crypt",
"django_disabled",
]
#=============================================================================
# lazy imports & constants
#=============================================================================
# imported by django_des_crypt._calc_checksum()
des_crypt = None
def _import_des_crypt():
global des_crypt
if des_crypt is None:
from zdppy_password_hash.hash import des_crypt
return des_crypt
# django 1.4's salt charset
SALT_CHARS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
#=============================================================================
# salted hashes
#=============================================================================
class DjangoSaltedHash(uh.HasSalt, uh.GenericHandler):
"""base class providing common code for django hashes"""
# name, ident, checksum_size must be set by subclass.
# ident must include "$" suffix.
setting_kwds = ("salt", "salt_size")
# NOTE: django 1.0-1.3 would accept empty salt strings.
# django 1.4 won't, but this appears to be a regression
# (https://code.djangoproject.com/ticket/18144)
# so presumably it will be fixed in a later release.
default_salt_size = 12
max_salt_size = None
salt_chars = SALT_CHARS
checksum_chars = uh.LOWER_HEX_CHARS
@classmethod
def from_string(cls, hash):
salt, chk = uh.parse_mc2(hash, cls.ident, handler=cls)
return cls(salt=salt, checksum=chk)
def to_string(self):
return uh.render_mc2(self.ident, self.salt, self.checksum)
# NOTE: only used by PBKDF2
class DjangoVariableHash(uh.HasRounds, DjangoSaltedHash):
"""base class providing common code for django hashes w/ variable rounds"""
setting_kwds = DjangoSaltedHash.setting_kwds + ("rounds",)
min_rounds = 1
@classmethod
def from_string(cls, hash):
rounds, salt, chk = uh.parse_mc3(hash, cls.ident, handler=cls)
return cls(rounds=rounds, salt=salt, checksum=chk)
def to_string(self):
return uh.render_mc3(self.ident, self.rounds, self.salt, self.checksum)
class django_salted_sha1(DjangoSaltedHash):
"""This class implements Django's Salted SHA1 hash, and follows the :ref:`password-hash-api`.
It supports a variable-length salt, and uses a single round of SHA1.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.hash` and :meth:`~zdppy_password_hash.ifc.PasswordHash.genconfig` methods accept the following optional keywords:
:type salt: str
:param salt:
Optional salt string.
If not specified, a 12 character one will be autogenerated (this is recommended).
If specified, may be any series of characters drawn from the regexp range ``[0-9a-zA-Z]``.
:type salt_size: int
:param salt_size:
Optional number of characters to use when autogenerating new salts.
Defaults to 12, but can be any positive value.
This should be compatible with Django 1.4's :class:`!SHA1PasswordHasher` class.
.. versionchanged:: 1.6
This class now generates 12-character salts instead of 5,
and generated salts use the character range ``[0-9a-zA-Z]`` instead of
``[0-9a-f]``. This is to be compatible with how Django >= 1.4
generates these hashes; but hashes generated in this manner will still be
correctly interpreted by earlier versions of Django.
"""
name = "django_salted_sha1"
django_name = "sha1"
ident = u("sha1$")
checksum_size = 40
def _calc_checksum(self, secret):
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
return str_to_uascii(sha1(self.salt.encode("ascii") + secret).hexdigest())
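# illustrative layout (placeholder chars, not a real digest):
#   "sha1$" + salt + "$" + 40 lowercase hex chars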
class django_salted_md5(DjangoSaltedHash):
"""This class implements Django's Salted MD5 hash, and follows the :ref:`password-hash-api`.
It supports a variable-length salt, and uses a single round of MD5.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.hash` and :meth:`~zdppy_password_hash.ifc.PasswordHash.genconfig` methods accept the following optional keywords:
:type salt: str
:param salt:
Optional salt string.
If not specified, a 12 character one will be autogenerated (this is recommended).
If specified, may be any series of characters drawn from the regexp range ``[0-9a-zA-Z]``.
:type salt_size: int
:param salt_size:
Optional number of characters to use when autogenerating new salts.
Defaults to 12, but can be any positive value.
This should be compatible with the hashes generated by
Django 1.4's :class:`!MD5PasswordHasher` class.
.. versionchanged:: 1.6
This class now generates 12-character salts instead of 5,
and generated salts use the character range ``[0-9a-zA-Z]`` instead of
``[0-9a-f]``. This is to be compatible with how Django >= 1.4
generates these hashes; but hashes generated in this manner will still be
correctly interpreted by earlier versions of Django.
"""
name = "django_salted_md5"
django_name = "md5"
ident = u("md5$")
checksum_size = 32
def _calc_checksum(self, secret):
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
return str_to_uascii(md5(self.salt.encode("ascii") + secret).hexdigest())
#=============================================================================
# BCrypt
#=============================================================================
django_bcrypt = uh.PrefixWrapper("django_bcrypt", bcrypt,
prefix=u('bcrypt$'), ident=u("bcrypt$"),
# NOTE: this docstring is duplicated in the docs, since sphinx
# seems to be having trouble reading it via autodata::
doc="""This class implements Django 1.4's BCrypt wrapper, and follows the :ref:`password-hash-api`.
This is identical to :class:`!bcrypt` itself, but with
the Django-specific prefix ``"bcrypt$"`` prepended.
See :doc:`/lib/zdppy_password_hash.hash.bcrypt` for more details,
the usage and behavior is identical.
This should be compatible with the hashes generated by
Django 1.4's :class:`!BCryptPasswordHasher` class.
.. versionadded:: 1.6
""")
django_bcrypt.django_name = "bcrypt"
django_bcrypt._using_clone_attrs += ("django_name",)
#=============================================================================
# BCRYPT + SHA256
#=============================================================================
class django_bcrypt_sha256(_wrapped_bcrypt):
"""This class implements Django 1.6's Bcrypt+SHA256 hash, and follows the :ref:`password-hash-api`.
It supports a variable-length salt, and a variable number of rounds.
While the algorithm and format is somewhat different,
the api and options for this hash are identical to :class:`!bcrypt` itself,
see :doc:`bcrypt </lib/zdppy_password_hash.hash.bcrypt>` for more details.
.. versionadded:: 1.6.2
"""
name = "django_bcrypt_sha256"
django_name = "bcrypt_sha256"
_digest = sha256
# sample hash:
# bcrypt_sha256$$2a$06$/3OeRpbOf8/l6nPPRdZPp.nRiyYqPobEZGdNRBWihQhiFDh1ws1tu
# XXX: we can't use .ident attr due to bcrypt code using it.
# working around that via django_prefix
django_prefix = u('bcrypt_sha256$')
@classmethod
def identify(cls, hash):
hash = uh.to_unicode_for_identify(hash)
if not hash:
return False
return hash.startswith(cls.django_prefix)
@classmethod
def from_string(cls, hash):
hash = to_unicode(hash, "ascii", "hash")
if not hash.startswith(cls.django_prefix):
raise uh.exc.InvalidHashError(cls)
bhash = hash[len(cls.django_prefix):]
if not bhash.startswith("$2"):
raise uh.exc.MalformedHashError(cls)
return super(django_bcrypt_sha256, cls).from_string(bhash)
def to_string(self):
bhash = super(django_bcrypt_sha256, self).to_string()
return uascii_to_str(self.django_prefix) + bhash
def _calc_checksum(self, secret):
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
secret = hexlify(self._digest(secret).digest())
return super(django_bcrypt_sha256, self)._calc_checksum(secret)
#=============================================================================
# PBKDF2 variants
#=============================================================================
class django_pbkdf2_sha256(DjangoVariableHash):
"""This class implements Django's PBKDF2-HMAC-SHA256 hash, and follows the :ref:`password-hash-api`.
It supports a variable-length salt, and a variable number of rounds.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts the following optional keywords:
:type salt: str
:param salt:
Optional salt string.
If not specified, a 12 character one will be autogenerated (this is recommended).
If specified, may be any series of characters drawn from the regexp range ``[0-9a-zA-Z]``.
:type salt_size: int
:param salt_size:
Optional number of characters to use when autogenerating new salts.
Defaults to 12, but can be any positive value.
:type rounds: int
:param rounds:
Optional number of rounds to use.
Defaults to 29000, but must be within ``range(1,1<<32)``.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~zdppy_password_hash.exc.PasslibHashWarning`
will be issued instead. Correctable errors include ``rounds``
that are too small or too large, and ``salt`` strings that are too long.
This should be compatible with the hashes generated by
Django 1.4's :class:`!PBKDF2PasswordHasher` class.
.. versionadded:: 1.6
"""
name = "django_pbkdf2_sha256"
django_name = "pbkdf2_sha256"
ident = u('pbkdf2_sha256$')
min_salt_size = 1
max_rounds = 0xffffffff # setting at 32-bit limit for now
checksum_chars = uh.PADDED_BASE64_CHARS
checksum_size = 44 # 32 bytes -> base64
default_rounds = pbkdf2_sha256.default_rounds # NOTE: django 1.6 uses 12000
_digest = "sha256"
def _calc_checksum(self, secret):
# NOTE: secret & salt will be encoded using UTF-8 by pbkdf2_hmac()
hash = pbkdf2_hmac(self._digest, secret, self.salt, self.rounds)
return b64encode(hash).rstrip().decode("ascii")
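# illustrative layout (placeholder fields, not a real hash):
#   "pbkdf2_sha256$" + rounds + "$" + salt + "$" + 44-char base64 digest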
class django_pbkdf2_sha1(django_pbkdf2_sha256):
"""This class implements Django's PBKDF2-HMAC-SHA1 hash, and follows the :ref:`password-hash-api`.
It supports a variable-length salt, and a variable number of rounds.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts the following optional keywords:
:type salt: str
:param salt:
Optional salt string.
If not specified, a 12 character one will be autogenerated (this is recommended).
If specified, may be any series of characters drawn from the regexp range ``[0-9a-zA-Z]``.
:type salt_size: int
:param salt_size:
Optional number of characters to use when autogenerating new salts.
Defaults to 12, but can be any positive value.
:type rounds: int
:param rounds:
Optional number of rounds to use.
Defaults to 131000, but must be within ``range(1,1<<32)``.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~zdppy_password_hash.exc.PasslibHashWarning`
will be issued instead. Correctable errors include ``rounds``
that are too small or too large, and ``salt`` strings that are too long.
This should be compatible with the hashes generated by
Django 1.4's :class:`!PBKDF2SHA1PasswordHasher` class.
.. versionadded:: 1.6
"""
name = "django_pbkdf2_sha1"
django_name = "pbkdf2_sha1"
ident = u('pbkdf2_sha1$')
checksum_size = 28 # 20 bytes -> base64
default_rounds = pbkdf2_sha1.default_rounds # NOTE: django 1.6 uses 12000
_digest = "sha1"
#=============================================================================
# Argon2
#=============================================================================
# NOTE: as of 2019-11-11, Django's Argon2PasswordHasher only supports Type I;
# so limiting this to ensure that as well.
django_argon2 = uh.PrefixWrapper(
name="django_argon2",
wrapped=argon2.using(type="I"),
prefix=u('argon2'),
ident=u('argon2$argon2i$'),
# NOTE: this docstring is duplicated in the docs, since sphinx
# seems to be having trouble reading it via autodata::
doc="""This class implements Django 1.10's Argon2 wrapper, and follows the :ref:`password-hash-api`.
This is identical to :class:`!argon2` itself, but with
the Django-specific prefix ``"argon2$"`` prepended.
See :doc:`argon2 </lib/zdppy_password_hash.hash.argon2>` for more details,
the usage and behavior is identical.
This should be compatible with the hashes generated by
Django 1.10's :class:`!Argon2PasswordHasher` class.
.. versionadded:: 1.7
""")
django_argon2.django_name = "argon2"
django_argon2._using_clone_attrs += ("django_name",)
#=============================================================================
# DES
#=============================================================================
class django_des_crypt(uh.TruncateMixin, uh.HasSalt, uh.GenericHandler):
"""This class implements Django's :class:`des_crypt` wrapper, and follows the :ref:`password-hash-api`.
It supports a fixed-length salt.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.hash` and :meth:`~zdppy_password_hash.ifc.PasswordHash.genconfig` methods accept the following optional keywords:
:type salt: str
:param salt:
Optional salt string.
If not specified, one will be autogenerated (this is recommended).
If specified, it must be 2 characters, drawn from the regexp range ``[./0-9A-Za-z]``.
:param bool truncate_error:
By default, django_des_crypt will silently truncate passwords larger than 8 bytes.
Setting ``truncate_error=True`` will cause :meth:`~zdppy_password_hash.ifc.PasswordHash.hash`
to raise a :exc:`~zdppy_password_hash.exc.PasswordTruncateError` instead.
.. versionadded:: 1.7
This should be compatible with the hashes generated by
Django 1.4's :class:`!CryptPasswordHasher` class.
Note that Django only supports this hash on Unix systems
(though :class:`!django_des_crypt` is available cross-platform
under Passlib).
.. versionchanged:: 1.6
This class will now accept hashes with empty salt strings,
since Django 1.4 generates them this way.
"""
name = "django_des_crypt"
django_name = "crypt"
setting_kwds = ("salt", "salt_size", "truncate_error")
ident = u("crypt$")
checksum_chars = salt_chars = uh.HASH64_CHARS
checksum_size = 11
min_salt_size = default_salt_size = 2
truncate_size = 8
# NOTE: regarding duplicate salt field:
#
# django 1.0 had a "crypt$<salt1>$<salt2><digest>" hash format,
# used [a-z0-9] to generate a 5 char salt, stored it in salt1,
# duplicated the first two chars of salt1 as salt2.
# it would throw an error if salt1 was empty.
#
# django 1.4 started generating 2 char salt using the full alphabet,
# left salt1 empty, and only paid attention to salt2.
#
# in order to be compatible with django 1.0, the hashes generated
# by this function will always include salt1, unless the following
# class-level field is disabled (mainly used for testing)
use_duplicate_salt = True
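# illustrative layouts (placeholder chars, not real hashes):
#   django 1.0: "crypt$abc12$ab<11-char digest>"  -- 5-char salt field, first 2 chars repeated
#   django 1.4: "crypt$$ab<11-char digest>"       -- empty salt field, salt read from digest part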
@classmethod
def from_string(cls, hash):
salt, chk = uh.parse_mc2(hash, cls.ident, handler=cls)
if chk:
# chk should be full des_crypt hash
if not salt:
# django 1.4 always uses empty salt field,
# so extract salt from des_crypt hash <chk>
salt = chk[:2]
elif salt[:2] != chk[:2]:
# django 1.0 stored 5 chars in salt field, and duplicated
# the first two chars in <chk>. we keep the full salt,
# but make sure the first two chars match as sanity check.
raise uh.exc.MalformedHashError(cls,
"first two digits of salt and checksum must match")
# in all cases, strip salt chars from <chk>
chk = chk[2:]
return cls(salt=salt, checksum=chk)
def to_string(self):
salt = self.salt
chk = salt[:2] + self.checksum
if self.use_duplicate_salt:
# filling in salt field, so that we're compatible with django 1.0
return uh.render_mc2(self.ident, salt, chk)
else:
# django 1.4+ style hash
return uh.render_mc2(self.ident, "", chk)
def _calc_checksum(self, secret):
# NOTE: we lazily import des_crypt,
# since most django deploys won't use django_des_crypt
global des_crypt
if des_crypt is None:
_import_des_crypt()
# check for truncation (during .hash() calls only)
if self.use_defaults:
self._check_truncate_policy(secret)
return des_crypt(salt=self.salt[:2])._calc_checksum(secret)
class django_disabled(uh.ifc.DisabledHash, uh.StaticHandler):
"""This class provides disabled password behavior for Django, and follows the :ref:`password-hash-api`.
This class does not implement a hash, but instead
claims the special hash string ``"!"`` which Django uses
to indicate an account's password has been disabled.
* newly encrypted passwords will hash to ``"!"``.
* it rejects all passwords.
.. note::
Django 1.6 prepends a randomly generated 40-char alphanumeric string
to each unusable password. This class recognizes such strings,
but for backwards compatibility, still returns ``"!"``.
See `<https://code.djangoproject.com/ticket/20079>`_ for why
Django appends an alphanumeric string.
.. versionchanged:: 1.6.2 added Django 1.6 support
.. versionchanged:: 1.7 started appending an alphanumeric string.
"""
name = "django_disabled"
_hash_prefix = u("!")
suffix_length = 40
# XXX: move this to StaticHandler, or wherever _hash_prefix is being used?
@classmethod
def identify(cls, hash):
hash = uh.to_unicode_for_identify(hash)
return hash.startswith(cls._hash_prefix)
def _calc_checksum(self, secret):
# generate random suffix to match django's behavior
return getrandstr(rng, BASE64_CHARS[:-2], self.suffix_length)
@classmethod
def verify(cls, secret, hash):
uh.validate_secret(secret)
if not cls.identify(hash):
raise uh.exc.InvalidHashError(cls)
return False
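# example behavior (a minimal sketch):
#
#   >>> h = django_disabled.hash("anything")   # "!" + 40 random alphanumeric chars
#   >>> h.startswith("!")
#   True
#   >>> django_disabled.verify("anything", h)  # always rejects
#   False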
#=============================================================================
# eof
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/handlers/django.py | django.py |
#=============================================================================
# imports
#=============================================================================
from __future__ import with_statement, absolute_import
# core
import logging; log = logging.getLogger(__name__)
# site
# pkg
from zdppy_password_hash.crypto import scrypt as _scrypt
from zdppy_password_hash.utils import to_bytes
from zdppy_password_hash.utils.binary import h64, b64s_decode, b64s_encode
from zdppy_password_hash.utils.compat import u, bascii_to_str, suppress_cause
from zdppy_password_hash.utils.decor import classproperty
import zdppy_password_hash.utils.handlers as uh
# local
__all__ = [
"scrypt",
]
#=============================================================================
# scrypt format identifiers
#=============================================================================
IDENT_SCRYPT = u("$scrypt$") # identifier used by zdppy_password_hash
IDENT_7 = u("$7$") # used by official scrypt spec
_UDOLLAR = u("$")
#=============================================================================
# handler
#=============================================================================
class scrypt(uh.ParallelismMixin, uh.HasRounds, uh.HasRawSalt, uh.HasRawChecksum, uh.HasManyIdents,
uh.GenericHandler):
"""This class implements an SCrypt-based password [#scrypt-home]_ hash, and follows the :ref:`password-hash-api`.
It supports a variable-length salt, a variable number of rounds,
as well as some custom tuning parameters unique to scrypt (see below).
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts the following optional keywords:
:type salt: str
:param salt:
Optional salt string.
If specified, the length must be between 0-1024 bytes.
If not specified, one will be auto-generated (this is recommended).
:type salt_size: int
:param salt_size:
Optional number of bytes to use when autogenerating new salts.
Defaults to 16 bytes, but can be any value between 0 and 1024.
:type rounds: int
:param rounds:
Optional number of rounds to use.
Defaults to 16, but must be within ``range(1,32)``.
.. warning::
Unlike many hash algorithms, increasing the rounds value
will increase both the time *and memory* required to hash a password.
:type block_size: int
:param block_size:
Optional block size to pass to scrypt hash function (the ``r`` parameter).
Useful for tuning scrypt to optimal performance for your CPU architecture.
Defaults to 8.
:type parallelism: int
:param parallelism:
Optional parallelism to pass to scrypt hash function (the ``p`` parameter).
Defaults to 1.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~zdppy_password_hash.exc.PasslibHashWarning`
will be issued instead. Correctable errors include ``rounds``
that are too small or too large, and ``salt`` strings that are too long.
.. note::
The underlying scrypt hash function has a number of limitations
on its parameter values, which forbid certain combinations of settings.
The requirements are:
* ``linear_rounds = 2**<some positive integer>``
* ``linear_rounds < 2**(16 * block_size)``
* ``block_size * parallelism <= 2**30-1``
.. todo::
This class currently does not support configuring default values
for ``block_size`` or ``parallelism`` via a :class:`~zdppy_password_hash.context.CryptContext`
configuration.
"""
#===================================================================
# class attrs
#===================================================================
#------------------------
# PasswordHash
#------------------------
name = "scrypt"
setting_kwds = ("ident", "salt", "salt_size", "rounds", "block_size", "parallelism")
#------------------------
# GenericHandler
#------------------------
# NOTE: scrypt supports arbitrary output sizes, since its output runs through
# pbkdf2-hmac-sha256 before returning; this size could be raised eventually...
# but a 256-bit digest is more than sufficient for password hashing.
# XXX: make checksum size configurable? could merge w/ argon2 code that does this.
checksum_size = 32
#------------------------
# HasManyIdents
#------------------------
default_ident = IDENT_SCRYPT
ident_values = (IDENT_SCRYPT, IDENT_7)
#------------------------
# HasRawSalt
#------------------------
default_salt_size = 16
max_salt_size = 1024
#------------------------
# HasRounds
#------------------------
# TODO: would like to dynamically pick this based on system
default_rounds = 16
min_rounds = 1
max_rounds = 31 # limited by scrypt alg
rounds_cost = "log2"
# TODO: make default block size configurable via using(), and deprecatable via .needs_update()
#===================================================================
# instance attrs
#===================================================================
#: default parallelism setting (min=1 currently hardcoded in mixin)
parallelism = 1
#: default block size setting
block_size = 8
#===================================================================
# variant constructor
#===================================================================
@classmethod
def using(cls, block_size=None, **kwds):
subcls = super(scrypt, cls).using(**kwds)
if block_size is not None:
if isinstance(block_size, uh.native_string_types):
block_size = int(block_size)
subcls.block_size = subcls._norm_block_size(block_size, relaxed=kwds.get("relaxed"))
# make sure param combination is valid for scrypt()
# NOTE: validating the derived subclass's settings, so that block_size /
# rounds overrides passed to using() are actually checked.
try:
_scrypt.validate(1 << subcls.default_rounds, subcls.block_size, subcls.parallelism)
except ValueError as err:
raise suppress_cause(ValueError("scrypt: invalid settings combination: " + str(err)))
return subcls
#===================================================================
# parsing
#===================================================================
@classmethod
def from_string(cls, hash):
return cls(**cls.parse(hash))
@classmethod
def parse(cls, hash):
ident, suffix = cls._parse_ident(hash)
func = getattr(cls, "_parse_%s_string" % ident.strip(_UDOLLAR), None)
if func:
return func(suffix)
else:
raise uh.exc.InvalidHashError(cls)
#
# zdppy_password_hash's format:
# $scrypt$ln=<logN>,r=<r>,p=<p>$<salt>[$<digest>]
# where:
# logN, r, p -- decimal-encoded positive integer, no zero-padding
# logN -- log cost setting
# r -- block size setting (usually 8)
# p -- parallelism setting (usually 1)
# salt, digest -- b64-nopad encoded bytes
#
@classmethod
def _parse_scrypt_string(cls, suffix):
# break params, salt, and digest sections
parts = suffix.split("$")
if len(parts) == 3:
params, salt, digest = parts
elif len(parts) == 2:
params, salt = parts
digest = None
else:
raise uh.exc.MalformedHashError(cls, "malformed hash")
# break params apart
parts = params.split(",")
if len(parts) == 3:
nstr, bstr, pstr = parts
assert nstr.startswith("ln=")
assert bstr.startswith("r=")
assert pstr.startswith("p=")
else:
raise uh.exc.MalformedHashError(cls, "malformed settings field")
return dict(
ident=IDENT_SCRYPT,
rounds=int(nstr[3:]),
block_size=int(bstr[2:]),
parallelism=int(pstr[2:]),
salt=b64s_decode(salt.encode("ascii")),
checksum=b64s_decode(digest.encode("ascii")) if digest else None,
)
#
# official format specification defined at
# https://gitlab.com/jas/scrypt-unix-crypt/blob/master/unix-scrypt.txt
# format:
# $7$<N><rrrrr><ppppp><salt...>[$<digest>]
# 0 12345 67890 1
# where:
# All bytes use h64-little-endian encoding
# N: 6-bit log cost setting
# r: 30-bit block size setting
# p: 30-bit parallelism setting
# salt: variable length salt bytes
# digest: fixed 32-byte digest
#
@classmethod
def _parse_7_string(cls, suffix):
# XXX: annoyingly, official spec embeds salt *raw*, yet doesn't specify a hash encoding.
# so assuming only h64 chars are valid for salt, and are ASCII encoded.
# split into params & digest
parts = suffix.encode("ascii").split(b"$")
if len(parts) == 2:
params, digest = parts
elif len(parts) == 1:
params, = parts
digest = None
else:
raise uh.exc.MalformedHashError()
# parse params & return
if len(params) < 11:
raise uh.exc.MalformedHashError(cls, "params field too short")
return dict(
ident=IDENT_7,
rounds=h64.decode_int6(params[:1]),
block_size=h64.decode_int30(params[1:6]),
parallelism=h64.decode_int30(params[6:11]),
salt=params[11:],
checksum=h64.decode_bytes(digest) if digest else None,
)
#===================================================================
# formatting
#===================================================================
def to_string(self):
ident = self.ident
if ident == IDENT_SCRYPT:
return "$scrypt$ln=%d,r=%d,p=%d$%s$%s" % (
self.rounds,
self.block_size,
self.parallelism,
bascii_to_str(b64s_encode(self.salt)),
bascii_to_str(b64s_encode(self.checksum)),
)
else:
assert ident == IDENT_7
salt = self.salt
try:
salt.decode("ascii")
except UnicodeDecodeError:
raise suppress_cause(NotImplementedError("scrypt $7$ hashes don't support non-ascii salts"))
return bascii_to_str(b"".join([
b"$7$",
h64.encode_int6(self.rounds),
h64.encode_int30(self.block_size),
h64.encode_int30(self.parallelism),
self.salt,
b"$",
h64.encode_bytes(self.checksum)
]))
#===================================================================
# init
#===================================================================
def __init__(self, block_size=None, **kwds):
super(scrypt, self).__init__(**kwds)
# init block size
if block_size is None:
assert uh.validate_default_value(self, self.block_size, self._norm_block_size,
param="block_size")
else:
self.block_size = self._norm_block_size(block_size)
# NOTE: if the hash contains an invalid parameter combination, we rely on
# the error being raised by the scrypt call in _calc_checksum()
@classmethod
def _norm_block_size(cls, block_size, relaxed=False):
return uh.norm_integer(cls, block_size, min=1, param="block_size", relaxed=relaxed)
def _generate_salt(self):
salt = super(scrypt, self)._generate_salt()
if self.ident == IDENT_7:
# this format doesn't support non-ascii salts.
# as workaround, we take raw bytes, encoded to base64
salt = b64s_encode(salt)
return salt
#===================================================================
# backend configuration
# NOTE: this follows HasManyBackends' API, but provides its own implementation,
# which actually switches the backend that 'zdppy_password_hash.crypto.scrypt.scrypt()' uses.
#===================================================================
@classproperty
def backends(cls):
return _scrypt.backend_values
@classmethod
def get_backend(cls):
return _scrypt.backend
@classmethod
def has_backend(cls, name="any"):
try:
cls.set_backend(name, dryrun=True)
return True
except uh.exc.MissingBackendError:
return False
@classmethod
def set_backend(cls, name="any", dryrun=False):
_scrypt._set_backend(name, dryrun=dryrun)
#===================================================================
# digest calculation
#===================================================================
def _calc_checksum(self, secret):
secret = to_bytes(secret, param="secret")
return _scrypt.scrypt(secret, self.salt, n=(1 << self.rounds), r=self.block_size,
p=self.parallelism, keylen=self.checksum_size)
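# example usage (a sketch; rounds=8 => n=256, deliberately low for demo):
#
#   >>> h = scrypt.using(rounds=8).hash("password")
#   >>> h.startswith("$scrypt$ln=8,r=8,p=1$")
#   True
#   >>> scrypt.verify("password", h)
#   True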
#===================================================================
# hash migration
#===================================================================
def _calc_needs_update(self, **kwds):
"""
mark hash as needing update if rounds is outside desired bounds.
"""
# XXX: for now, marking all hashes which don't have matching block_size setting
if self.block_size != type(self).block_size:
return True
return super(scrypt, self)._calc_needs_update(**kwds)
#===================================================================
# eoc
#===================================================================
#=============================================================================
# eof
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/handlers/scrypt.py | scrypt.py |
#=============================================================================
# imports
#=============================================================================
# core
import logging; log = logging.getLogger(__name__)
# site
# pkg
from zdppy_password_hash.utils import consteq, saslprep, to_native_str, splitcomma
from zdppy_password_hash.utils.binary import ab64_decode, ab64_encode
from zdppy_password_hash.utils.compat import bascii_to_str, iteritems, u, native_string_types
from zdppy_password_hash.crypto.digest import pbkdf2_hmac, norm_hash_name
import zdppy_password_hash.utils.handlers as uh
# local
__all__ = [
"scram",
]
#=============================================================================
# scram credentials hash
#=============================================================================
class scram(uh.HasRounds, uh.HasRawSalt, uh.HasRawChecksum, uh.GenericHandler):
"""This class provides a format for storing SCRAM passwords, and follows
the :ref:`password-hash-api`.
It supports a variable-length salt, and a variable number of rounds.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts the following optional keywords:
:type salt: bytes
:param salt:
Optional salt bytes.
If specified, the length must be between 0-1024 bytes.
If not specified, a 12 byte salt will be autogenerated
(this is recommended).
:type salt_size: int
:param salt_size:
Optional number of bytes to use when autogenerating new salts.
Defaults to 12 bytes, but can be any value between 0 and 1024.
:type rounds: int
:param rounds:
Optional number of rounds to use.
Defaults to 100000, but must be within ``range(1,1<<32)``.
:type algs: list of strings
:param algs:
Specify list of digest algorithms to use.
By default each scram hash will contain digests for SHA-1,
SHA-256, and SHA-512. This can be overridden by specifying either a
list such as ``["sha-1", "sha-256"]``, or a comma-separated string
such as ``"sha-1, sha-256"``. Names are case insensitive, and may
use :mod:`!hashlib` or `IANA <http://www.iana.org/assignments/hash-function-text-names>`_
hash names.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~zdppy_password_hash.exc.PasslibHashWarning`
will be issued instead. Correctable errors include ``rounds``
that are too small or too large, and ``salt`` strings that are too long.
.. versionadded:: 1.6
In addition to the standard :ref:`password-hash-api` methods,
this class also provides the following methods for manipulating Passlib
scram hashes in ways useful for plugging into a SCRAM protocol stack:
.. automethod:: extract_digest_info
.. automethod:: extract_digest_algs
.. automethod:: derive_digest
"""
#===================================================================
# class attrs
#===================================================================
# NOTE: unlike most GenericHandler classes, the 'checksum' attr of
# ScramHandler is actually a map from digest_name -> digest, so
# many of the standard methods have been overridden.
# NOTE: max_salt_size and max_rounds are arbitrarily chosen to provide
# a sanity check; the underlying pbkdf2 specifies no bounds for either.
#--GenericHandler--
name = "scram"
setting_kwds = ("salt", "salt_size", "rounds", "algs")
ident = u("$scram$")
#--HasSalt--
default_salt_size = 12
max_salt_size = 1024
#--HasRounds--
default_rounds = 100000
min_rounds = 1
max_rounds = 2**32-1
rounds_cost = "linear"
#--custom--
# default algorithms when creating new hashes.
default_algs = ["sha-1", "sha-256", "sha-512"]
# list of algs verify prefers to use, in order.
_verify_algs = ["sha-256", "sha-512", "sha-224", "sha-384", "sha-1"]
#===================================================================
# instance attrs
#===================================================================
# 'checksum' is different from most GenericHandler subclasses,
# in that it contains a dict mapping from alg -> digest,
# or None if no checksum present.
# list of algorithms to create/compare digests for.
algs = None
#===================================================================
# scram frontend helpers
#===================================================================
@classmethod
def extract_digest_info(cls, hash, alg):
"""return (salt, rounds, digest) for specific hash algorithm.
:type hash: str
:arg hash:
:class:`!scram` hash stored for desired user
:type alg: str
:arg alg:
Name of digest algorithm (e.g. ``"sha-1"``) requested by client.
This value is run through :func:`~zdppy_password_hash.crypto.digest.norm_hash_name`,
so it is case-insensitive, and can be the raw SCRAM
mechanism name (e.g. ``"SCRAM-SHA-1"``), the IANA name,
or the hashlib name.
:raises KeyError:
If the hash does not contain an entry for the requested digest
algorithm.
:returns:
A tuple containing ``(salt, rounds, digest)``,
where *digest* matches the raw bytes returned by
SCRAM's :func:`Hi` function for the stored password,
the provided *salt*, and the iteration count (*rounds*).
*salt* and *digest* are both raw (unencoded) bytes.
"""
# XXX: this could be sped up by writing custom parsing routine
# that just picks out relevant digest, and doesn't bother
# with full structure validation each time it's called.
alg = norm_hash_name(alg, 'iana')
self = cls.from_string(hash)
chkmap = self.checksum
if not chkmap:
raise ValueError("scram hash contains no digests")
return self.salt, self.rounds, chkmap[alg]
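# illustrative server-side usage of the helper above (a sketch; 'stored_hash'
# is a placeholder for a scram hash loaded from the credential store):
##   salt, rounds, digest = scram.extract_digest_info(stored_hash, "SCRAM-SHA-1")
##   # (salt, rounds) go into the server-first SCRAM message; 'digest' is the
##   # SaltedPassword, from which ClientKey / ServerKey are derived per rfc 5802.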
@classmethod
def extract_digest_algs(cls, hash, format="iana"):
"""Return names of all algorithms stored in a given hash.
:type hash: str
:arg hash:
The :class:`!scram` hash to parse
:type format: str
:param format:
This changes the naming convention used by the
returned algorithm names. By default the names
are IANA-compatible; possible values are ``"iana"`` or ``"hashlib"``.
:returns:
Returns a list of digest algorithms; e.g. ``["sha-1"]``
"""
# XXX: this could be sped up by writing custom parsing routine
# that just picks out relevant names, and doesn't bother
# with full structure validation each time it's called.
algs = cls.from_string(hash).algs
if format == "iana":
return algs
else:
return [norm_hash_name(alg, format) for alg in algs]
@classmethod
def derive_digest(cls, password, salt, rounds, alg):
"""helper to create SaltedPassword digest for SCRAM.
This performs the step in the SCRAM protocol described as::
SaltedPassword := Hi(Normalize(password), salt, i)
:type password: unicode or utf-8 bytes
:arg password: password to run through digest
:type salt: bytes
:arg salt: raw salt data
:type rounds: int
:arg rounds: number of iterations.
:type alg: str
:arg alg: name of digest to use (e.g. ``"sha-1"``).
:returns:
raw bytes of ``SaltedPassword``
"""
if isinstance(password, bytes):
password = password.decode("utf-8")
# NOTE: pbkdf2_hmac() will encode secret & salt using utf-8,
# and handle normalizing alg name.
return pbkdf2_hmac(alg, saslprep(password), salt, rounds)
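# minimal sketch tying derive_digest() back to extract_digest_info():
# the derived value should equal the digest stored for that alg
# ('stored_hash' and the password are placeholders):
##   salt, rounds, digest = scram.extract_digest_info(stored_hash, "sha-1")
##   assert scram.derive_digest("pencil", salt, rounds, "sha-1") == digest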
#===================================================================
# serialization
#===================================================================
@classmethod
def from_string(cls, hash):
hash = to_native_str(hash, "ascii", "hash")
if not hash.startswith("$scram$"):
raise uh.exc.InvalidHashError(cls)
parts = hash[7:].split("$")
if len(parts) != 3:
raise uh.exc.MalformedHashError(cls)
rounds_str, salt_str, chk_str = parts
# decode rounds
rounds = int(rounds_str)
if rounds_str != str(rounds): # forbid zero padding, etc.
raise uh.exc.MalformedHashError(cls)
# decode salt
try:
salt = ab64_decode(salt_str.encode("ascii"))
except TypeError:
raise uh.exc.MalformedHashError(cls)
# decode algs/digest list
if not chk_str:
# scram hashes MUST have something here.
raise uh.exc.MalformedHashError(cls)
elif "=" in chk_str:
# comma-separated list of 'alg=digest' pairs
algs = None
chkmap = {}
for pair in chk_str.split(","):
alg, digest = pair.split("=")
try:
chkmap[alg] = ab64_decode(digest.encode("ascii"))
except TypeError:
raise uh.exc.MalformedHashError(cls)
else:
# comma-separated list of alg names, no digests
algs = chk_str
chkmap = None
# return new object
return cls(
rounds=rounds,
salt=salt,
checksum=chkmap,
algs=algs,
)
def to_string(self):
salt = bascii_to_str(ab64_encode(self.salt))
chkmap = self.checksum
chk_str = ",".join(
"%s=%s" % (alg, bascii_to_str(ab64_encode(chkmap[alg])))
for alg in self.algs
)
return '$scram$%d$%s$%s' % (self.rounds, salt, chk_str)
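# sample layout emitted above (salt & digest fields are illustrative
# placeholders, encoded with adapted base64 via ab64_encode):
#   $scram$100000$<ab64 salt>$sha-1=<ab64>,sha-256=<ab64>,sha-512=<ab64>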
#===================================================================
# variant constructor
#===================================================================
@classmethod
def using(cls, default_algs=None, algs=None, **kwds):
# parse aliases
if algs is not None:
assert default_algs is None
default_algs = algs
# create subclass
subcls = super(scram, cls).using(**kwds)
# fill in algs
if default_algs is not None:
subcls.default_algs = cls._norm_algs(default_algs)
return subcls
#===================================================================
# init
#===================================================================
def __init__(self, algs=None, **kwds):
super(scram, self).__init__(**kwds)
# init algs
digest_map = self.checksum
if algs is not None:
if digest_map is not None:
raise RuntimeError("checksum & algs kwds are mutually exclusive")
algs = self._norm_algs(algs)
elif digest_map is not None:
# derive algs list from digest map (if present).
algs = self._norm_algs(digest_map.keys())
elif self.use_defaults:
algs = list(self.default_algs)
assert self._norm_algs(algs) == algs, "invalid default algs: %r" % (algs,)
else:
raise TypeError("no algs list specified")
self.algs = algs
def _norm_checksum(self, checksum, relaxed=False):
if not isinstance(checksum, dict):
raise uh.exc.ExpectedTypeError(checksum, "dict", "checksum")
for alg, digest in iteritems(checksum):
if alg != norm_hash_name(alg, 'iana'):
raise ValueError("malformed algorithm name in scram hash: %r" %
(alg,))
if len(alg) > 9:
raise ValueError("SCRAM limits algorithm names to "
"9 characters: %r" % (alg,))
if not isinstance(digest, bytes):
raise uh.exc.ExpectedTypeError(digest, "raw bytes", "digests")
# TODO: verify digest size (if digest is known)
if 'sha-1' not in checksum:
# NOTE: required because of SCRAM spec.
raise ValueError("sha-1 must be in algorithm list of scram hash")
return checksum
@classmethod
def _norm_algs(cls, algs):
"""normalize algs parameter"""
if isinstance(algs, native_string_types):
algs = splitcomma(algs)
algs = sorted(norm_hash_name(alg, 'iana') for alg in algs)
if any(len(alg)>9 for alg in algs):
raise ValueError("SCRAM limits alg names to max of 9 characters")
if 'sha-1' not in algs:
# NOTE: required because of SCRAM spec (rfc 5802)
raise ValueError("sha-1 must be in algorithm list of scram hash")
return algs
#===================================================================
# migration
#===================================================================
def _calc_needs_update(self, **kwds):
# marks hashes as deprecated if they don't include at least all default_algs.
# XXX: should we deprecate if they aren't exactly the same,
# to permit removing legacy hashes?
if not set(self.algs).issuperset(self.default_algs):
return True
# hand off to base implementation
return super(scram, self)._calc_needs_update(**kwds)
#===================================================================
# digest methods
#===================================================================
def _calc_checksum(self, secret, alg=None):
rounds = self.rounds
salt = self.salt
hash = self.derive_digest
if alg:
# if requested, generate digest for specific alg
return hash(secret, salt, rounds, alg)
else:
# by default, return dict containing digests for all algs
return dict(
(alg, hash(secret, salt, rounds, alg))
for alg in self.algs
)
@classmethod
def verify(cls, secret, hash, full=False):
uh.validate_secret(secret)
self = cls.from_string(hash)
chkmap = self.checksum
if not chkmap:
raise ValueError("expected %s hash, got %s config string instead" %
(cls.name, cls.name))
# NOTE: to make the verify method efficient, we just calculate hash
# of shortest digest by default. apps can pass in "full=True" to
# check entire hash for consistency.
if full:
correct = failed = False
for alg, digest in iteritems(chkmap):
other = self._calc_checksum(secret, alg)
# NOTE: could do this length check in norm_algs(),
# but don't need to be that strict, and want to be able
# to parse hashes containing algs not supported by platform.
# it's fine if we fail here though.
if len(digest) != len(other):
raise ValueError("mis-sized %s digest in scram hash: %r != %r"
% (alg, len(digest), len(other)))
if consteq(other, digest):
correct = True
else:
failed = True
if correct and failed:
raise ValueError("scram hash verified inconsistently, "
"may be corrupted")
else:
return correct
else:
# XXX: should this just always use sha1 hash? would be faster.
# otherwise only verify against one hash, pick one w/ best security.
for alg in self._verify_algs:
if alg in chkmap:
other = self._calc_checksum(secret, alg)
return consteq(other, chkmap[alg])
# there should always be sha-1 at the very least,
# or something went wrong inside _norm_algs()
raise AssertionError("sha-1 digest not found!")
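# illustrative verify() usage (a sketch; 'stored_hash' is a placeholder):
##   scram.verify("pencil", stored_hash)             # checks one preferred alg
##   scram.verify("pencil", stored_hash, full=True)  # cross-checks every digest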
#===================================================================
#
#===================================================================
#=============================================================================
# code used for testing scram against protocol examples during development.
#=============================================================================
##def _test_reference_scram():
## "quick hack testing scram reference vectors"
## # NOTE: "n,," is GS2 header - see https://tools.ietf.org/html/rfc5801
## from zdppy_password_hash.utils.compat import print_
##
## engine = _scram_engine(
## alg="sha-1",
## salt='QSXCR+Q6sek8bf92'.decode("base64"),
## rounds=4096,
## password=u("pencil"),
## )
## print_(engine.digest.encode("base64").rstrip())
##
## msg = engine.format_auth_msg(
## username="user",
## client_nonce = "fyko+d2lbbFgONRv9qkxdawL",
## server_nonce = "3rfcNHYJY1ZVvWVs7j",
## header='c=biws',
## )
##
## cp = engine.get_encoded_client_proof(msg)
## assert cp == "v0X8v3Bz2T0CJGbJQyF0X+HI4Ts=", cp
##
## ss = engine.get_encoded_server_sig(msg)
## assert ss == "rmF9pqV8S7suAoZWja4dJRkFsKQ=", ss
##
##class _scram_engine(object):
## """helper class for verifying scram hash behavior
## against SCRAM protocol examples. not officially part of Passlib.
##
## takes in alg, salt, rounds, and a digest or password.
##
## can calculate the various keys & messages of the scram protocol.
##
## """
## #=========================================================
## # init
## #=========================================================
##
## @classmethod
## def from_string(cls, hash, alg):
## "create record from scram hash, for given alg"
## return cls(alg, *scram.extract_digest_info(hash, alg))
##
## def __init__(self, alg, salt, rounds, digest=None, password=None):
## self.alg = norm_hash_name(alg)
## self.salt = salt
## self.rounds = rounds
## self.password = password
## if password:
## data = scram.derive_digest(password, salt, rounds, alg)
## if digest and data != digest:
## raise ValueError("password doesn't match digest")
## else:
## digest = data
## elif not digest:
## raise TypeError("must provide password or digest")
## self.digest = digest
##
## #=========================================================
## # frontend methods
## #=========================================================
## def get_hash(self, data):
## "return hash of raw data"
## return hashlib.new(iana_to_hashlib(self.alg), data).digest()
##
## def get_client_proof(self, msg):
## "return client proof of specified auth msg text"
## return xor_bytes(self.client_key, self.get_client_sig(msg))
##
## def get_encoded_client_proof(self, msg):
## return self.get_client_proof(msg).encode("base64").rstrip()
##
## def get_client_sig(self, msg):
## "return client signature of specified auth msg text"
## return self.get_hmac(self.stored_key, msg)
##
## def get_server_sig(self, msg):
## "return server signature of specified auth msg text"
## return self.get_hmac(self.server_key, msg)
##
## def get_encoded_server_sig(self, msg):
## return self.get_server_sig(msg).encode("base64").rstrip()
##
## def format_server_response(self, client_nonce, server_nonce):
## return 'r={client_nonce}{server_nonce},s={salt},i={rounds}'.format(
## client_nonce=client_nonce,
## server_nonce=server_nonce,
## rounds=self.rounds,
## salt=self.encoded_salt,
## )
##
## def format_auth_msg(self, username, client_nonce, server_nonce,
## header='c=biws'):
## return (
## 'n={username},r={client_nonce}'
## ','
## 'r={client_nonce}{server_nonce},s={salt},i={rounds}'
## ','
## '{header},r={client_nonce}{server_nonce}'
## ).format(
## username=username,
## client_nonce=client_nonce,
## server_nonce=server_nonce,
## salt=self.encoded_salt,
## rounds=self.rounds,
## header=header,
## )
##
## #=========================================================
## # helpers to calculate & cache constant data
## #=========================================================
## def _calc_get_hmac(self):
## return get_prf("hmac-" + iana_to_hashlib(self.alg))[0]
##
## def _calc_client_key(self):
## return self.get_hmac(self.digest, b("Client Key"))
##
## def _calc_stored_key(self):
## return self.get_hash(self.client_key)
##
## def _calc_server_key(self):
## return self.get_hmac(self.digest, b("Server Key"))
##
## def _calc_encoded_salt(self):
## return self.salt.encode("base64").rstrip()
##
## #=========================================================
## # hacks for calculated attributes
## #=========================================================
##
## def __getattr__(self, attr):
## if not attr.startswith("_"):
## f = getattr(self, "_calc_" + attr, None)
## if f:
## value = f()
## setattr(self, attr, value)
## return value
## raise AttributeError("attribute not found")
##
## def __dir__(self):
## cdir = dir(self.__class__)
## attrs = set(cdir)
## attrs.update(self.__dict__)
## attrs.update(attr[6:] for attr in cdir
## if attr.startswith("_calc_"))
## return sorted(attrs)
## #=========================================================
## # eoc
## #=========================================================
#=============================================================================
# eof
#=============================================================================
# (end of source file: zdppy_password_hash/handlers/scram.py)
#=============================================================================
# imports
#=============================================================================
# core
from hashlib import md5
import re
import logging; log = logging.getLogger(__name__)
from warnings import warn
# site
# pkg
from zdppy_password_hash.utils import to_unicode
from zdppy_password_hash.utils.binary import h64
from zdppy_password_hash.utils.compat import byte_elem_value, irange, u, \
uascii_to_str, unicode, str_to_bascii
import zdppy_password_hash.utils.handlers as uh
# local
__all__ = [
"sun_md5_crypt",
]
#=============================================================================
# backend
#=============================================================================
# constant data used by alg - Hamlet act 3 scene 1 + null char
# exact bytes as in http://www.ibiblio.org/pub/docs/books/gutenberg/etext98/2ws2610.txt
# from Project Gutenberg.
MAGIC_HAMLET = (
b"To be, or not to be,--that is the question:--\n"
b"Whether 'tis nobler in the mind to suffer\n"
b"The slings and arrows of outrageous fortune\n"
b"Or to take arms against a sea of troubles,\n"
b"And by opposing end them?--To die,--to sleep,--\n"
b"No more; and by a sleep to say we end\n"
b"The heartache, and the thousand natural shocks\n"
b"That flesh is heir to,--'tis a consummation\n"
b"Devoutly to be wish'd. To die,--to sleep;--\n"
b"To sleep! perchance to dream:--ay, there's the rub;\n"
b"For in that sleep of death what dreams may come,\n"
b"When we have shuffled off this mortal coil,\n"
b"Must give us pause: there's the respect\n"
b"That makes calamity of so long life;\n"
b"For who would bear the whips and scorns of time,\n"
b"The oppressor's wrong, the proud man's contumely,\n"
b"The pangs of despis'd love, the law's delay,\n"
b"The insolence of office, and the spurns\n"
b"That patient merit of the unworthy takes,\n"
b"When he himself might his quietus make\n"
b"With a bare bodkin? who would these fardels bear,\n"
b"To grunt and sweat under a weary life,\n"
b"But that the dread of something after death,--\n"
b"The undiscover'd country, from whose bourn\n"
b"No traveller returns,--puzzles the will,\n"
b"And makes us rather bear those ills we have\n"
b"Than fly to others that we know not of?\n"
b"Thus conscience does make cowards of us all;\n"
b"And thus the native hue of resolution\n"
b"Is sicklied o'er with the pale cast of thought;\n"
b"And enterprises of great pith and moment,\n"
b"With this regard, their currents turn awry,\n"
b"And lose the name of action.--Soft you now!\n"
b"The fair Ophelia!--Nymph, in thy orisons\n"
b"Be all my sins remember'd.\n\x00" #<- apparently null at end of C string is included (test vector won't pass otherwise)
)
# NOTE: these sequences are pre-calculated iteration ranges used by X & Y loops w/in rounds function below
xr = irange(7)
_XY_ROUNDS = [
tuple((i,i,i+3) for i in xr), # xrounds 0
tuple((i,i+1,i+4) for i in xr), # xrounds 1
tuple((i,i+8,(i+11)&15) for i in xr), # yrounds 0
tuple((i,(i+9)&15, (i+12)&15) for i in xr), # yrounds 1
]
del xr
def raw_sun_md5_crypt(secret, rounds, salt):
"""given secret & salt, return encoded sun-md5-crypt checksum"""
global MAGIC_HAMLET
assert isinstance(secret, bytes)
assert isinstance(salt, bytes)
# validate rounds
if rounds <= 0:
rounds = 0
real_rounds = 4096 + rounds
# NOTE: spec seems to imply max 'rounds' is 2**32-1
# generate initial digest to start off round 0.
# NOTE: algorithm 'salt' includes full config string w/ trailing "$"
result = md5(secret + salt).digest()
assert len(result) == 16
# NOTE: many things in this function have been inlined (to speed up the loop
# as much as possible), to the point that this code barely resembles
# the algorithm as described in the docs. in particular:
#
# * all accesses to a given bit have been inlined using the formula
# rbitval(bit) = (rval((bit>>3) & 15) >> (bit & 7)) & 1
#
# * the calculation of coinflip value R has been inlined
#
# * the conditional division of coinflip value V has been inlined as
# a shift right of 0 or 1.
#
# * the i, i+3, etc iterations are precalculated in lists.
#
# * the round-based conditional division of x & y is now performed
# by choosing an appropriate precalculated list, so that it only
# calculates the 7 bits which will actually be used.
#
X_ROUNDS_0, X_ROUNDS_1, Y_ROUNDS_0, Y_ROUNDS_1 = _XY_ROUNDS
# NOTE: % appears to be *slightly* slower than &, so we prefer & if possible
round = 0
while round < real_rounds:
# convert last result byte string to list of byte-ints for easy access
rval = [ byte_elem_value(c) for c in result ].__getitem__
# build up X bit by bit
x = 0
xrounds = X_ROUNDS_1 if (rval((round>>3) & 15)>>(round & 7)) & 1 else X_ROUNDS_0
for i, ia, ib in xrounds:
a = rval(ia)
b = rval(ib)
v = rval((a >> (b % 5)) & 15) >> ((b>>(a&7)) & 1)
x |= ((rval((v>>3)&15)>>(v&7))&1) << i
# build up Y bit by bit
y = 0
yrounds = Y_ROUNDS_1 if (rval(((round+64)>>3) & 15)>>(round & 7)) & 1 else Y_ROUNDS_0
for i, ia, ib in yrounds:
a = rval(ia)
b = rval(ib)
v = rval((a >> (b % 5)) & 15) >> ((b>>(a&7)) & 1)
y |= ((rval((v>>3)&15)>>(v&7))&1) << i
# extract x'th and y'th bit, xoring them together to yield "coin flip"
coin = ((rval(x>>3) >> (x&7)) ^ (rval(y>>3) >> (y&7))) & 1
# construct hash for this round
h = md5(result)
if coin:
h.update(MAGIC_HAMLET)
h.update(unicode(round).encode("ascii"))
result = h.digest()
round += 1
# encode output
return h64.encode_transposed_bytes(result, _chk_offsets)
# NOTE: same offsets as md5_crypt
_chk_offsets = (
12,6,0,
13,7,1,
14,8,2,
15,9,3,
5,10,4,
11,
)
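# illustrative call (a sketch -- salt string & rounds chosen arbitrarily;
# note the 'salt' argument is the full config string, per the NOTE above):
##   chk = raw_sun_md5_crypt(b"secret", 5000, b"$md5,rounds=5000$GUBv0xjJ$")
##   # performs 4096 + 5000 = 9096 rounds, each optionally mixing MAGIC_HAMLET
##   # into the md5 state depending on that round's "coin flip" bit.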
#=============================================================================
# handler
#=============================================================================
class sun_md5_crypt(uh.HasRounds, uh.HasSalt, uh.GenericHandler):
"""This class implements the Sun-MD5-Crypt password hash, and follows the :ref:`password-hash-api`.
It supports a variable-length salt, and a variable number of rounds.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts the following optional keywords:
:type salt: str
:param salt:
Optional salt string.
If not specified, a salt will be autogenerated (this is recommended).
If specified, it must be drawn from the regexp range ``[./0-9A-Za-z]``.
:type salt_size: int
:param salt_size:
If no salt is specified, this parameter can be used to specify
the size (in characters) of the autogenerated salt.
It currently defaults to 8.
:type rounds: int
:param rounds:
Optional number of rounds to use.
Defaults to 34000, must be between 0 and 4294963199, inclusive.
:type bare_salt: bool
:param bare_salt:
Optional flag used to enable an alternate salt digest behavior
used by some hash strings in this scheme.
This flag can be ignored by most users.
Defaults to ``False``.
(see :ref:`smc-bare-salt` for details).
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~zdppy_password_hash.exc.PasslibHashWarning`
will be issued instead. Correctable errors include ``rounds``
that are too small or too large, and ``salt`` strings that are too long.
.. versionadded:: 1.6
"""
#===================================================================
# class attrs
#===================================================================
name = "sun_md5_crypt"
setting_kwds = ("salt", "rounds", "bare_salt", "salt_size")
checksum_chars = uh.HASH64_CHARS
checksum_size = 22
# NOTE: Solaris docs (as of release 9u2) say max password length is 255.
# NOTE: not sure if original crypt has a salt size limit,
# all instances that have been seen use 8 chars.
default_salt_size = 8
max_salt_size = None
salt_chars = uh.HASH64_CHARS
default_rounds = 34000 # current zdppy_password_hash default
min_rounds = 0
max_rounds = 4294963199 ##2**32-1-4096
# XXX: ^ not sure what it does if past this bound... does the 32-bit int roll over?
rounds_cost = "linear"
ident_values = (u("$md5$"), u("$md5,"))
#===================================================================
# instance attrs
#===================================================================
bare_salt = False # flag to indicate legacy hashes that lack "$$" suffix
#===================================================================
# constructor
#===================================================================
def __init__(self, bare_salt=False, **kwds):
self.bare_salt = bare_salt
super(sun_md5_crypt, self).__init__(**kwds)
#===================================================================
# internal helpers
#===================================================================
@classmethod
def identify(cls, hash):
hash = uh.to_unicode_for_identify(hash)
return hash.startswith(cls.ident_values)
@classmethod
def from_string(cls, hash):
hash = to_unicode(hash, "ascii", "hash")
#
# detect if hash specifies rounds value.
# if so, parse and validate it.
# by end, set 'rounds' to int value, and 'tail' containing salt+chk
#
if hash.startswith(u("$md5$")):
rounds = 0
salt_idx = 5
elif hash.startswith(u("$md5,rounds=")):
idx = hash.find(u("$"), 12)
if idx == -1:
raise uh.exc.MalformedHashError(cls, "unexpected end of rounds")
rstr = hash[12:idx]
try:
rounds = int(rstr)
except ValueError:
raise uh.exc.MalformedHashError(cls, "bad rounds")
if rstr != unicode(rounds):
raise uh.exc.ZeroPaddedRoundsError(cls)
if rounds == 0:
# NOTE: not sure if this is forbidden by spec or not;
# but allowing it would complicate things,
# and it should never occur anyways.
raise uh.exc.MalformedHashError(cls, "explicit zero rounds")
salt_idx = idx+1
else:
raise uh.exc.InvalidHashError(cls)
#
# salt/checksum separation is kinda weird,
# to deal cleanly with some backward-compatible workarounds
# implemented by original implementation.
#
chk_idx = hash.rfind(u("$"), salt_idx)
if chk_idx == -1:
# ''-config for $-hash
salt = hash[salt_idx:]
chk = None
bare_salt = True
elif chk_idx == len(hash)-1:
if chk_idx > salt_idx and hash[-2] == u("$"):
raise uh.exc.MalformedHashError(cls, "too many '$' separators")
# $-config for $$-hash
salt = hash[salt_idx:-1]
chk = None
bare_salt = False
elif chk_idx > 0 and hash[chk_idx-1] == u("$"):
# $$-hash
salt = hash[salt_idx:chk_idx-1]
chk = hash[chk_idx+1:]
bare_salt = False
else:
# $-hash
salt = hash[salt_idx:chk_idx]
chk = hash[chk_idx+1:]
bare_salt = True
return cls(
rounds=rounds,
salt=salt,
checksum=chk,
bare_salt=bare_salt,
)
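# the four accepted layouts parsed above (checksums are placeholders;
# the same layouts apply with the "$md5,rounds=N$" prefix):
#   $md5$<salt>        -> config string (bare salt)
#   $md5$<salt>$       -> config string ("$$" style)
#   $md5$<salt>$$<chk> -> full hash ("$$" style)
#   $md5$<salt>$<chk>  -> full hash (bare salt)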
def to_string(self, _withchk=True):
ss = u('') if self.bare_salt else u('$')
rounds = self.rounds
if rounds > 0:
hash = u("$md5,rounds=%d$%s%s") % (rounds, self.salt, ss)
else:
hash = u("$md5$%s%s") % (self.salt, ss)
if _withchk:
chk = self.checksum
hash = u("%s$%s") % (hash, chk)
return uascii_to_str(hash)
#===================================================================
# primary interface
#===================================================================
# TODO: if we're on solaris, check for native crypt() support.
# this will require extra testing, to make sure native crypt
# actually behaves correctly. of particular importance:
# when using ""-config, make sure to append "$x" to string.
def _calc_checksum(self, secret):
# NOTE: no reference for how sun_md5_crypt handles unicode
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
config = str_to_bascii(self.to_string(_withchk=False))
return raw_sun_md5_crypt(secret, self.rounds, config).decode("ascii")
#===================================================================
# eoc
#===================================================================
#=============================================================================
# eof
#=============================================================================
# (end of source file: zdppy_password_hash/handlers/sun_md5_crypt.py)
#=============================================================================
# imports
#=============================================================================
# core
from base64 import b64encode, b64decode
from hashlib import md5, sha1, sha256, sha512
import logging; log = logging.getLogger(__name__)
import re
# site
# pkg
from zdppy_password_hash.handlers.misc import plaintext
from zdppy_password_hash.utils import unix_crypt_schemes, to_unicode
from zdppy_password_hash.utils.compat import uascii_to_str, unicode, u
from zdppy_password_hash.utils.decor import classproperty
import zdppy_password_hash.utils.handlers as uh
# local
__all__ = [
"ldap_plaintext",
"ldap_md5",
"ldap_sha1",
"ldap_salted_md5",
"ldap_salted_sha1",
"ldap_salted_sha256",
"ldap_salted_sha512",
##"get_active_ldap_crypt_schemes",
"ldap_des_crypt",
"ldap_bsdi_crypt",
"ldap_md5_crypt",
"ldap_sha1_crypt",
"ldap_bcrypt",
"ldap_sha256_crypt",
"ldap_sha512_crypt",
]
#=============================================================================
# ldap helpers
#=============================================================================
class _Base64DigestHelper(uh.StaticHandler):
"""helper for ldap_md5 / ldap_sha1"""
# XXX: could combine this with hex digests in digests.py
ident = None # required - prefix identifier
_hash_func = None # required - hash function
_hash_regex = None # required - regexp to recognize hash
checksum_chars = uh.PADDED_BASE64_CHARS
@classproperty
def _hash_prefix(cls):
"""tell StaticHandler to strip ident from checksum"""
return cls.ident
def _calc_checksum(self, secret):
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
chk = self._hash_func(secret).digest()
return b64encode(chk).decode("ascii")
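# equivalent one-liner for the helper above (a sketch, using ldap_md5's
# digest as an example):
##   from base64 import b64encode
##   from hashlib import md5
##   print("{MD5}" + b64encode(md5(b"secret").digest()).decode("ascii"))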
class _SaltedBase64DigestHelper(uh.HasRawSalt, uh.HasRawChecksum, uh.GenericHandler):
"""helper for ldap_salted_md5 / ldap_salted_sha1"""
setting_kwds = ("salt", "salt_size")
checksum_chars = uh.PADDED_BASE64_CHARS
ident = None # required - prefix identifier
_hash_func = None # required - hash function
_hash_regex = None # required - regexp to recognize hash
# NOTE: openldap implementation uses 4 byte salt,
# but it's been reported (issue 30) that some servers use larger salts.
# the semi-related rfc3112 recommends support for up to 16 byte salts.
min_salt_size = 4
default_salt_size = 4
max_salt_size = 16
@classmethod
def from_string(cls, hash):
hash = to_unicode(hash, "ascii", "hash")
m = cls._hash_regex.match(hash)
if not m:
raise uh.exc.InvalidHashError(cls)
try:
data = b64decode(m.group("tmp").encode("ascii"))
except TypeError:
raise uh.exc.MalformedHashError(cls)
cs = cls.checksum_size
assert cs
return cls(checksum=data[:cs], salt=data[cs:])
def to_string(self):
data = self.checksum + self.salt
hash = self.ident + b64encode(data).decode("ascii")
return uascii_to_str(hash)
def _calc_checksum(self, secret):
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
return self._hash_func(secret + self.salt).digest()
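# minimal sketch of the storage layout used above: the base64 payload packs
# digest + salt back to back (sha1 with a 4-byte salt shown as an example):
##   import os
##   from base64 import b64encode
##   from hashlib import sha1
##   salt = os.urandom(4)
##   payload = sha1(b"secret" + salt).digest() + salt
##   print("{SSHA}" + b64encode(payload).decode("ascii"))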
#=============================================================================
# implementations
#=============================================================================
class ldap_md5(_Base64DigestHelper):
"""This class stores passwords using LDAP's plain MD5 format, and follows the :ref:`password-hash-api`.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.hash` and :meth:`~zdppy_password_hash.ifc.PasswordHash.genconfig` methods have no optional keywords.
"""
name = "ldap_md5"
ident = u("{MD5}")
_hash_func = md5
_hash_regex = re.compile(u(r"^\{MD5\}(?P<chk>[+/a-zA-Z0-9]{22}==)$"))
class ldap_sha1(_Base64DigestHelper):
"""This class stores passwords using LDAP's plain SHA1 format, and follows the :ref:`password-hash-api`.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.hash` and :meth:`~zdppy_password_hash.ifc.PasswordHash.genconfig` methods have no optional keywords.
"""
name = "ldap_sha1"
ident = u("{SHA}")
_hash_func = sha1
_hash_regex = re.compile(u(r"^\{SHA\}(?P<chk>[+/a-zA-Z0-9]{27}=)$"))
class ldap_salted_md5(_SaltedBase64DigestHelper):
"""This class stores passwords using LDAP's salted MD5 format, and follows the :ref:`password-hash-api`.
It supports a 4-16 byte salt.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts the following optional keywords:
:type salt: bytes
:param salt:
Optional salt string.
If not specified, one will be autogenerated (this is recommended).
If specified, it may be any 4-16 byte string.
:type salt_size: int
:param salt_size:
Optional number of bytes to use when autogenerating new salts.
Defaults to 4 bytes for compatibility with the LDAP spec,
but some systems use larger salts, and Passlib supports
any value between 4-16.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~zdppy_password_hash.exc.PasslibHashWarning`
will be issued instead. Correctable errors include
``salt`` strings that are too long.
.. versionadded:: 1.6
.. versionchanged:: 1.6
This format now supports variable length salts, instead of a fixed 4 bytes.
"""
name = "ldap_salted_md5"
ident = u("{SMD5}")
checksum_size = 16
_hash_func = md5
_hash_regex = re.compile(u(r"^\{SMD5\}(?P<tmp>[+/a-zA-Z0-9]{27,}={0,2})$"))
class ldap_salted_sha1(_SaltedBase64DigestHelper):
"""
This class stores passwords using LDAP's "Salted SHA1" format,
and follows the :ref:`password-hash-api`.
It supports a 4-16 byte salt.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts the following optional keywords:
:type salt: bytes
:param salt:
Optional salt string.
If not specified, one will be autogenerated (this is recommended).
If specified, it may be any 4-16 byte string.
:type salt_size: int
:param salt_size:
Optional number of bytes to use when autogenerating new salts.
Defaults to 4 bytes for compatibility with the LDAP spec,
but some systems use larger salts, and Passlib supports
any value between 4-16.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~zdppy_password_hash.exc.PasslibHashWarning`
will be issued instead. Correctable errors include
``salt`` strings that are too long.
.. versionadded:: 1.6
.. versionchanged:: 1.6
This format now supports variable length salts, instead of a fixed 4 bytes.
"""
name = "ldap_salted_sha1"
ident = u("{SSHA}")
checksum_size = 20
_hash_func = sha1
# NOTE: 32 = ceil((20 + 4) * 4/3)
_hash_regex = re.compile(u(r"^\{SSHA\}(?P<tmp>[+/a-zA-Z0-9]{32,}={0,2})$"))
class ldap_salted_sha256(_SaltedBase64DigestHelper):
"""
This class stores passwords using LDAP's "Salted SHA2-256" format,
and follows the :ref:`password-hash-api`.
It supports a 4-16 byte salt.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts the following optional keywords:
:type salt: bytes
:param salt:
Optional salt string.
If not specified, one will be autogenerated (this is recommended).
If specified, it may be any 4-16 byte string.
:type salt_size: int
:param salt_size:
Optional number of bytes to use when autogenerating new salts.
Defaults to 8 bytes for compatibility with the LDAP spec,
but Passlib supports any value between 4-16.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~zdppy_password_hash.exc.PasslibHashWarning`
will be issued instead. Correctable errors include
``salt`` strings that are too long.
.. versionadded:: 1.7.3
"""
name = "ldap_salted_sha256"
ident = u("{SSHA256}")
checksum_size = 32
default_salt_size = 8
_hash_func = sha256
# NOTE: 48 = ceil((32 + 4) * 4/3)
_hash_regex = re.compile(u(r"^\{SSHA256\}(?P<tmp>[+/a-zA-Z0-9]{48,}={0,2})$"))
class ldap_salted_sha512(_SaltedBase64DigestHelper):
"""
This class stores passwords using LDAP's "Salted SHA2-512" format,
and follows the :ref:`password-hash-api`.
It supports a 4-16 byte salt.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts the following optional keywords:
:type salt: bytes
:param salt:
Optional salt string.
If not specified, one will be autogenerated (this is recommended).
If specified, it may be any 4-16 byte string.
:type salt_size: int
:param salt_size:
Optional number of bytes to use when autogenerating new salts.
Defaults to 8 bytes for compatibility with the LDAP spec,
but Passlib supports any value between 4-16.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~zdppy_password_hash.exc.PasslibHashWarning`
will be issued instead. Correctable errors include
``salt`` strings that are too long.
.. versionadded:: 1.7.3
"""
name = "ldap_salted_sha512"
ident = u("{SSHA512}")
checksum_size = 64
default_salt_size = 8
_hash_func = sha512
# NOTE: 91 = ceil((64 + 4) * 4/3)
_hash_regex = re.compile(u(r"^\{SSHA512\}(?P<tmp>[+/a-zA-Z0-9]{91,}={0,2})$"))
class ldap_plaintext(plaintext):
"""This class stores passwords in plaintext, and follows the :ref:`password-hash-api`.
This class acts much like the generic :class:`!zdppy_password_hash.hash.plaintext` handler,
except that it will identify a hash only if it does NOT begin with the ``{XXX}`` identifier prefix
used by RFC2307 passwords.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.hash`, :meth:`~zdppy_password_hash.ifc.PasswordHash.genhash`, and :meth:`~zdppy_password_hash.ifc.PasswordHash.verify` methods all require the
following additional contextual keyword:
:type encoding: str
:param encoding:
This controls the character encoding to use (defaults to ``utf-8``).
This encoding will be used to encode :class:`!unicode` passwords
under Python 2, and decode :class:`!bytes` hashes under Python 3.
.. versionchanged:: 1.6
The ``encoding`` keyword was added.
"""
# NOTE: this subclasses plaintext, since all it does differently
# is override identify()
name = "ldap_plaintext"
_2307_pat = re.compile(u(r"^\{\w+\}.*$"))
@uh.deprecated_method(deprecated="1.7", removed="2.0")
@classmethod
def genconfig(cls):
# Overriding plaintext.genconfig() since it returns "",
# but have to return non-empty value due to identify() below
return "!"
@classmethod
def identify(cls, hash):
# NOTE: identifies all strings EXCEPT those with {XXX} prefix
hash = uh.to_unicode_for_identify(hash)
return bool(hash) and cls._2307_pat.match(hash) is None
#=============================================================================
# {CRYPT} wrappers
# the following are wrappers around the base crypt algorithms,
# which add the LDAP-required {CRYPT} prefix
#=============================================================================
ldap_crypt_schemes = [ 'ldap_' + name for name in unix_crypt_schemes ]
def _init_ldap_crypt_handlers():
# NOTE: I don't like to implicitly modify globals() like this,
# but don't want to write all these handlers out either :)
g = globals()
for wname in unix_crypt_schemes:
name = 'ldap_' + wname
g[name] = uh.PrefixWrapper(name, wname, prefix=u("{CRYPT}"), lazy=True)
del g
_init_ldap_crypt_handlers()
##_lcn_host = None
##def get_host_ldap_crypt_schemes():
## global _lcn_host
## if _lcn_host is None:
## from zdppy_password_hash.hosts import host_context
## schemes = host_context.schemes()
## _lcn_host = [
## "ldap_" + name
## for name in unix_crypt_names
## if name in schemes
## ]
## return _lcn_host
#=============================================================================
# eof
#=============================================================================
# (end of source file: zdppy_password_hash/handlers/ldap_digests.py)
#=============================================================================
# imports
#=============================================================================
# core
from binascii import hexlify, unhexlify
from base64 import b64encode, b64decode
import logging; log = logging.getLogger(__name__)
# site
# pkg
from zdppy_password_hash.utils import to_unicode
from zdppy_password_hash.utils.binary import ab64_decode, ab64_encode
from zdppy_password_hash.utils.compat import str_to_bascii, u, uascii_to_str, unicode
from zdppy_password_hash.crypto.digest import pbkdf2_hmac
import zdppy_password_hash.utils.handlers as uh
# local
__all__ = [
"pbkdf2_sha1",
"pbkdf2_sha256",
"pbkdf2_sha512",
"cta_pbkdf2_sha1",
"dlitz_pbkdf2_sha1",
"grub_pbkdf2_sha512",
]
#=============================================================================
#
#=============================================================================
class Pbkdf2DigestHandler(uh.HasRounds, uh.HasRawSalt, uh.HasRawChecksum, uh.GenericHandler):
"""base class for various pbkdf2_{digest} algorithms"""
#===================================================================
# class attrs
#===================================================================
#--GenericHandler--
setting_kwds = ("salt", "salt_size", "rounds")
checksum_chars = uh.HASH64_CHARS
#--HasSalt--
default_salt_size = 16
max_salt_size = 1024
#--HasRounds--
default_rounds = None # set by subclass
min_rounds = 1
max_rounds = 0xffffffff # setting at 32-bit limit for now
rounds_cost = "linear"
#--this class--
_digest = None # name of subclass-specified hash
# NOTE: max_salt_size and max_rounds are arbitrarily chosen to provide sanity check.
# the underlying pbkdf2 specifies no bounds for either.
# NOTE: defaults chosen to be at least as large as pbkdf2 rfc recommends...
# >8 bytes of entropy in salt, >1000 rounds
# increased due to time since rfc established
#===================================================================
# methods
#===================================================================
@classmethod
def from_string(cls, hash):
rounds, salt, chk = uh.parse_mc3(hash, cls.ident, handler=cls)
salt = ab64_decode(salt.encode("ascii"))
if chk:
chk = ab64_decode(chk.encode("ascii"))
return cls(rounds=rounds, salt=salt, checksum=chk)
def to_string(self):
salt = ab64_encode(self.salt).decode("ascii")
chk = ab64_encode(self.checksum).decode("ascii")
return uh.render_mc3(self.ident, self.rounds, salt, chk)
def _calc_checksum(self, secret):
# NOTE: pbkdf2_hmac() will encode secret & salt using UTF8
return pbkdf2_hmac(self._digest, secret, self.salt, self.rounds, self.checksum_size)
def create_pbkdf2_hash(hash_name, digest_size, rounds=12000, ident=None, module=__name__):
"""create new Pbkdf2DigestHandler subclass for a specific hash"""
name = 'pbkdf2_' + hash_name
if ident is None:
ident = u("$pbkdf2-%s$") % (hash_name,)
base = Pbkdf2DigestHandler
return type(name, (base,), dict(
__module__=module, # so ABCMeta won't clobber it.
name=name,
ident=ident,
_digest = hash_name,
default_rounds=rounds,
checksum_size=digest_size,
encoded_checksum_size=(digest_size*4+2)//3,
__doc__="""This class implements a generic ``PBKDF2-HMAC-%(digest)s``-based password hash, and follows the :ref:`password-hash-api`.
It supports a variable-length salt, and a variable number of rounds.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts the following optional keywords:
:type salt: bytes
:param salt:
Optional salt bytes.
If specified, the length must be between 0-1024 bytes.
If not specified, a %(dsc)d byte salt will be autogenerated (this is recommended).
:type salt_size: int
:param salt_size:
Optional number of bytes to use when autogenerating new salts.
Defaults to %(dsc)d bytes, but can be any value between 0 and 1024.
:type rounds: int
:param rounds:
Optional number of rounds to use.
Defaults to %(dr)d, but must be within ``range(1,1<<32)``.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~zdppy_password_hash.exc.PasslibHashWarning`
will be issued instead. Correctable errors include ``rounds``
that are too small or too large, and ``salt`` strings that are too long.
.. versionadded:: 1.6
""" % dict(digest=hash_name.upper(), dsc=base.default_salt_size, dr=rounds)
))
#------------------------------------------------------------------------
# derived handlers
#------------------------------------------------------------------------
pbkdf2_sha1 = create_pbkdf2_hash("sha1", 20, 131000, ident=u("$pbkdf2$"))
pbkdf2_sha256 = create_pbkdf2_hash("sha256", 32, 29000)
pbkdf2_sha512 = create_pbkdf2_hash("sha512", 64, 25000)
ldap_pbkdf2_sha1 = uh.PrefixWrapper("ldap_pbkdf2_sha1", pbkdf2_sha1, "{PBKDF2}", "$pbkdf2$", ident=True)
ldap_pbkdf2_sha256 = uh.PrefixWrapper("ldap_pbkdf2_sha256", pbkdf2_sha256, "{PBKDF2-SHA256}", "$pbkdf2-sha256$", ident=True)
ldap_pbkdf2_sha512 = uh.PrefixWrapper("ldap_pbkdf2_sha512", pbkdf2_sha512, "{PBKDF2-SHA512}", "$pbkdf2-sha512$", ident=True)
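# illustrative use of the derived handlers above (a sketch -- assumes this
# package mirrors passlib's public "zdppy_password_hash.hash" registry):
##   from zdppy_password_hash.hash import pbkdf2_sha256
##   h = pbkdf2_sha256.hash("secret")
##   assert pbkdf2_sha256.verify("secret", h)
##   # format: $pbkdf2-sha256$<rounds>$<ab64 salt>$<ab64 digest>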
#=============================================================================
# cryptacular's pbkdf2 hash
#=============================================================================
# bytes used by cta hash for base64 values 63 & 64
CTA_ALTCHARS = b"-_"
class cta_pbkdf2_sha1(uh.HasRounds, uh.HasRawSalt, uh.HasRawChecksum, uh.GenericHandler):
"""This class implements Cryptacular's PBKDF2-based crypt algorithm, and follows the :ref:`password-hash-api`.
It supports a variable-length salt, and a variable number of rounds.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts the following optional keywords:
:type salt: bytes
:param salt:
Optional salt bytes.
If specified, it may be any length.
If not specified, one will be autogenerated (this is recommended).
:type salt_size: int
:param salt_size:
Optional number of bytes to use when autogenerating new salts.
Defaults to 16 bytes, but can be any value between 0 and 1024.
:type rounds: int
:param rounds:
Optional number of rounds to use.
Defaults to the same value as :class:`pbkdf2_sha1`, must be within ``range(1,1<<32)``.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~zdppy_password_hash.exc.PasslibHashWarning`
will be issued instead. Correctable errors include ``rounds``
that are too small or too large, and ``salt`` strings that are too long.
.. versionadded:: 1.6
"""
#===================================================================
# class attrs
#===================================================================
#--GenericHandler--
name = "cta_pbkdf2_sha1"
setting_kwds = ("salt", "salt_size", "rounds")
ident = u("$p5k2$")
checksum_size = 20
# NOTE: max_salt_size and max_rounds are arbitrarily chosen to provide a
# sanity check. underlying algorithm (and reference implementation)
# allows effectively unbounded values for both of these parameters.
#--HasSalt--
default_salt_size = 16
max_salt_size = 1024
#--HasRounds--
default_rounds = pbkdf2_sha1.default_rounds
min_rounds = 1
max_rounds = 0xffffffff # setting at 32-bit limit for now
rounds_cost = "linear"
#===================================================================
# formatting
#===================================================================
# hash $p5k2$1000$ZxK4ZBJCfQg=$jJZVscWtO--p1-xIZl6jhO2LKR0=
# ident $p5k2$
# rounds 1000
# salt ZxK4ZBJCfQg=
# chk jJZVscWtO--p1-xIZl6jhO2LKR0=
# NOTE: rounds in hex
@classmethod
def from_string(cls, hash):
# NOTE: zdppy_password_hash deviation - forbidding zero-padded rounds
rounds, salt, chk = uh.parse_mc3(hash, cls.ident, rounds_base=16, handler=cls)
salt = b64decode(salt.encode("ascii"), CTA_ALTCHARS)
if chk:
chk = b64decode(chk.encode("ascii"), CTA_ALTCHARS)
return cls(rounds=rounds, salt=salt, checksum=chk)
def to_string(self):
salt = b64encode(self.salt, CTA_ALTCHARS).decode("ascii")
chk = b64encode(self.checksum, CTA_ALTCHARS).decode("ascii")
return uh.render_mc3(self.ident, self.rounds, salt, chk, rounds_base=16)
#===================================================================
# backend
#===================================================================
def _calc_checksum(self, secret):
# NOTE: pbkdf2_hmac() will encode secret & salt using utf-8
return pbkdf2_hmac("sha1", secret, self.salt, self.rounds, 20)
#===================================================================
# eoc
#===================================================================
#=============================================================================
# dlitz's pbkdf2 hash
#=============================================================================
class dlitz_pbkdf2_sha1(uh.HasRounds, uh.HasSalt, uh.GenericHandler):
"""This class implements Dwayne Litzenberger's PBKDF2-based crypt algorithm, and follows the :ref:`password-hash-api`.
It supports a variable-length salt, and a variable number of rounds.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts the following optional keywords:
:type salt: str
:param salt:
Optional salt string.
If specified, it may be any length, but must use the characters in the regexp range ``[./0-9A-Za-z]``.
If not specified, a 16 character salt will be autogenerated (this is recommended).
:type salt_size: int
:param salt_size:
Optional number of characters to use when autogenerating new salts.
Defaults to 16, but can be any value between 0 and 1024.
:type rounds: int
:param rounds:
Optional number of rounds to use.
Defaults to the same value as :class:`pbkdf2_sha1`, must be within ``range(1,1<<32)``.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~zdppy_password_hash.exc.PasslibHashWarning`
will be issued instead. Correctable errors include ``rounds``
that are too small or too large, and ``salt`` strings that are too long.
.. versionadded:: 1.6
"""
#===================================================================
# class attrs
#===================================================================
#--GenericHandler--
name = "dlitz_pbkdf2_sha1"
setting_kwds = ("salt", "salt_size", "rounds")
ident = u("$p5k2$")
_stub_checksum = u("0" * 48 + "=")
# NOTE: max_salt_size and max_rounds are arbitrarily chosen to provide a
# sanity check. underlying algorithm (and reference implementation)
# allows effectively unbounded values for both of these parameters.
#--HasSalt--
default_salt_size = 16
max_salt_size = 1024
salt_chars = uh.HASH64_CHARS
#--HasRounds--
# NOTE: for security, the default here is set to match pbkdf2_sha1,
# even though this hash's extra block makes it twice as slow.
default_rounds = pbkdf2_sha1.default_rounds
min_rounds = 1
max_rounds = 0xffffffff # setting at 32-bit limit for now
rounds_cost = "linear"
#===================================================================
# formatting
#===================================================================
# hash $p5k2$c$u9HvcT4d$Sd1gwSVCLZYAuqZ25piRnbBEoAesaa/g
# ident $p5k2$
# rounds c
# salt u9HvcT4d
# chk Sd1gwSVCLZYAuqZ25piRnbBEoAesaa/g
# rounds in lowercase hex, no zero padding
@classmethod
def from_string(cls, hash):
rounds, salt, chk = uh.parse_mc3(hash, cls.ident, rounds_base=16,
default_rounds=400, handler=cls)
return cls(rounds=rounds, salt=salt, checksum=chk)
def to_string(self):
rounds = self.rounds
if rounds == 400:
rounds = None # omit rounds field if == 400 (the parser's implicit default)
return uh.render_mc3(self.ident, rounds, self.salt, self.checksum, rounds_base=16)
def _get_config(self):
rounds = self.rounds
if rounds == 400:
rounds = None # omit rounds field if == 400 (the parser's implicit default)
return uh.render_mc3(self.ident, rounds, self.salt, None, rounds_base=16)
#===================================================================
# backend
#===================================================================
def _calc_checksum(self, secret):
# NOTE: pbkdf2_hmac() will encode secret & salt using utf-8
salt = self._get_config()
result = pbkdf2_hmac("sha1", secret, salt, self.rounds, 24)
return ab64_encode(result).decode("ascii")
#===================================================================
# eoc
#===================================================================
#=============================================================================
# crowd
#=============================================================================
class atlassian_pbkdf2_sha1(uh.HasRawSalt, uh.HasRawChecksum, uh.GenericHandler):
"""This class implements the PBKDF2 hash used by Atlassian.
It supports a fixed-length salt, and a fixed number of rounds.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts the following optional keywords:
:type salt: bytes
:param salt:
Optional salt bytes.
If specified, the length must be exactly 16 bytes.
If not specified, a salt will be autogenerated (this is recommended).
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~zdppy_password_hash.exc.PasslibHashWarning`
will be issued instead. Correctable errors include
``salt`` strings that are too long.
.. versionadded:: 1.6
"""
#--GenericHandler--
name = "atlassian_pbkdf2_sha1"
setting_kwds =("salt",)
ident = u("{PKCS5S2}")
checksum_size = 32
#--HasRawSalt--
min_salt_size = max_salt_size = 16
@classmethod
def from_string(cls, hash):
hash = to_unicode(hash, "ascii", "hash")
ident = cls.ident
if not hash.startswith(ident):
raise uh.exc.InvalidHashError(cls)
data = b64decode(hash[len(ident):].encode("ascii"))
salt, chk = data[:16], data[16:]
return cls(salt=salt, checksum=chk)
def to_string(self):
data = self.salt + self.checksum
hash = self.ident + b64encode(data).decode("ascii")
return uascii_to_str(hash)
def _calc_checksum(self, secret):
# TODO: find out what crowd's policy is re: unicode
# crowd seems to use a fixed number of rounds.
# NOTE: pbkdf2_hmac() will encode secret & salt using utf-8
return pbkdf2_hmac("sha1", secret, self.salt, 10000, 32)
#=============================================================================
# grub
#=============================================================================
class grub_pbkdf2_sha512(uh.HasRounds, uh.HasRawSalt, uh.HasRawChecksum, uh.GenericHandler):
"""This class implements Grub's pbkdf2-hmac-sha512 hash, and follows the :ref:`password-hash-api`.
It supports a variable-length salt, and a variable number of rounds.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts the following optional keywords:
:type salt: bytes
:param salt:
Optional salt bytes.
If specified, the length must be between 0-1024 bytes.
If not specified, a 64 byte salt will be autogenerated (this is recommended).
:type salt_size: int
:param salt_size:
Optional number of bytes to use when autogenerating new salts.
Defaults to 64 bytes, but can be any value between 0 and 1024.
:type rounds: int
:param rounds:
Optional number of rounds to use.
        Defaults to the same value as :class:`pbkdf2_sha512`, but must be within ``range(1,1<<32)``.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~zdppy_password_hash.exc.PasslibHashWarning`
will be issued instead. Correctable errors include ``rounds``
that are too small or too large, and ``salt`` strings that are too long.
.. versionadded:: 1.6
"""
name = "grub_pbkdf2_sha512"
setting_kwds = ("salt", "salt_size", "rounds")
ident = u("grub.pbkdf2.sha512.")
checksum_size = 64
# NOTE: max_salt_size and max_rounds are arbitrarily chosen to provide a
# sanity check. the underlying pbkdf2 specifies no bounds for either,
# and it's not clear what grub specifies.
default_salt_size = 64
max_salt_size = 1024
default_rounds = pbkdf2_sha512.default_rounds
min_rounds = 1
max_rounds = 0xffffffff # setting at 32-bit limit for now
rounds_cost = "linear"
@classmethod
def from_string(cls, hash):
rounds, salt, chk = uh.parse_mc3(hash, cls.ident, sep=u("."),
handler=cls)
salt = unhexlify(salt.encode("ascii"))
if chk:
chk = unhexlify(chk.encode("ascii"))
return cls(rounds=rounds, salt=salt, checksum=chk)
def to_string(self):
salt = hexlify(self.salt).decode("ascii").upper()
chk = hexlify(self.checksum).decode("ascii").upper()
return uh.render_mc3(self.ident, self.rounds, salt, chk, sep=u("."))
def _calc_checksum(self, secret):
# TODO: find out what grub's policy is re: unicode
# NOTE: pbkdf2_hmac() will encode secret & salt using utf-8
return pbkdf2_hmac("sha512", secret, self.salt, self.rounds, 64)
#=============================================================================
# eof
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/handlers/pbkdf2.py | pbkdf2.py |
#=============================================================================
# imports
#=============================================================================
# core
import hashlib
import logging; log = logging.getLogger(__name__)
# site
# pkg
from zdppy_password_hash.utils import to_native_str, to_bytes, render_bytes, consteq
from zdppy_password_hash.utils.compat import unicode, str_to_uascii
import zdppy_password_hash.utils.handlers as uh
from zdppy_password_hash.crypto.digest import lookup_hash
# local
__all__ = [
"create_hex_hash",
"hex_md4",
"hex_md5",
"hex_sha1",
"hex_sha256",
"hex_sha512",
]
#=============================================================================
# helpers for hexadecimal hashes
#=============================================================================
class HexDigestHash(uh.StaticHandler):
"""this provides a template for supporting passwords stored as plain hexadecimal hashes"""
#===================================================================
# class attrs
#===================================================================
_hash_func = None # hash function to use - filled in by create_hex_hash()
checksum_size = None # filled in by create_hex_hash()
checksum_chars = uh.HEX_CHARS
#: special for detecting if _hash_func is just a stub method.
supported = True
#===================================================================
# methods
#===================================================================
@classmethod
def _norm_hash(cls, hash):
return hash.lower()
def _calc_checksum(self, secret):
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
return str_to_uascii(self._hash_func(secret).hexdigest())
#===================================================================
# eoc
#===================================================================
def create_hex_hash(digest, module=__name__, django_name=None, required=True):
"""
create hex-encoded unsalted hasher for specified digest algorithm.
.. versionchanged:: 1.7.3
        If called with an unknown/unsupported digest, this won't throw an error
        immediately, but will instead return a dummy hasher that throws an error when called.
set ``required=True`` to restore old behavior.
"""
info = lookup_hash(digest, required=required)
name = "hex_" + info.name
if not info.supported:
info.digest_size = 0
hasher = type(name, (HexDigestHash,), dict(
name=name,
__module__=module, # so ABCMeta won't clobber it
_hash_func=staticmethod(info.const), # sometimes it's a function, sometimes not. so wrap it.
checksum_size=info.digest_size*2,
__doc__="""This class implements a plain hexadecimal %s hash, and follows the :ref:`password-hash-api`.
It supports no optional or contextual keywords.
""" % (info.name,)
))
if not info.supported:
hasher.supported = False
if django_name:
hasher.django_name = django_name
return hasher
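# NOTE: illustrative checks (commented out) -- the predefined handlers below are
# built via this factory; e.g. hex_md5 is just md5().hexdigest(), and verify()
# is case-insensitive thanks to _norm_hash():
##
## assert hex_md5.hash("password") == "5f4dcc3b5aa765d61d8327deb882cf99"
## assert hex_md5.verify("password", "5F4DCC3B5AA765D61D8327DEB882CF99")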
#=============================================================================
# predefined handlers
#=============================================================================
# NOTE: some digests below are marked as "required=False", because these may not be present on
# FIPS systems (see issue 116). if missing, will return stub hasher that throws error
# if an attempt is made to actually use hash/verify with them.
hex_md4 = create_hex_hash("md4", required=False)
hex_md5 = create_hex_hash("md5", django_name="unsalted_md5", required=False)
hex_sha1 = create_hex_hash("sha1", required=False)
hex_sha256 = create_hex_hash("sha256")
hex_sha512 = create_hex_hash("sha512")
#=============================================================================
# htdigest
#=============================================================================
class htdigest(uh.MinimalHandler):
"""htdigest hash function.
.. todo::
document this hash
"""
name = "htdigest"
setting_kwds = ()
context_kwds = ("user", "realm", "encoding")
default_encoding = "utf-8"
@classmethod
def hash(cls, secret, user, realm, encoding=None):
# NOTE: this was deliberately written so that raw bytes are passed through
# unchanged, the encoding kwd is only used to handle unicode values.
if not encoding:
encoding = cls.default_encoding
uh.validate_secret(secret)
if isinstance(secret, unicode):
secret = secret.encode(encoding)
user = to_bytes(user, encoding, "user")
realm = to_bytes(realm, encoding, "realm")
data = render_bytes("%s:%s:%s", user, realm, secret)
return hashlib.md5(data).hexdigest()
@classmethod
def _norm_hash(cls, hash):
"""normalize hash to native string, and validate it"""
hash = to_native_str(hash, param="hash")
if len(hash) != 32:
raise uh.exc.MalformedHashError(cls, "wrong size")
for char in hash:
if char not in uh.LC_HEX_CHARS:
raise uh.exc.MalformedHashError(cls, "invalid chars in hash")
return hash
@classmethod
def verify(cls, secret, hash, user, realm, encoding="utf-8"):
hash = cls._norm_hash(hash)
other = cls.hash(secret, user, realm, encoding)
return consteq(hash, other)
@classmethod
def identify(cls, hash):
try:
cls._norm_hash(hash)
except ValueError:
return False
return True
@uh.deprecated_method(deprecated="1.7", removed="2.0")
@classmethod
def genconfig(cls):
return cls.hash("", "", "")
@uh.deprecated_method(deprecated="1.7", removed="2.0")
@classmethod
def genhash(cls, secret, config, user, realm, encoding=None):
# NOTE: 'config' is ignored, as this hash has no salting / other configuration.
# just have to make sure it's valid.
cls._norm_hash(config)
return cls.hash(secret, user, realm, encoding)
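# NOTE: illustrative check (commented out) -- htdigest is simply
# md5("user:realm:password"), the format stored by apache's htdigest tool:
##
## import hashlib
## assert htdigest.hash("pass", "bob", "realm") == \
##        hashlib.md5(b"bob:realm:pass").hexdigest()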
#=============================================================================
# eof
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/handlers/digests.py | digests.py |
#=============================================================================
# imports
#=============================================================================
# core
from binascii import hexlify, unhexlify
from hashlib import sha1
import re
import logging; log = logging.getLogger(__name__)
# site
# pkg
from zdppy_password_hash.utils import to_unicode, xor_bytes
from zdppy_password_hash.utils.compat import irange, u, \
uascii_to_str, unicode, str_to_uascii
from zdppy_password_hash.crypto.des import des_encrypt_block
import zdppy_password_hash.utils.handlers as uh
# local
__all__ = [
"oracle10g",
"oracle11g"
]
#=============================================================================
# oracle10
#=============================================================================
def des_cbc_encrypt(key, value, iv=b'\x00' * 8, pad=b'\x00'):
"""performs des-cbc encryption, returns only last block.
this performs a specific DES-CBC encryption implementation
as needed by the Oracle10 hash. it probably won't be useful for
other purposes as-is.
input value is null-padded to multiple of 8 bytes.
:arg key: des key as bytes
:arg value: value to encrypt, as bytes.
:param iv: optional IV
:param pad: optional pad byte
:returns: last block of DES-CBC encryption of all ``value``'s byte blocks.
"""
value += pad * (-len(value) % 8) # null pad to multiple of 8
hash = iv # start things off
for offset in irange(0,len(value),8):
chunk = xor_bytes(hash, value[offset:offset+8])
hash = des_encrypt_block(key, chunk)
return hash
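# NOTE: illustrative check (commented out) -- regardless of input length,
# only the final 8-byte ciphertext block is returned:
##
## out = des_cbc_encrypt(b"\x01\x23\x45\x67\x89\xAB\xCD\xEF", b"ABCDEFGH" * 2)
## assert len(out) == 8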
# magic string used as initial des key by oracle10
ORACLE10_MAGIC = b"\x01\x23\x45\x67\x89\xAB\xCD\xEF"
class oracle10(uh.HasUserContext, uh.StaticHandler):
"""This class implements the password hash used by Oracle up to version 10g, and follows the :ref:`password-hash-api`.
It does a single round of hashing, and relies on the username as the salt.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.hash`, :meth:`~zdppy_password_hash.ifc.PasswordHash.genhash`, and :meth:`~zdppy_password_hash.ifc.PasswordHash.verify` methods all require the
following additional contextual keywords:
:type user: str
:param user: name of oracle user account this password is associated with.
"""
#===================================================================
# algorithm information
#===================================================================
name = "oracle10"
checksum_chars = uh.HEX_CHARS
checksum_size = 16
#===================================================================
# methods
#===================================================================
@classmethod
def _norm_hash(cls, hash):
return hash.upper()
def _calc_checksum(self, secret):
# FIXME: not sure how oracle handles unicode.
# online docs about 10g hash indicate it puts ascii chars
# in a 2-byte encoding w/ the high byte set to null.
# they don't say how it handles other chars, or what encoding.
#
# so for now, encoding secret & user to utf-16-be,
# since that fits, and if secret/user is bytes,
# we assume utf-8, and decode first.
#
# this whole mess really needs someone w/ an oracle system,
# and some answers :)
if isinstance(secret, bytes):
secret = secret.decode("utf-8")
user = to_unicode(self.user, "utf-8", param="user")
input = (user+secret).upper().encode("utf-16-be")
hash = des_cbc_encrypt(ORACLE10_MAGIC, input)
hash = des_cbc_encrypt(hash, input)
return hexlify(hash).decode("ascii").upper()
#===================================================================
# eoc
#===================================================================
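# NOTE: illustrative check (commented out) -- the widely-published hash of
# oracle's default SCOTT/TIGER demo account:
##
## assert oracle10.hash("tiger", user="scott") == "F894844C34402B67"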
#=============================================================================
# oracle11
#=============================================================================
class oracle11(uh.HasSalt, uh.GenericHandler):
"""This class implements the Oracle11g password hash, and follows the :ref:`password-hash-api`.
It supports a fixed-length salt.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts the following optional keywords:
:type salt: str
:param salt:
Optional salt string.
If not specified, one will be autogenerated (this is recommended).
If specified, it must be 20 hexadecimal characters.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~zdppy_password_hash.exc.PasslibHashWarning`
will be issued instead. Correctable errors include
``salt`` strings that are too long.
.. versionadded:: 1.6
"""
#===================================================================
# class attrs
#===================================================================
#--GenericHandler--
name = "oracle11"
setting_kwds = ("salt",)
checksum_size = 40
checksum_chars = uh.UPPER_HEX_CHARS
#--HasSalt--
min_salt_size = max_salt_size = 20
salt_chars = uh.UPPER_HEX_CHARS
#===================================================================
# methods
#===================================================================
_hash_regex = re.compile(u("^S:(?P<chk>[0-9a-f]{40})(?P<salt>[0-9a-f]{20})$"), re.I)
@classmethod
def from_string(cls, hash):
hash = to_unicode(hash, "ascii", "hash")
m = cls._hash_regex.match(hash)
if not m:
raise uh.exc.InvalidHashError(cls)
salt, chk = m.group("salt", "chk")
return cls(salt=salt, checksum=chk.upper())
def to_string(self):
chk = self.checksum
hash = u("S:%s%s") % (chk.upper(), self.salt.upper())
return uascii_to_str(hash)
def _calc_checksum(self, secret):
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
chk = sha1(secret + unhexlify(self.salt.encode("ascii"))).hexdigest()
return str_to_uascii(chk).upper()
#===================================================================
# eoc
#===================================================================
#=============================================================================
# eof
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/handlers/oracle.py | oracle.py |
from base64 import b64encode, b64decode
import re
import logging; log = logging.getLogger(__name__)
# site
# pkg
from zdppy_password_hash.utils import to_unicode
import zdppy_password_hash.utils.handlers as uh
from zdppy_password_hash.utils.compat import bascii_to_str, iteritems, u,\
unicode
from zdppy_password_hash.crypto.digest import pbkdf1
# local
__all__ = [
'fshp',
]
#=============================================================================
# fshp
#=============================================================================
class fshp(uh.HasRounds, uh.HasRawSalt, uh.HasRawChecksum, uh.GenericHandler):
"""This class implements the FSHP password hash, and follows the :ref:`password-hash-api`.
It supports a variable-length salt, and a variable number of rounds.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts the following optional keywords:
:param salt:
Optional raw salt string.
If not specified, one will be autogenerated (this is recommended).
:param salt_size:
Optional number of bytes to use when autogenerating new salts.
Defaults to 16 bytes, but can be any non-negative value.
:param rounds:
Optional number of rounds to use.
Defaults to 480000, must be between 1 and 4294967295, inclusive.
:param variant:
Optionally specifies variant of FSHP to use.
* ``0`` - uses SHA-1 digest (deprecated).
* ``1`` - uses SHA-2/256 digest (default).
* ``2`` - uses SHA-2/384 digest.
* ``3`` - uses SHA-2/512 digest.
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~zdppy_password_hash.exc.PasslibHashWarning`
will be issued instead. Correctable errors include ``rounds``
that are too small or too large, and ``salt`` strings that are too long.
.. versionadded:: 1.6
"""
#===================================================================
# class attrs
#===================================================================
#--GenericHandler--
name = "fshp"
setting_kwds = ("salt", "salt_size", "rounds", "variant")
checksum_chars = uh.PADDED_BASE64_CHARS
ident = u("{FSHP")
# checksum_size is property() that depends on variant
#--HasRawSalt--
default_salt_size = 16 # current zdppy_password_hash default, FSHP uses 8
max_salt_size = None
#--HasRounds--
# FIXME: should probably use different default rounds
# based on the variant. setting for default variant (sha256) for now.
default_rounds = 480000 # current zdppy_password_hash default, FSHP uses 4096
min_rounds = 1 # set by FSHP
max_rounds = 4294967295 # 32-bit integer limit - not set by FSHP
rounds_cost = "linear"
#--variants--
default_variant = 1
_variant_info = {
# variant: (hash name, digest size)
0: ("sha1", 20),
1: ("sha256", 32),
2: ("sha384", 48),
3: ("sha512", 64),
}
_variant_aliases = dict(
[(unicode(k),k) for k in _variant_info] +
[(v[0],k) for k,v in iteritems(_variant_info)]
)
#===================================================================
# configuration
#===================================================================
@classmethod
def using(cls, variant=None, **kwds):
subcls = super(fshp, cls).using(**kwds)
if variant is not None:
subcls.default_variant = cls._norm_variant(variant)
return subcls
#===================================================================
# instance attrs
#===================================================================
variant = None
#===================================================================
# init
#===================================================================
def __init__(self, variant=None, **kwds):
# NOTE: variant must be set first, since it controls checksum size, etc.
self.use_defaults = kwds.get("use_defaults") # load this early
if variant is not None:
variant = self._norm_variant(variant)
elif self.use_defaults:
variant = self.default_variant
assert self._norm_variant(variant) == variant, "invalid default variant: %r" % (variant,)
else:
raise TypeError("no variant specified")
self.variant = variant
super(fshp, self).__init__(**kwds)
@classmethod
def _norm_variant(cls, variant):
if isinstance(variant, bytes):
variant = variant.decode("ascii")
if isinstance(variant, unicode):
try:
variant = cls._variant_aliases[variant]
except KeyError:
raise ValueError("invalid fshp variant")
if not isinstance(variant, int):
raise TypeError("fshp variant must be int or known alias")
if variant not in cls._variant_info:
raise ValueError("invalid fshp variant")
return variant
@property
def checksum_alg(self):
return self._variant_info[self.variant][0]
@property
def checksum_size(self):
return self._variant_info[self.variant][1]
#===================================================================
# formatting
#===================================================================
_hash_regex = re.compile(u(r"""
^
\{FSHP
(\d+)\| # variant
(\d+)\| # salt size
(\d+)\} # rounds
([a-zA-Z0-9+/]+={0,3}) # digest
$"""), re.X)
@classmethod
def from_string(cls, hash):
hash = to_unicode(hash, "ascii", "hash")
m = cls._hash_regex.match(hash)
if not m:
raise uh.exc.InvalidHashError(cls)
variant, salt_size, rounds, data = m.group(1,2,3,4)
variant = int(variant)
salt_size = int(salt_size)
rounds = int(rounds)
try:
data = b64decode(data.encode("ascii"))
except TypeError:
raise uh.exc.MalformedHashError(cls)
salt = data[:salt_size]
chk = data[salt_size:]
return cls(salt=salt, checksum=chk, rounds=rounds, variant=variant)
def to_string(self):
chk = self.checksum
salt = self.salt
data = bascii_to_str(b64encode(salt+chk))
return "{FSHP%d|%d|%d}%s" % (self.variant, len(salt), self.rounds, data)
#===================================================================
# backend
#===================================================================
def _calc_checksum(self, secret):
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
# NOTE: for some reason, FSHP uses pbkdf1 with password & salt reversed.
# this has only a minimal impact on security,
# but it is worth noting this deviation.
return pbkdf1(
digest=self.checksum_alg,
secret=self.salt,
salt=secret,
rounds=self.rounds,
keylen=self.checksum_size,
)
#===================================================================
# eoc
#===================================================================
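# NOTE: a minimal usage sketch (commented out) -- the variant number selects
# the digest, and appears in the "{FSHPv|salt_size|rounds}" prefix:
##
## h = fshp.using(variant=1).hash("password")
## assert h.startswith("{FSHP1|")
## assert fshp.verify("password", h)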
#=============================================================================
# eof
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/handlers/fshp.py | fshp.py |
from hashlib import sha1
import logging; log = logging.getLogger(__name__)
# site
# pkg
from zdppy_password_hash.utils.compat import unicode, u, \
    byte_elem_value, str_to_uascii
import zdppy_password_hash.utils.handlers as uh
# local
__all__ = [
'mysql323',
'mysq41',
]
#=============================================================================
# mysql323
#=============================================================================
class mysql323(uh.StaticHandler):
"""This class implements the MySQL 3.2.3 password hash, and follows the :ref:`password-hash-api`.
It has no salt and a single fixed round.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.hash` and :meth:`~zdppy_password_hash.ifc.PasswordHash.genconfig` methods accept no optional keywords.
"""
#===================================================================
# class attrs
#===================================================================
name = "mysql323"
checksum_size = 16
checksum_chars = uh.HEX_CHARS
#===================================================================
# methods
#===================================================================
@classmethod
def _norm_hash(cls, hash):
return hash.lower()
def _calc_checksum(self, secret):
# FIXME: no idea if mysql has a policy about handling unicode passwords
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
MASK_32 = 0xffffffff
MASK_31 = 0x7fffffff
WHITE = b' \t'
nr1 = 0x50305735
nr2 = 0x12345671
add = 7
for c in secret:
if c in WHITE:
continue
tmp = byte_elem_value(c)
nr1 ^= ((((nr1 & 63)+add)*tmp) + (nr1 << 8)) & MASK_32
nr2 = (nr2+((nr2 << 8) ^ nr1)) & MASK_32
add = (add+tmp) & MASK_32
return u("%08x%08x") % (nr1 & MASK_31, nr2 & MASK_31)
#===================================================================
# eoc
#===================================================================
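# NOTE: illustrative check (commented out) -- matches the example output of
# mysql's legacy OLD_PASSWORD() function shown in the mysql manual:
##
## assert mysql323.hash("mypass") == "6f8c114b58f2ce9e"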
#=============================================================================
# mysql41
#=============================================================================
class mysql41(uh.StaticHandler):
"""This class implements the MySQL 4.1 password hash, and follows the :ref:`password-hash-api`.
It has no salt and a single fixed round.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.hash` and :meth:`~zdppy_password_hash.ifc.PasswordHash.genconfig` methods accept no optional keywords.
"""
#===================================================================
# class attrs
#===================================================================
name = "mysql41"
_hash_prefix = u("*")
checksum_chars = uh.HEX_CHARS
checksum_size = 40
#===================================================================
# methods
#===================================================================
@classmethod
def _norm_hash(cls, hash):
return hash.upper()
def _calc_checksum(self, secret):
# FIXME: no idea if mysql has a policy about handling unicode passwords
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
return str_to_uascii(sha1(sha1(secret).digest()).hexdigest()).upper()
#===================================================================
# eoc
#===================================================================
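# NOTE: illustrative check (commented out) -- mysql41 is "*" plus the
# upper-case hex of sha1 applied twice, same as mysql's PASSWORD() function:
##
## assert mysql41.hash("mypass") == \
##        "*" + sha1(sha1(b"mypass").digest()).hexdigest().upper()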
#=============================================================================
# eof
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/handlers/mysql.py | mysql.py |
#=============================================================================
# imports
#=============================================================================
# core
from binascii import hexlify
import logging; log = logging.getLogger(__name__)
from warnings import warn
# site
# pkg
from zdppy_password_hash.utils import to_unicode, right_pad_string
from zdppy_password_hash.utils.compat import unicode
from zdppy_password_hash.crypto.digest import lookup_hash
md4 = lookup_hash("md4").const
import zdppy_password_hash.utils.handlers as uh
# local
__all__ = [
"lmhash",
"nthash",
"bsd_nthash",
"msdcc",
"msdcc2",
]
#=============================================================================
# lanman hash
#=============================================================================
class lmhash(uh.TruncateMixin, uh.HasEncodingContext, uh.StaticHandler):
"""This class implements the Lan Manager Password hash, and follows the :ref:`password-hash-api`.
It has no salt and a single fixed round.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.using` method accepts a single
optional keyword:
:param bool truncate_error:
By default, this will silently truncate passwords larger than 14 bytes.
Setting ``truncate_error=True`` will cause :meth:`~zdppy_password_hash.ifc.PasswordHash.hash`
to raise a :exc:`~zdppy_password_hash.exc.PasswordTruncateError` instead.
.. versionadded:: 1.7
The :meth:`~zdppy_password_hash.ifc.PasswordHash.hash` and :meth:`~zdppy_password_hash.ifc.PasswordHash.verify` methods accept a single
optional keyword:
:type encoding: str
:param encoding:
This specifies what character encoding LMHASH should use when
calculating digest. It defaults to ``cp437``, the most
common encoding encountered.
Note that while this class outputs digests in lower-case hexadecimal,
it will accept upper-case as well.
"""
#===================================================================
# class attrs
#===================================================================
#--------------------
# PasswordHash
#--------------------
name = "lmhash"
setting_kwds = ("truncate_error",)
#--------------------
# GenericHandler
#--------------------
checksum_chars = uh.HEX_CHARS
checksum_size = 32
#--------------------
# TruncateMixin
#--------------------
truncate_size = 14
#--------------------
# custom
#--------------------
default_encoding = "cp437"
#===================================================================
# methods
#===================================================================
@classmethod
def _norm_hash(cls, hash):
return hash.lower()
def _calc_checksum(self, secret):
# check for truncation (during .hash() calls only)
if self.use_defaults:
self._check_truncate_policy(secret)
return hexlify(self.raw(secret, self.encoding)).decode("ascii")
# magic constant used by LMHASH
_magic = b"KGS!@#$%"
@classmethod
def raw(cls, secret, encoding=None):
"""encode password using LANMAN hash algorithm.
:type secret: unicode or utf-8 encoded bytes
:arg secret: secret to hash
:type encoding: str
:arg encoding:
optional encoding to use for unicode inputs.
this defaults to ``cp437``, which is the
common case for most situations.
:returns: returns string of raw bytes
"""
if not encoding:
encoding = cls.default_encoding
        # some nice empirical data re: different encodings is at...
# http://www.openwall.com/lists/john-dev/2011/08/01/2
# http://www.freerainbowtables.com/phpBB3/viewtopic.php?t=387&p=12163
from zdppy_password_hash.crypto.des import des_encrypt_block
MAGIC = cls._magic
if isinstance(secret, unicode):
# perform uppercasing while we're still unicode,
# to give a better shot at getting non-ascii chars right.
# (though some codepages do NOT upper-case the same as unicode).
secret = secret.upper().encode(encoding)
elif isinstance(secret, bytes):
# FIXME: just trusting ascii upper will work?
# and if not, how to do codepage specific case conversion?
# we could decode first using <encoding>,
# but *that* might not always be right.
secret = secret.upper()
else:
raise TypeError("secret must be unicode or bytes")
secret = right_pad_string(secret, 14)
return des_encrypt_block(secret[0:7], MAGIC) + \
des_encrypt_block(secret[7:14], MAGIC)
#===================================================================
# eoc
#===================================================================
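# NOTE: illustrative check (commented out) -- the LM digest of "password"
# is a well-known test vector:
##
## assert lmhash.hash("password") == "e52cac67419a9a224a3b108f3fa6cb6d"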
#=============================================================================
# ntlm hash
#=============================================================================
class nthash(uh.StaticHandler):
"""This class implements the NT Password hash, and follows the :ref:`password-hash-api`.
It has no salt and a single fixed round.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.hash` and :meth:`~zdppy_password_hash.ifc.PasswordHash.genconfig` methods accept no optional keywords.
Note that while this class outputs lower-case hexadecimal digests,
it will accept upper-case digests as well.
"""
#===================================================================
# class attrs
#===================================================================
name = "nthash"
checksum_chars = uh.HEX_CHARS
checksum_size = 32
#===================================================================
# methods
#===================================================================
@classmethod
def _norm_hash(cls, hash):
return hash.lower()
def _calc_checksum(self, secret):
return hexlify(self.raw(secret)).decode("ascii")
@classmethod
def raw(cls, secret):
"""encode password using MD4-based NTHASH algorithm
:arg secret: secret as unicode or utf-8 encoded bytes
:returns: returns string of raw bytes
"""
secret = to_unicode(secret, "utf-8", param="secret")
# XXX: found refs that say only first 128 chars are used.
return md4(secret.encode("utf-16-le")).digest()
@classmethod
def raw_nthash(cls, secret, hex=False):
warn("nthash.raw_nthash() is deprecated, and will be removed "
"in Passlib 1.8, please use nthash.raw() instead",
DeprecationWarning)
ret = nthash.raw(secret)
return hexlify(ret).decode("ascii") if hex else ret
#===================================================================
# eoc
#===================================================================
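# NOTE: illustrative check (commented out) -- the NT digest of "password"
# (md4 of its utf-16-le encoding) is a well-known test vector:
##
## assert nthash.hash("password") == "8846f7eaee8fb117ad06bdd830b7586c"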
bsd_nthash = uh.PrefixWrapper("bsd_nthash", nthash, prefix="$3$$", ident="$3$$",
doc="""The class support FreeBSD's representation of NTHASH
(which is compatible with the :ref:`modular-crypt-format`),
and follows the :ref:`password-hash-api`.
It has no salt and a single fixed round.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.hash` and :meth:`~zdppy_password_hash.ifc.PasswordHash.genconfig` methods accept no optional keywords.
""")
##class ntlm_pair(object):
## "combined lmhash & nthash"
## name = "ntlm_pair"
## setting_kwds = ()
## _hash_regex = re.compile(u"^(?P<lm>[0-9a-f]{32}):(?P<nt>[0-9a-f]{32})$",
## re.I)
##
## @classmethod
## def identify(cls, hash):
## hash = to_unicode(hash, "latin-1", "hash")
## return len(hash) == 65 and cls._hash_regex.match(hash) is not None
##
## @classmethod
## def hash(cls, secret, config=None):
## if config is not None and not cls.identify(config):
## raise uh.exc.InvalidHashError(cls)
## return lmhash.hash(secret) + ":" + nthash.hash(secret)
##
## @classmethod
## def verify(cls, secret, hash):
## hash = to_unicode(hash, "ascii", "hash")
## m = cls._hash_regex.match(hash)
## if not m:
## raise uh.exc.InvalidHashError(cls)
## lm, nt = m.group("lm", "nt")
## # NOTE: verify against both in case encoding issue
## # causes one not to match.
## return lmhash.verify(secret, lm) or nthash.verify(secret, nt)
#=============================================================================
# msdcc v1
#=============================================================================
class msdcc(uh.HasUserContext, uh.StaticHandler):
"""This class implements Microsoft's Domain Cached Credentials password hash,
and follows the :ref:`password-hash-api`.
It has a fixed number of rounds, and uses the associated
username as the salt.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.hash`, :meth:`~zdppy_password_hash.ifc.PasswordHash.genhash`, and :meth:`~zdppy_password_hash.ifc.PasswordHash.verify` methods
    require the following contextual keyword:
:type user: str
:param user:
String containing name of user account this password is associated with.
This is required to properly calculate the hash.
This keyword is case-insensitive, and should contain just the username
(e.g. ``Administrator``, not ``SOMEDOMAIN\\Administrator``).
Note that while this class outputs lower-case hexadecimal digests,
it will accept upper-case digests as well.
"""
name = "msdcc"
checksum_chars = uh.HEX_CHARS
checksum_size = 32
@classmethod
def _norm_hash(cls, hash):
return hash.lower()
def _calc_checksum(self, secret):
return hexlify(self.raw(secret, self.user)).decode("ascii")
@classmethod
def raw(cls, secret, user):
"""encode password using mscash v1 algorithm
:arg secret: secret as unicode or utf-8 encoded bytes
:arg user: username to use as salt
:returns: returns string of raw bytes
"""
secret = to_unicode(secret, "utf-8", param="secret").encode("utf-16-le")
user = to_unicode(user, "utf-8", param="user").lower().encode("utf-16-le")
return md4(md4(secret).digest() + user).digest()
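# NOTE: illustrative sketch (commented out) -- msdcc v1 is just
# md4(md4(password_utf16le) + lower(username)_utf16le):
##
## digest = md4(md4("pass".encode("utf-16-le")).digest() +
##              "bob".encode("utf-16-le")).digest()
## assert msdcc.hash("pass", user="Bob") == hexlify(digest).decode("ascii")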
#=============================================================================
# msdcc2 aka mscash2
#=============================================================================
class msdcc2(uh.HasUserContext, uh.StaticHandler):
"""This class implements version 2 of Microsoft's Domain Cached Credentials
password hash, and follows the :ref:`password-hash-api`.
It has a fixed number of rounds, and uses the associated
username as the salt.
The :meth:`~zdppy_password_hash.ifc.PasswordHash.hash`, :meth:`~zdppy_password_hash.ifc.PasswordHash.genhash`, and :meth:`~zdppy_password_hash.ifc.PasswordHash.verify` methods
have the following extra keyword:
:type user: str
:param user:
String containing name of user account this password is associated with.
This is required to properly calculate the hash.
This keyword is case-insensitive, and should contain just the username
(e.g. ``Administrator``, not ``SOMEDOMAIN\\Administrator``).
"""
name = "msdcc2"
checksum_chars = uh.HEX_CHARS
checksum_size = 32
@classmethod
def _norm_hash(cls, hash):
return hash.lower()
def _calc_checksum(self, secret):
return hexlify(self.raw(secret, self.user)).decode("ascii")
@classmethod
def raw(cls, secret, user):
"""encode password using msdcc v2 algorithm
:type secret: unicode or utf-8 bytes
:arg secret: secret
:type user: str
:arg user: username to use as salt
:returns: returns string of raw bytes
"""
from zdppy_password_hash.crypto.digest import pbkdf2_hmac
secret = to_unicode(secret, "utf-8", param="secret").encode("utf-16-le")
user = to_unicode(user, "utf-8", param="user").lower().encode("utf-16-le")
tmp = md4(md4(secret).digest() + user).digest()
return pbkdf2_hmac("sha1", tmp, user, 10240, 16)
#=============================================================================
# eof
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/handlers/windows.py | windows.py |
from __future__ import absolute_import, division, print_function
from base64 import (
b64encode,
b64decode,
b32decode as _b32decode,
b32encode as _b32encode,
)
from binascii import b2a_base64, a2b_base64, Error as _BinAsciiError
import logging
log = logging.getLogger(__name__)
# site
# pkg
from zdppy_password_hash import exc
from zdppy_password_hash.utils.compat import (
PY3, bascii_to_str,
irange, imap, iter_byte_chars, join_byte_values, join_byte_elems,
nextgetter, suppress_cause,
u, unicode, unicode_or_bytes_types,
)
from zdppy_password_hash.utils.decor import memoized_property
# from zdppy_password_hash.utils import BASE64_CHARS, HASH64_CHARS
# local
__all__ = [
# constants
"BASE64_CHARS", "PADDED_BASE64_CHARS",
"AB64_CHARS",
"HASH64_CHARS",
"BCRYPT_CHARS",
"HEX_CHARS", "LOWER_HEX_CHARS", "UPPER_HEX_CHARS",
"ALL_BYTE_VALUES",
# misc
"compile_byte_translation",
# base64
'ab64_encode', 'ab64_decode',
'b64s_encode', 'b64s_decode',
# base32
"b32encode", "b32decode",
# custom encodings
'Base64Engine',
'LazyBase64Engine',
'h64',
'h64big',
'bcrypt64',
]
#=============================================================================
# constant strings
#=============================================================================
#-------------------------------------------------------------
# common salt_chars & checksum_chars values
#-------------------------------------------------------------
#: standard base64 charmap
BASE64_CHARS = u("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/")
#: alt base64 charmap -- "." instead of "+"
AB64_CHARS = u("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789./")
#: charmap used by HASH64 encoding.
HASH64_CHARS = u("./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz")
#: charmap used by BCrypt
BCRYPT_CHARS = u("./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789")
#: std base64 chars + padding char
PADDED_BASE64_CHARS = BASE64_CHARS + u("=")
#: all hex chars
HEX_CHARS = u("0123456789abcdefABCDEF")
#: upper case hex chars
UPPER_HEX_CHARS = u("0123456789ABCDEF")
#: lower case hex chars
LOWER_HEX_CHARS = u("0123456789abcdef")
#-------------------------------------------------------------
# byte strings
#-------------------------------------------------------------
#: special byte string containing all possible byte values
#: NOTE: for efficiency, this is treated as singleton by some of the code
ALL_BYTE_VALUES = join_byte_values(irange(256))
#: some string constants we reuse
B_EMPTY = b''
B_NULL = b'\x00'
B_EQUAL = b'='
#=============================================================================
# byte translation
#=============================================================================
#: base list used to compile byte translations
_TRANSLATE_SOURCE = list(iter_byte_chars(ALL_BYTE_VALUES))
def compile_byte_translation(mapping, source=None):
"""
return a 256-byte string for translating bytes using specified mapping.
bytes not specified by mapping will be left alone.
:param mapping:
dict mapping input byte (str or int) -> output byte (str or int).
:param source:
optional existing byte translation string to use as base.
        (must be a 256-length byte string). defaults to identity mapping.
    :returns:
        256-length byte string for passing to bytes().translate.
"""
if source is None:
target = _TRANSLATE_SOURCE[:]
else:
        assert isinstance(source, bytes) and len(source) == 256
target = list(iter_byte_chars(source))
for k, v in mapping.items():
if isinstance(k, unicode_or_bytes_types):
k = ord(k)
assert isinstance(k, int) and 0 <= k < 256
if isinstance(v, unicode):
v = v.encode("ascii")
assert isinstance(v, bytes) and len(v) == 1
target[k] = v
return B_EMPTY.join(target)
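# NOTE: illustrative usage (commented out) -- build a table mapping "+" to ".":
##
## table = compile_byte_translation({b"+": b"."})
## assert b"a+b".translate(table) == b"a.b"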
#=============================================================================
# unpadding / stripped base64 encoding
#=============================================================================
def b64s_encode(data):
"""
encode using shortened base64 format which omits padding & whitespace.
uses default ``+/`` altchars.
"""
return b2a_base64(data).rstrip(_BASE64_STRIP)
def b64s_decode(data):
"""
decode from shortened base64 format which omits padding & whitespace.
uses default ``+/`` altchars.
"""
if isinstance(data, unicode):
        # needs bytes for the padding concat below, but accept ascii-unicode ala a2b_base64()
try:
data = data.encode("ascii")
except UnicodeEncodeError:
raise suppress_cause(ValueError("string argument should contain only ASCII characters"))
off = len(data) & 3
if off == 0:
pass
elif off == 2:
data += _BASE64_PAD2
elif off == 3:
data += _BASE64_PAD1
else: # off == 1
raise ValueError("invalid base64 input")
try:
return a2b_base64(data)
except _BinAsciiError as err:
raise suppress_cause(TypeError(err))
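# NOTE: illustrative checks (commented out) -- padding & the trailing newline
# added by b2a_base64() are stripped on encode, and restored on decode:
##
## assert b64s_encode(b"\xff") == b"/w"
## assert b64s_decode(b"/w") == b"\xff"
## assert b64s_decode(u("/w")) == b"\xff" # ascii unicode accepted as well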
#=============================================================================
# adapted-base64 encoding
#=============================================================================
_BASE64_STRIP = b"=\n"
_BASE64_PAD1 = b"="
_BASE64_PAD2 = b"=="
# XXX: Passlib 1.8/1.9 -- deprecate everything that's using ab64_encode(),
#      have it start outputting b64s_encode() instead? can use ab64_decode() to retain backwards compat.
def ab64_encode(data):
"""
encode using shortened base64 format which omits padding & whitespace.
uses custom ``./`` altchars.
it is primarily used by Passlib's custom pbkdf2 hashes.
"""
return b64s_encode(data).replace(b"+", b".")
def ab64_decode(data):
"""
decode from shortened base64 format which omits padding & whitespace.
uses custom ``./`` altchars, but supports decoding normal ``+/`` altchars as well.
it is primarily used by Passlib's custom pbkdf2 hashes.
"""
if isinstance(data, unicode):
# needs bytes for replace() call, but want to accept ascii-unicode ala a2b_base64()
try:
data = data.encode("ascii")
except UnicodeEncodeError:
raise suppress_cause(ValueError("string argument should contain only ASCII characters"))
return b64s_decode(data.replace(b".", b"+"))
#=============================================================================
# base32 codec
#=============================================================================
def b32encode(source):
"""
wrapper around :func:`base64.b32encode` which strips padding,
and returns a native string.
"""
# NOTE: using upper case by default here, since 'I & L' are less
# visually ambiguous than 'i & l'
return bascii_to_str(_b32encode(source).rstrip(B_EQUAL))
#: byte translation map to replace common mistyped base32 chars.
#: XXX: could correct '1' -> 'I', but could be a mistyped lower-case 'l', so leaving it alone.
_b32_translate = compile_byte_translation({"8": "B", "0": "O"})
#: helper to add padding
_b32_decode_pad = B_EQUAL * 8
def b32decode(source):
"""
wrapper around :func:`base64.b32decode`
which handles common mistyped chars.
padding optional, ignored if present.
"""
# encode & correct for typos
if isinstance(source, unicode):
source = source.encode("ascii")
source = source.translate(_b32_translate)
# pad things so final string is multiple of 8
remainder = len(source) & 0x7
if remainder:
source += _b32_decode_pad[:-remainder]
# XXX: py27 stdlib's version of this has some inefficiencies,
# could look into using optimized version.
return _b32decode(source, True)
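# NOTE: illustrative checks (commented out) -- padding is optional on decode,
# and commonly mistyped "0"/"8" chars are read as "O"/"B":
##
## assert b32encode(b"foo") == "MZXW6"
## assert b32decode("MZXW6") == b32decode("MZXW6===") == b"foo"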
#=============================================================================
# base64-variant encoding
#=============================================================================
class Base64Engine(object):
"""Provides routines for encoding/decoding base64 data using
arbitrary character mappings, selectable endianness, etc.
:arg charmap:
A string of 64 unique characters,
which will be used to encode successive 6-bit chunks of data.
A character's position within the string should correspond
to its 6-bit value.
:param big:
Whether the encoding should be big-endian (default False).
.. note::
This class does not currently handle base64's padding characters
        in any way whatsoever.
Raw Bytes <-> Encoded Bytes
===========================
The following methods convert between raw bytes,
and strings encoded using the engine's specific base64 variant:
.. automethod:: encode_bytes
.. automethod:: decode_bytes
.. automethod:: encode_transposed_bytes
.. automethod:: decode_transposed_bytes
..
.. automethod:: check_repair_unused
.. automethod:: repair_unused
Integers <-> Encoded Bytes
==========================
The following methods allow encoding and decoding
unsigned integers to and from the engine's specific base64 variant.
Endianess is determined by the engine's ``big`` constructor keyword.
.. automethod:: encode_int6
.. automethod:: decode_int6
.. automethod:: encode_int12
.. automethod:: decode_int12
.. automethod:: encode_int24
.. automethod:: decode_int24
.. automethod:: encode_int64
.. automethod:: decode_int64
Informational Attributes
========================
.. attribute:: charmap
unicode string containing list of characters used in encoding;
position in string matches 6bit value of character.
.. attribute:: bytemap
bytes version of :attr:`charmap`
.. attribute:: big
        boolean flag indicating this engine uses big-endian encoding.
"""
#===================================================================
# instance attrs
#===================================================================
# public config
bytemap = None # charmap as bytes
big = None # little or big endian
# filled in by init based on charmap.
# (byte elem: single byte under py2, 8bit int under py3)
_encode64 = None # maps 6bit value -> byte elem
_decode64 = None # maps byte elem -> 6bit value
# helpers filled in by init based on endianness
_encode_bytes = None # throws IndexError if bad value (shouldn't happen)
_decode_bytes = None # throws KeyError if bad char.
#===================================================================
# init
#===================================================================
def __init__(self, charmap, big=False):
# validate charmap, generate encode64/decode64 helper functions.
if isinstance(charmap, unicode):
charmap = charmap.encode("latin-1")
elif not isinstance(charmap, bytes):
raise exc.ExpectedStringError(charmap, "charmap")
if len(charmap) != 64:
raise ValueError("charmap must be 64 characters in length")
if len(set(charmap)) != 64:
raise ValueError("charmap must not contain duplicate characters")
self.bytemap = charmap
self._encode64 = charmap.__getitem__
lookup = dict((value, idx) for idx, value in enumerate(charmap))
self._decode64 = lookup.__getitem__
# validate big, set appropriate helper functions.
self.big = big
if big:
self._encode_bytes = self._encode_bytes_big
self._decode_bytes = self._decode_bytes_big
else:
self._encode_bytes = self._encode_bytes_little
self._decode_bytes = self._decode_bytes_little
# TODO: support padding character
##if padding is not None:
## if isinstance(padding, unicode):
## padding = padding.encode("latin-1")
## elif not isinstance(padding, bytes):
## raise TypeError("padding char must be unicode or bytes")
## if len(padding) != 1:
## raise ValueError("padding must be single character")
##self.padding = padding
@property
def charmap(self):
"""charmap as unicode"""
return self.bytemap.decode("latin-1")
#===================================================================
# encoding byte strings
#===================================================================
def encode_bytes(self, source):
"""encode bytes to base64 string.
:arg source: byte string to encode.
:returns: byte string containing encoded data.
"""
if not isinstance(source, bytes):
raise TypeError("source must be bytes, not %s" % (type(source),))
chunks, tail = divmod(len(source), 3)
if PY3:
next_value = nextgetter(iter(source))
else:
next_value = nextgetter(ord(elem) for elem in source)
gen = self._encode_bytes(next_value, chunks, tail)
out = join_byte_elems(imap(self._encode64, gen))
##if tail:
## padding = self.padding
## if padding:
## out += padding * (3-tail)
return out
def _encode_bytes_little(self, next_value, chunks, tail):
"""helper used by encode_bytes() to handle little-endian encoding"""
#
# output bit layout:
#
# first byte: v1 543210
#
# second byte: v1 ....76
# +v2 3210..
#
# third byte: v2 ..7654
# +v3 10....
#
# fourth byte: v3 765432
#
idx = 0
while idx < chunks:
v1 = next_value()
v2 = next_value()
v3 = next_value()
yield v1 & 0x3f
yield ((v2 & 0x0f)<<2)|(v1>>6)
yield ((v3 & 0x03)<<4)|(v2>>4)
yield v3>>2
idx += 1
if tail:
v1 = next_value()
if tail == 1:
# note: 4 msb of last byte are padding
yield v1 & 0x3f
yield v1>>6
else:
assert tail == 2
# note: 2 msb of last byte are padding
v2 = next_value()
yield v1 & 0x3f
yield ((v2 & 0x0f)<<2)|(v1>>6)
yield v2>>4
def _encode_bytes_big(self, next_value, chunks, tail):
"""helper used by encode_bytes() to handle big-endian encoding"""
#
# output bit layout:
#
# first byte: v1 765432
#
# second byte: v1 10....
# +v2 ..7654
#
# third byte: v2 3210..
# +v3 ....76
#
# fourth byte: v3 543210
#
idx = 0
while idx < chunks:
v1 = next_value()
v2 = next_value()
v3 = next_value()
yield v1>>2
yield ((v1&0x03)<<4)|(v2>>4)
yield ((v2&0x0f)<<2)|(v3>>6)
yield v3 & 0x3f
idx += 1
if tail:
v1 = next_value()
if tail == 1:
# note: 4 lsb of last byte are padding
yield v1>>2
yield (v1&0x03)<<4
else:
assert tail == 2
# note: 2 lsb of last byte are padding
v2 = next_value()
yield v1>>2
yield ((v1&0x03)<<4)|(v2>>4)
yield ((v2&0x0f)<<2)
#===================================================================
# decoding byte strings
#===================================================================
def decode_bytes(self, source):
"""decode bytes from base64 string.
:arg source: byte string to decode.
:returns: byte string containing decoded data.
"""
if not isinstance(source, bytes):
raise TypeError("source must be bytes, not %s" % (type(source),))
##padding = self.padding
##if padding:
## # TODO: add padding size check?
## source = source.rstrip(padding)
chunks, tail = divmod(len(source), 4)
if tail == 1:
            # only 6 bits left, not enough to decode a whole byte!
raise ValueError("input string length cannot be == 1 mod 4")
next_value = nextgetter(imap(self._decode64, source))
try:
return join_byte_values(self._decode_bytes(next_value, chunks, tail))
except KeyError as err:
raise ValueError("invalid character: %r" % (err.args[0],))
def _decode_bytes_little(self, next_value, chunks, tail):
"""helper used by decode_bytes() to handle little-endian encoding"""
#
# input bit layout:
#
# first byte: v1 ..543210
# +v2 10......
#
# second byte: v2 ....5432
# +v3 3210....
#
# third byte: v3 ......54
# +v4 543210..
#
idx = 0
while idx < chunks:
v1 = next_value()
v2 = next_value()
v3 = next_value()
v4 = next_value()
yield v1 | ((v2 & 0x3) << 6)
yield (v2>>2) | ((v3 & 0xF) << 4)
yield (v3>>4) | (v4<<2)
idx += 1
if tail:
# tail is 2 or 3
v1 = next_value()
v2 = next_value()
yield v1 | ((v2 & 0x3) << 6)
# NOTE: if tail == 2, 4 msb of v2 are ignored (should be 0)
if tail == 3:
# NOTE: 2 msb of v3 are ignored (should be 0)
v3 = next_value()
yield (v2>>2) | ((v3 & 0xF) << 4)
def _decode_bytes_big(self, next_value, chunks, tail):
"""helper used by decode_bytes() to handle big-endian encoding"""
#
# input bit layout:
#
# first byte: v1 543210..
# +v2 ......54
#
# second byte: v2 3210....
# +v3 ....5432
#
# third byte: v3 10......
# +v4 ..543210
#
idx = 0
while idx < chunks:
v1 = next_value()
v2 = next_value()
v3 = next_value()
v4 = next_value()
yield (v1<<2) | (v2>>4)
yield ((v2&0xF)<<4) | (v3>>2)
yield ((v3&0x3)<<6) | v4
idx += 1
if tail:
# tail is 2 or 3
v1 = next_value()
v2 = next_value()
yield (v1<<2) | (v2>>4)
# NOTE: if tail == 2, 4 lsb of v2 are ignored (should be 0)
if tail == 3:
# NOTE: 2 lsb of v3 are ignored (should be 0)
v3 = next_value()
yield ((v2&0xF)<<4) | (v3>>2)
#===================================================================
# encode/decode helpers
#===================================================================
# padmap2/3 - dict mapping last char of string ->
# equivalent char with no padding bits set.
def __make_padset(self, bits):
"""helper to generate set of valid last chars & bytes"""
pset = set(c for i,c in enumerate(self.bytemap) if not i & bits)
pset.update(c for i,c in enumerate(self.charmap) if not i & bits)
return frozenset(pset)
@memoized_property
def _padinfo2(self):
"""mask to clear padding bits, and valid last bytes (for strings 2 % 4)"""
# 4 bits of last char unused (lsb for big, msb for little)
bits = 15 if self.big else (15<<2)
return ~bits, self.__make_padset(bits)
@memoized_property
def _padinfo3(self):
"""mask to clear padding bits, and valid last bytes (for strings 3 % 4)"""
# 2 bits of last char unused (lsb for big, msb for little)
bits = 3 if self.big else (3<<4)
return ~bits, self.__make_padset(bits)
def check_repair_unused(self, source):
"""helper to detect & clear invalid unused bits in last character.
:arg source:
encoded data (as ascii bytes or unicode).
:returns:
`(True, result)` if the string was repaired,
`(False, source)` if the string was ok as-is.
"""
# figure out how many padding bits there are in last char.
tail = len(source) & 3
if tail == 2:
mask, padset = self._padinfo2
elif tail == 3:
mask, padset = self._padinfo3
elif not tail:
return False, source
else:
raise ValueError("source length must != 1 mod 4")
# check if last char is ok (padset contains bytes & unicode versions)
last = source[-1]
if last in padset:
return False, source
# we have dirty bits - repair the string by decoding last char,
# clearing the padding bits via <mask>, and encoding new char.
if isinstance(source, unicode):
cm = self.charmap
last = cm[cm.index(last) & mask]
assert last in padset, "failed to generate valid padding char"
else:
# NOTE: this assumes ascii-compat encoding, and that
# all chars used by encoding are 7-bit ascii.
last = self._encode64(self._decode64(last) & mask)
assert last in padset, "failed to generate valid padding char"
if PY3:
last = bytes([last])
return True, source[:-1] + last
def repair_unused(self, source):
return self.check_repair_unused(source)[1]
##def transcode(self, source, other):
## return ''.join(
## other.charmap[self.charmap.index(char)]
## for char in source
## )
##def random_encoded_bytes(self, size, random=None, unicode=False):
## "return random encoded string of given size"
## data = getrandstr(random or rng,
## self.charmap if unicode else self.bytemap, size)
## return self.repair_unused(data)
#===================================================================
# transposed encoding/decoding
#===================================================================
def encode_transposed_bytes(self, source, offsets):
"""encode byte string, first transposing source using offset list"""
if not isinstance(source, bytes):
raise TypeError("source must be bytes, not %s" % (type(source),))
tmp = join_byte_elems(source[off] for off in offsets)
return self.encode_bytes(tmp)
def decode_transposed_bytes(self, source, offsets):
"""decode byte string, then reverse transposition described by offset list"""
# NOTE: if transposition does not use all bytes of source,
# the original can't be recovered... and join_byte_elems() will throw
# an error because 1+ values in <buf> will be None.
tmp = self.decode_bytes(source)
buf = [None] * len(offsets)
for off, char in zip(offsets, tmp):
buf[off] = char
return join_byte_elems(buf)
#===================================================================
# integer decoding helpers - mainly used by des_crypt family
#===================================================================
def _decode_int(self, source, bits):
"""decode base64 string -> integer
:arg source: base64 string to decode.
:arg bits: number of bits in resulting integer.
:raises ValueError:
* if the string contains invalid base64 characters.
* if the string is not long enough - it must be at least
``int(ceil(bits/6))`` in length.
:returns:
a integer in the range ``0 <= n < 2**bits``
"""
if not isinstance(source, bytes):
raise TypeError("source must be bytes, not %s" % (type(source),))
big = self.big
pad = -bits % 6
        chars = (bits+pad)//6
if len(source) != chars:
raise ValueError("source must be %d chars" % (chars,))
decode = self._decode64
out = 0
try:
for c in source if big else reversed(source):
out = (out<<6) + decode(c)
except KeyError:
raise ValueError("invalid character in string: %r" % (c,))
if pad:
# strip padding bits
if big:
out >>= pad
else:
out &= (1<<bits)-1
return out
#---------------------------------------------------------------
# optimized versions for common integer sizes
#---------------------------------------------------------------
def decode_int6(self, source):
"""decode single character -> 6 bit integer"""
if not isinstance(source, bytes):
raise TypeError("source must be bytes, not %s" % (type(source),))
if len(source) != 1:
raise ValueError("source must be exactly 1 byte")
if PY3:
# convert to 8bit int before doing lookup
source = source[0]
try:
return self._decode64(source)
except KeyError:
raise ValueError("invalid character")
def decode_int12(self, source):
"""decodes 2 char string -> 12-bit integer"""
if not isinstance(source, bytes):
raise TypeError("source must be bytes, not %s" % (type(source),))
if len(source) != 2:
raise ValueError("source must be exactly 2 bytes")
decode = self._decode64
try:
if self.big:
return decode(source[1]) + (decode(source[0])<<6)
else:
return decode(source[0]) + (decode(source[1])<<6)
except KeyError:
raise ValueError("invalid character")
def decode_int24(self, source):
"""decodes 4 char string -> 24-bit integer"""
if not isinstance(source, bytes):
raise TypeError("source must be bytes, not %s" % (type(source),))
if len(source) != 4:
raise ValueError("source must be exactly 4 bytes")
decode = self._decode64
try:
if self.big:
return decode(source[3]) + (decode(source[2])<<6)+ \
(decode(source[1])<<12) + (decode(source[0])<<18)
else:
return decode(source[0]) + (decode(source[1])<<6)+ \
(decode(source[2])<<12) + (decode(source[3])<<18)
except KeyError:
raise ValueError("invalid character")
def decode_int30(self, source):
"""decode 5 char string -> 30 bit integer"""
return self._decode_int(source, 30)
def decode_int64(self, source):
"""decode 11 char base64 string -> 64-bit integer
this format is used primarily by des-crypt & variants to encode
the DES output value used as a checksum.
"""
return self._decode_int(source, 64)
#===================================================================
# integer encoding helpers - mainly used by des_crypt family
#===================================================================
def _encode_int(self, value, bits):
"""encode integer into base64 format
:arg value: non-negative integer to encode
:arg bits: number of bits to encode
:returns:
a string of length ``int(ceil(bits/6.0))``.
"""
assert value >= 0, "caller did not sanitize input"
pad = -bits % 6
bits += pad
if self.big:
itr = irange(bits-6, -6, -6)
# shift to add lsb padding.
value <<= pad
else:
itr = irange(0, bits, 6)
# padding is msb, so no change needed.
return join_byte_elems(imap(self._encode64,
((value>>off) & 0x3f for off in itr)))
#---------------------------------------------------------------
# optimized versions for common integer sizes
#---------------------------------------------------------------
def encode_int6(self, value):
"""encodes 6-bit integer -> single hash64 character"""
if value < 0 or value > 63:
raise ValueError("value out of range")
if PY3:
return self.bytemap[value:value+1]
else:
return self._encode64(value)
def encode_int12(self, value):
"""encodes 12-bit integer -> 2 char string"""
if value < 0 or value > 0xFFF:
raise ValueError("value out of range")
raw = [value & 0x3f, (value>>6) & 0x3f]
if self.big:
raw = reversed(raw)
return join_byte_elems(imap(self._encode64, raw))
def encode_int24(self, value):
"""encodes 24-bit integer -> 4 char string"""
if value < 0 or value > 0xFFFFFF:
raise ValueError("value out of range")
raw = [value & 0x3f, (value>>6) & 0x3f,
(value>>12) & 0x3f, (value>>18) & 0x3f]
if self.big:
raw = reversed(raw)
return join_byte_elems(imap(self._encode64, raw))
def encode_int30(self, value):
"""decode 5 char string -> 30 bit integer"""
if value < 0 or value > 0x3fffffff:
raise ValueError("value out of range")
return self._encode_int(value, 30)
def encode_int64(self, value):
"""encode 64-bit integer -> 11 char hash64 string
this format is used primarily by des-crypt & variants to encode
the DES output value used as a checksum.
"""
if value < 0 or value > 0xffffffffffffffff:
raise ValueError("value out of range")
return self._encode_int(value, 64)
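    # Round-trip sketch: each decode_int*() helper inverts the matching
    # encode_int*() helper for in-range values.
    ##
    ##   >>> from zdppy_password_hash.utils.binary import h64
    ##   >>> h64.decode_int24(h64.encode_int24(0x123456)) == 0x123456
    ##   True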
#===================================================================
# eof
#===================================================================
class LazyBase64Engine(Base64Engine):
"""Base64Engine which delays initialization until it's accessed"""
_lazy_opts = None
def __init__(self, *args, **kwds):
self._lazy_opts = (args, kwds)
def _lazy_init(self):
args, kwds = self._lazy_opts
super(LazyBase64Engine, self).__init__(*args, **kwds)
del self._lazy_opts
self.__class__ = Base64Engine
def __getattribute__(self, attr):
if not attr.startswith("_"):
self._lazy_init()
return object.__getattribute__(self, attr)
#-------------------------------------------------------------
# common variants
#-------------------------------------------------------------
h64 = LazyBase64Engine(HASH64_CHARS)
h64big = LazyBase64Engine(HASH64_CHARS, big=True)
bcrypt64 = LazyBase64Engine(BCRYPT_CHARS, big=True)
#=============================================================================
# eof
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/utils/binary.py | binary.py |
from __future__ import division
# core
import logging; log = logging.getLogger(__name__)
# site
# pkg
from zdppy_password_hash.exc import ExpectedTypeError
from zdppy_password_hash.utils.decor import deprecated_function
from zdppy_password_hash.utils.compat import native_string_types
from zdppy_password_hash.crypto.digest import norm_hash_name, lookup_hash, pbkdf1 as _pbkdf1, pbkdf2_hmac, compile_hmac
# local
__all__ = [
# hash utils
"norm_hash_name",
# prf utils
"get_prf",
# kdfs
"pbkdf1",
"pbkdf2",
]
#=============================================================================
# issue deprecation warning for module
#=============================================================================
from warnings import warn
warn("the module 'zdppy_password_hash.utils.pbkdf2' is deprecated as of Passlib 1.7, "
"and will be removed in Passlib 2.0, please use 'zdppy_password_hash.crypto' instead",
DeprecationWarning)
#=============================================================================
# hash helpers
#=============================================================================
norm_hash_name = deprecated_function(deprecated="1.7", removed="1.8", func_module=__name__,
replacement="zdppy_password_hash.crypto.digest.norm_hash_name")(norm_hash_name)
#=============================================================================
# prf lookup
#=============================================================================
#: cache mapping prf name/func -> (func, digest_size)
_prf_cache = {}
#: list of accepted prefixes
_HMAC_PREFIXES = ("hmac_", "hmac-")
def get_prf(name):
"""Lookup pseudo-random family (PRF) by name.
:arg name:
This must be the name of a recognized prf.
Currently this only recognizes names with the format
:samp:`hmac-{digest}`, where :samp:`{digest}`
is the name of a hash function such as
``md5``, ``sha256``, etc.
todo: restore text about callables.
:raises ValueError: if the name is not known
:raises TypeError: if the name is not a callable or string
:returns:
a tuple of :samp:`({prf_func}, {digest_size})`, where:
* :samp:`{prf_func}` is a function implementing
the specified PRF, and has the signature
``prf_func(secret, message) -> digest``.
* :samp:`{digest_size}` is an integer indicating
the number of bytes the function returns.
Usage example::
>>> from zdppy_password_hash.utils.pbkdf2 import get_prf
>>> hmac_sha256, dsize = get_prf("hmac-sha256")
>>> hmac_sha256
<function hmac_sha256 at 0x1e37c80>
>>> dsize
32
>>> digest = hmac_sha256('password', 'message')
.. deprecated:: 1.7
This function is deprecated, and will be removed in Passlib 2.0.
        The closest replacement is :func:`zdppy_password_hash.crypto.digest.compile_hmac`.
"""
global _prf_cache
if name in _prf_cache:
return _prf_cache[name]
if isinstance(name, native_string_types):
if not name.startswith(_HMAC_PREFIXES):
raise ValueError("unknown prf algorithm: %r" % (name,))
digest = lookup_hash(name[5:]).name
def hmac(key, msg):
return compile_hmac(digest, key)(msg)
        record = (hmac, lookup_hash(digest).digest_size)
elif callable(name):
# assume it's a callable, use it directly
digest_size = len(name(b'x', b'y'))
record = (name, digest_size)
else:
raise ExpectedTypeError(name, "str or callable", "prf name")
_prf_cache[name] = record
return record
#=============================================================================
# pbkdf1 support
#=============================================================================
def pbkdf1(secret, salt, rounds, keylen=None, hash="sha1"):
"""pkcs#5 password-based key derivation v1.5
:arg secret: passphrase to use to generate key
:arg salt: salt string to use when generating key
:param rounds: number of rounds to use to generate key
:arg keylen: number of bytes to generate (if ``None``, uses digest's native size)
:param hash:
hash function to use. must be name of a hash recognized by hashlib.
:returns:
raw bytes of generated key
.. note::
        This algorithm has been deprecated; new code should use PBKDF2.
Among other limitations, ``keylen`` cannot be larger
than the digest size of the specified hash.
.. deprecated:: 1.7
This has been relocated to :func:`zdppy_password_hash.crypto.digest.pbkdf1`,
and this version will be removed in Passlib 2.0.
*Note the call signature has changed.*
"""
return _pbkdf1(hash, secret, salt, rounds, keylen)
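# Usage sketch for the deprecated pbkdf1() wrapper (shown for reference only;
# any hashlib-recognized digest name may be passed as ``hash``):
##
##   >>> from zdppy_password_hash.utils.pbkdf2 import pbkdf1
##   >>> key = pbkdf1(b"password", b"salt", rounds=1000, keylen=16, hash="sha1")
##   >>> len(key)
##   16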
#=============================================================================
# pbkdf2
#=============================================================================
def pbkdf2(secret, salt, rounds, keylen=None, prf="hmac-sha1"):
"""pkcs#5 password-based key derivation v2.0
:arg secret:
passphrase to use to generate key
:arg salt:
salt string to use when generating key
:param rounds:
number of rounds to use to generate key
:arg keylen:
number of bytes to generate.
if set to ``None``, will use digest size of selected prf.
:param prf:
        pseudo-random family to use for key strengthening.
this must be a string starting with ``"hmac-"``, followed by the name of a known digest.
this defaults to ``"hmac-sha1"`` (the only prf explicitly listed in
the PBKDF2 specification)
.. rst-class:: warning
    .. versionchanged:: 1.7
        This argument no longer supports arbitrary PRF callables --
        these were rarely (if ever) used, and created too many unwanted codepaths.
:returns:
raw bytes of generated key
.. deprecated:: 1.7
This has been deprecated in favor of :func:`zdppy_password_hash.crypto.digest.pbkdf2_hmac`,
and will be removed in Passlib 2.0. *Note the call signature has changed.*
"""
if callable(prf) or (isinstance(prf, native_string_types) and not prf.startswith(_HMAC_PREFIXES)):
raise NotImplementedError("non-HMAC prfs are not supported as of Passlib 1.7")
digest = prf[5:]
return pbkdf2_hmac(digest, secret, salt, rounds, keylen)
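# Usage sketch for the deprecated pbkdf2() wrapper (shown for reference only):
##
##   >>> from zdppy_password_hash.utils.pbkdf2 import pbkdf2
##   >>> key = pbkdf2(b"password", b"salt", rounds=29000, keylen=32, prf="hmac-sha256")
##   >>> len(key)
##   32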
#=============================================================================
# eof
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/utils/pbkdf2.py | pbkdf2.py |
from __future__ import absolute_import, division, print_function
import logging
log = logging.getLogger(__name__)
from functools import wraps, update_wrapper
import types
from warnings import warn
# site
# pkg
from zdppy_password_hash.utils.compat import PY3
# local
__all__ = [
"classproperty",
"hybrid_method",
"memoize_single_value",
"memoized_property",
"deprecated_function",
"deprecated_method",
]
#=============================================================================
# class-level decorators
#=============================================================================
class classproperty(object):
"""Function decorator which acts like a combination of classmethod+property (limited to read-only properties)"""
def __init__(self, func):
self.im_func = func
def __get__(self, obj, cls):
return self.im_func(cls)
@property
def __func__(self):
"""py3 compatible alias"""
return self.im_func
class hybrid_method(object):
"""
decorator which invokes function with class if called as class method,
and with object if called at instance level.
"""
def __init__(self, func):
self.func = func
update_wrapper(self, func)
def __get__(self, obj, cls):
if obj is None:
obj = cls
if PY3:
return types.MethodType(self.func, obj)
else:
return types.MethodType(self.func, obj, cls)
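# Usage sketch for hybrid_method (Example / target are illustrative names):
##
##   class Example(object):
##       @hybrid_method
##       def target(self_or_cls):
##           return self_or_cls
##
##   Example.target() is Example        # class-level call receives the class
##   obj = Example()
##   obj.target() is obj                # instance-level call receives the instance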
#=============================================================================
# memoization
#=============================================================================
def memoize_single_value(func):
"""
decorator for function which takes no args,
and memoizes result. exposes a ``.clear_cache`` method
to clear the cached value.
"""
cache = {}
@wraps(func)
def wrapper():
try:
return cache[True]
except KeyError:
pass
value = cache[True] = func()
return value
def clear_cache():
cache.pop(True, None)
wrapper.clear_cache = clear_cache
return wrapper
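# Usage sketch (load_settings / _parse_config are hypothetical names):
##
##   @memoize_single_value
##   def load_settings():
##       return _parse_config()         # evaluated only on the first call
##
##   load_settings()                    # computes & caches result
##   load_settings()                    # returns cached result
##   load_settings.clear_cache()        # next call recomputes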
class memoized_property(object):
"""
decorator which invokes method once, then replaces attr with result
"""
def __init__(self, func):
self.__func__ = func
self.__name__ = func.__name__
self.__doc__ = func.__doc__
def __get__(self, obj, cls):
if obj is None:
return self
value = self.__func__(obj)
setattr(obj, self.__name__, value)
return value
if not PY3:
@property
def im_func(self):
"""py2 alias"""
return self.__func__
def clear_cache(self, obj):
"""
class-level helper to clear stored value (if any).
usage: :samp:`type(self).{attr}.clear_cache(self)`
"""
obj.__dict__.pop(self.__name__, None)
def peek_cache(self, obj, default=None):
"""
class-level helper to peek at stored value
        usage: :samp:`value = type(self).{attr}.peek_cache(self)`
"""
return obj.__dict__.get(self.__name__, default)
# works but not used
##class memoized_class_property(object):
## """function decorator which calls function as classmethod,
## and replaces itself with result for current and all future invocations.
## """
## def __init__(self, func):
## self.im_func = func
##
## def __get__(self, obj, cls):
## func = self.im_func
## value = func(cls)
## setattr(cls, func.__name__, value)
## return value
##
## @property
## def __func__(self):
## "py3 compatible alias"
#=============================================================================
# deprecation
#=============================================================================
def deprecated_function(msg=None, deprecated=None, removed=None, updoc=True,
replacement=None, _is_method=False,
func_module=None):
"""decorator to deprecate a function.
:arg msg: optional msg, default chosen if omitted
:kwd deprecated: version when function was first deprecated
:kwd removed: version when function will be removed
:kwd replacement: alternate name / instructions for replacing this function.
:kwd updoc: add notice to docstring (default ``True``)
"""
if msg is None:
if _is_method:
msg = "the method %(mod)s.%(klass)s.%(name)s() is deprecated"
else:
msg = "the function %(mod)s.%(name)s() is deprecated"
if deprecated:
msg += " as of Passlib %(deprecated)s"
if removed:
msg += ", and will be removed in Passlib %(removed)s"
if replacement:
msg += ", use %s instead" % replacement
msg += "."
def build(func):
is_classmethod = _is_method and isinstance(func, classmethod)
if is_classmethod:
# NOTE: PY26 doesn't support "classmethod().__func__" directly...
func = func.__get__(None, type).__func__
opts = dict(
mod=func_module or func.__module__,
name=func.__name__,
deprecated=deprecated,
removed=removed,
)
if _is_method:
def wrapper(*args, **kwds):
tmp = opts.copy()
klass = args[0] if is_classmethod else args[0].__class__
tmp.update(klass=klass.__name__, mod=klass.__module__)
warn(msg % tmp, DeprecationWarning, stacklevel=2)
return func(*args, **kwds)
else:
text = msg % opts
def wrapper(*args, **kwds):
warn(text, DeprecationWarning, stacklevel=2)
return func(*args, **kwds)
update_wrapper(wrapper, func)
if updoc and (deprecated or removed) and \
wrapper.__doc__ and ".. deprecated::" not in wrapper.__doc__:
txt = deprecated or ''
if removed or replacement:
txt += "\n "
if removed:
txt += "and will be removed in version %s" % (removed,)
if replacement:
if removed:
txt += ", "
txt += "use %s instead" % replacement
txt += "."
if not wrapper.__doc__.strip(" ").endswith("\n"):
wrapper.__doc__ += "\n"
wrapper.__doc__ += "\n.. deprecated:: %s\n" % (txt,)
if is_classmethod:
wrapper = classmethod(wrapper)
return wrapper
return build
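# Usage sketch (old_helper / new_helper are illustrative names); calls to the
# wrapped function emit a DeprecationWarning, and its docstring gains a
# ".. deprecated::" note when updoc=True:
##
##   @deprecated_function(deprecated="1.7", removed="2.0", replacement="new_helper()")
##   def old_helper():
##       ...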
def deprecated_method(msg=None, deprecated=None, removed=None, updoc=True,
replacement=None):
"""decorator to deprecate a method.
:arg msg: optional msg, default chosen if omitted
:kwd deprecated: version when method was first deprecated
:kwd removed: version when method will be removed
:kwd replacement: alternate name / instructions for replacing this method.
:kwd updoc: add notice to docstring (default ``True``)
"""
return deprecated_function(msg, deprecated, removed, updoc, replacement,
_is_method=True)
#=============================================================================
# eof
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/utils/decor.py | decor.py |
#=============================================================================
# imports
#=============================================================================
from zdppy_password_hash.utils.compat import JYTHON
# core
from binascii import b2a_base64, a2b_base64, Error as _BinAsciiError
from base64 import b64encode, b64decode
try:
from collections.abc import Sequence
from collections.abc import Iterable
except ImportError:
# py2 compat
from collections import Sequence
from collections import Iterable
from codecs import lookup as _lookup_codec
from functools import update_wrapper
import itertools
import inspect
import logging; log = logging.getLogger(__name__)
import math
import os
import sys
import random
import re
if JYTHON: # pragma: no cover -- runtime detection
# Jython 2.5.2 lacks stringprep module -
# see http://bugs.jython.org/issue1758320
try:
import stringprep
except ImportError:
stringprep = None
_stringprep_missing_reason = "not present under Jython"
else:
import stringprep
import time
if stringprep:
import unicodedata
try:
import threading
except ImportError:
# module optional before py37
threading = None
import timeit
import types
from warnings import warn
# site
# pkg
from zdppy_password_hash.utils.binary import (
# [remove these aliases in 2.0]
BASE64_CHARS, AB64_CHARS, HASH64_CHARS, BCRYPT_CHARS,
Base64Engine, LazyBase64Engine, h64, h64big, bcrypt64,
ab64_encode, ab64_decode, b64s_encode, b64s_decode
)
from zdppy_password_hash.utils.decor import (
# [remove these aliases in 2.0]
deprecated_function,
deprecated_method,
memoized_property,
classproperty,
hybrid_method,
)
from zdppy_password_hash.exc import ExpectedStringError, ExpectedTypeError
from zdppy_password_hash.utils.compat import (add_doc, join_bytes, join_byte_values,
join_byte_elems, irange, imap, PY3, u,
join_unicode, unicode, byte_elem_value, nextgetter,
unicode_or_str, unicode_or_bytes_types,
get_method_function, suppress_cause, PYPY)
# local
__all__ = [
# constants
'JYTHON',
'sys_bits',
'unix_crypt_schemes',
'rounds_cost_values',
# unicode helpers
'consteq',
'saslprep',
# bytes helpers
"xor_bytes",
"render_bytes",
# encoding helpers
'is_same_codec',
'is_ascii_safe',
'to_bytes',
'to_unicode',
'to_native_str',
# host OS
'has_crypt',
'test_crypt',
'safe_crypt',
'tick',
# randomness
'rng',
'getrandbytes',
'getrandstr',
'generate_password',
# object type / interface tests
'is_crypt_handler',
'is_crypt_context',
'has_rounds_info',
'has_salt_info',
]
#=============================================================================
# constants
#=============================================================================
# bitsize of system architecture (32 or 64)
sys_bits = int(math.log(sys.maxsize if PY3 else sys.maxint, 2) + 1.5)
# list of hashes algs supported by crypt() on at least one OS.
# XXX: move to .registry for zdppy_password_hash 2.0?
unix_crypt_schemes = [
"sha512_crypt", "sha256_crypt",
"sha1_crypt", "bcrypt",
"md5_crypt",
# "bsd_nthash",
"bsdi_crypt", "des_crypt",
]
# list of rounds_cost constants
rounds_cost_values = [ "linear", "log2" ]
# legacy import, will be removed in 1.8
from zdppy_password_hash.exc import MissingBackendError
# internal helpers
_BEMPTY = b''
_UEMPTY = u("")
_USPACE = u(" ")
# maximum password size which zdppy_password_hash will allow; see exc.PasswordSizeError
MAX_PASSWORD_SIZE = int(os.environ.get("PASSLIB_MAX_PASSWORD_SIZE") or 4096)
#=============================================================================
# type helpers
#=============================================================================
class SequenceMixin(object):
"""
helper which lets result object act like a fixed-length sequence.
subclass just needs to provide :meth:`_as_tuple()`.
"""
def _as_tuple(self):
raise NotImplementedError("implement in subclass")
def __repr__(self):
return repr(self._as_tuple())
def __getitem__(self, idx):
return self._as_tuple()[idx]
def __iter__(self):
return iter(self._as_tuple())
def __len__(self):
return len(self._as_tuple())
def __eq__(self, other):
return self._as_tuple() == other
def __ne__(self, other):
return not self.__eq__(other)
if PY3:
# getargspec() is deprecated, use this under py3.
# even though it's a lot more awkward to get basic info :|
_VAR_KEYWORD = inspect.Parameter.VAR_KEYWORD
_VAR_ANY_SET = set([_VAR_KEYWORD, inspect.Parameter.VAR_POSITIONAL])
def accepts_keyword(func, key):
"""test if function accepts specified keyword"""
params = inspect.signature(get_method_function(func)).parameters
if not params:
return False
arg = params.get(key)
if arg and arg.kind not in _VAR_ANY_SET:
return True
# XXX: annoying what we have to do to determine if VAR_KWDS in use.
return params[list(params)[-1]].kind == _VAR_KEYWORD
else:
def accepts_keyword(func, key):
"""test if function accepts specified keyword"""
spec = inspect.getargspec(get_method_function(func))
return key in spec.args or spec.keywords is not None
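# Behavior sketch for accepts_keyword():
##
##   >>> def func(alpha, beta=1): pass
##   >>> accepts_keyword(func, "beta")
##   True
##   >>> accepts_keyword(func, "gamma")
##   False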
def update_mixin_classes(target, add=None, remove=None, append=False,
before=None, after=None, dryrun=False):
"""
helper to update mixin classes installed in target class.
:param target:
target class whose bases will be modified.
:param add:
class / classes to install into target's base class list.
:param remove:
class / classes to remove from target's base class list.
:param append:
by default, prepends mixins to front of list.
if True, appends to end of list instead.
:param after:
optionally make sure all mixins are inserted after
this class / classes.
:param before:
optionally make sure all mixins are inserted before
this class / classes.
:param dryrun:
optionally perform all calculations / raise errors,
but don't actually modify the class.
"""
if isinstance(add, type):
add = [add]
bases = list(target.__bases__)
# strip out requested mixins
if remove:
if isinstance(remove, type):
remove = [remove]
for mixin in remove:
if add and mixin in add:
continue
if mixin in bases:
bases.remove(mixin)
# add requested mixins
if add:
for mixin in add:
# if mixin already present (explicitly or not), leave alone
if any(issubclass(base, mixin) for base in bases):
continue
# determine insertion point
if append:
for idx, base in enumerate(bases):
if issubclass(mixin, base):
                        # don't insert mixin after one of its own bases
break
if before and issubclass(base, before):
# don't insert mixin after any <before> classes.
break
else:
# append to end
idx = len(bases)
elif after:
for end_idx, base in enumerate(reversed(bases)):
if issubclass(base, after):
# don't insert mixin before any <after> classes.
idx = len(bases) - end_idx
assert bases[idx-1] == base
break
else:
idx = 0
else:
# insert at start
idx = 0
# insert mixin
bases.insert(idx, mixin)
# modify class
if not dryrun:
target.__bases__ = tuple(bases)
#=============================================================================
# collection helpers
#=============================================================================
def batch(source, size):
"""
split iterable into chunks of <size> elements.
"""
if size < 1:
raise ValueError("size must be positive integer")
if isinstance(source, Sequence):
end = len(source)
i = 0
while i < end:
n = i + size
yield source[i:n]
i = n
elif isinstance(source, Iterable):
itr = iter(source)
while True:
chunk_itr = itertools.islice(itr, size)
try:
first = next(chunk_itr)
except StopIteration:
break
yield itertools.chain((first,), chunk_itr)
else:
raise TypeError("source must be iterable")
#=============================================================================
# unicode helpers
#=============================================================================
# XXX: should this be moved to zdppy_password_hash.crypto, or compat backports?
def consteq(left, right):
"""Check two strings/bytes for equality.
This function uses an approach designed to prevent
timing analysis, making it appropriate for cryptography.
    left and right must both be of the same type: either str (ASCII only),
    or any type that supports the buffer protocol (e.g. bytes).
    Note: if left and right are of different lengths, or if an error occurs,
    a timing attack could theoretically reveal information about the
    types and lengths of the inputs -- but not their values.
"""
# NOTE:
# resources & discussions considered in the design of this function:
# hmac timing attack --
# http://rdist.root.org/2009/05/28/timing-attack-in-google-keyczar-library/
# python developer discussion surrounding similar function --
# http://bugs.python.org/issue15061
# http://bugs.python.org/issue14955
# validate types
if isinstance(left, unicode):
if not isinstance(right, unicode):
raise TypeError("inputs must be both unicode or both bytes")
is_py3_bytes = False
elif isinstance(left, bytes):
if not isinstance(right, bytes):
raise TypeError("inputs must be both unicode or both bytes")
is_py3_bytes = PY3
else:
raise TypeError("inputs must be both unicode or both bytes")
# do size comparison.
# NOTE: the double-if construction below is done deliberately, to ensure
# the same number of operations (including branches) is performed regardless
# of whether left & right are the same size.
same_size = (len(left) == len(right))
if same_size:
# if sizes are the same, setup loop to perform actual check of contents.
tmp = left
result = 0
if not same_size:
# if sizes aren't the same, set 'result' so equality will fail regardless
# of contents. then, to ensure we do exactly 'len(right)' iterations
# of the loop, just compare 'right' against itself.
tmp = right
result = 1
    # run constant-time string comparison
# TODO: use izip instead (but first verify it's faster than zip for this case)
if is_py3_bytes:
for l,r in zip(tmp, right):
result |= l ^ r
else:
for l,r in zip(tmp, right):
result |= ord(l) ^ ord(r)
return result == 0
# keep a copy of this pure-python version around, since the stdlib's version throws an
# error on non-ascii chars in unicode strings. ours handles them (though it suffers
# from some underlying VM timing issues), and something is better than
# nothing for plaintext hashes, which need this. everything else should use consteq(),
# since the stdlib one is going to be as good / better in the general case.
str_consteq = consteq
try:
# for py3.3 and up, use the stdlib version
from hmac import compare_digest as consteq
except ImportError:
pass
# TODO: could check for cryptography package's version,
# but only operates on bytes, so would need a wrapper,
# or separate consteq() into a unicode & a bytes variant.
# from cryptography.hazmat.primitives.constant_time import bytes_eq as consteq
def splitcomma(source, sep=","):
"""split comma-separated string into list of elements,
stripping whitespace.
"""
source = source.strip()
if source.endswith(sep):
source = source[:-1]
if not source:
return []
return [ elem.strip() for elem in source.split(sep) ]
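# Behavior sketch: a trailing separator and surrounding whitespace are ignored:
##
##   >>> splitcomma(" a , b, c ,")
##   ['a', 'b', 'c']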
def saslprep(source, param="value"):
"""Normalizes unicode strings using SASLPrep stringprep profile.
The SASLPrep profile is defined in :rfc:`4013`.
It provides a uniform scheme for normalizing unicode usernames
and passwords before performing byte-value sensitive operations
such as hashing. Among other things, it normalizes diacritic
representations, removes non-printing characters, and forbids
invalid characters such as ``\\n``. Properly internationalized
applications should run user passwords through this function
before hashing.
:arg source:
unicode string to normalize & validate
:param param:
Optional noun identifying source parameter in error messages
(Defaults to the string ``"value"``). This is mainly useful to make the caller's error
messages make more sense contextually.
:raises ValueError:
if any characters forbidden by the SASLPrep profile are encountered.
:raises TypeError:
if input is not :class:`!unicode`
:returns:
normalized unicode string
.. note::
This function is not available under Jython,
as the Jython stdlib is missing the :mod:`!stringprep` module
(`Jython issue 1758320 <http://bugs.jython.org/issue1758320>`_).
.. versionadded:: 1.6
"""
# saslprep - http://tools.ietf.org/html/rfc4013
# stringprep - http://tools.ietf.org/html/rfc3454
# http://docs.python.org/library/stringprep.html
# validate type
# XXX: support bytes (e.g. run through want_unicode)?
# might be easier to just integrate this into cryptcontext.
if not isinstance(source, unicode):
raise TypeError("input must be unicode string, not %s" %
(type(source),))
# mapping stage
# - map non-ascii spaces to U+0020 (stringprep C.1.2)
# - strip 'commonly mapped to nothing' chars (stringprep B.1)
in_table_c12 = stringprep.in_table_c12
in_table_b1 = stringprep.in_table_b1
data = join_unicode(
_USPACE if in_table_c12(c) else c
for c in source
if not in_table_b1(c)
)
# normalize to KC form
data = unicodedata.normalize('NFKC', data)
if not data:
return _UEMPTY
# check for invalid bi-directional strings.
# stringprep requires the following:
# - chars in C.8 must be prohibited.
# - if any R/AL chars in string:
# - no L chars allowed in string
# - first and last must be R/AL chars
# this checks if start/end are R/AL chars. if so, prohibited loop
# will forbid all L chars. if not, prohibited loop will forbid all
# R/AL chars instead. in both cases, prohibited loop takes care of C.8.
is_ral_char = stringprep.in_table_d1
if is_ral_char(data[0]):
if not is_ral_char(data[-1]):
raise ValueError("malformed bidi sequence in " + param)
# forbid L chars within R/AL sequence.
is_forbidden_bidi_char = stringprep.in_table_d2
else:
# forbid R/AL chars if start not setup correctly; L chars allowed.
is_forbidden_bidi_char = is_ral_char
# check for prohibited output - stringprep tables A.1, B.1, C.1.2, C.2 - C.9
in_table_a1 = stringprep.in_table_a1
in_table_c21_c22 = stringprep.in_table_c21_c22
in_table_c3 = stringprep.in_table_c3
in_table_c4 = stringprep.in_table_c4
in_table_c5 = stringprep.in_table_c5
in_table_c6 = stringprep.in_table_c6
in_table_c7 = stringprep.in_table_c7
in_table_c8 = stringprep.in_table_c8
in_table_c9 = stringprep.in_table_c9
for c in data:
# check for chars mapping stage should have removed
assert not in_table_b1(c), "failed to strip B.1 in mapping stage"
assert not in_table_c12(c), "failed to replace C.1.2 in mapping stage"
# check for forbidden chars
if in_table_a1(c):
raise ValueError("unassigned code points forbidden in " + param)
if in_table_c21_c22(c):
raise ValueError("control characters forbidden in " + param)
if in_table_c3(c):
raise ValueError("private use characters forbidden in " + param)
if in_table_c4(c):
raise ValueError("non-char code points forbidden in " + param)
if in_table_c5(c):
raise ValueError("surrogate codes forbidden in " + param)
if in_table_c6(c):
raise ValueError("non-plaintext chars forbidden in " + param)
if in_table_c7(c):
# XXX: should these have been caught by normalize?
# if so, should change this to an assert
raise ValueError("non-canonical chars forbidden in " + param)
if in_table_c8(c):
raise ValueError("display-modifying / deprecated chars "
"forbidden in" + param)
if in_table_c9(c):
raise ValueError("tagged characters forbidden in " + param)
# do bidi constraint check chosen by bidi init, above
if is_forbidden_bidi_char(c):
raise ValueError("forbidden bidi character in " + param)
return data
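# Quick example (the classic RFC 4013 case): the soft hyphen falls in
# stringprep table B.1, so the mapping stage strips it:
##
##   >>> saslprep(u("I\u00ADX"))
##   u'IX'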
# replace saslprep() with stub when stringprep is missing
if stringprep is None: # pragma: no cover -- runtime detection
def saslprep(source, param="value"):
"""stub for saslprep()"""
raise NotImplementedError("saslprep() support requires the 'stringprep' "
"module, which is " + _stringprep_missing_reason)
#=============================================================================
# bytes helpers
#=============================================================================
def render_bytes(source, *args):
"""Peform ``%`` formating using bytes in a uniform manner across Python 2/3.
This function is motivated by the fact that
:class:`bytes` instances do not support ``%`` or ``{}`` formatting under Python 3.
This function is an attempt to provide a replacement:
it converts everything to unicode (decoding bytes instances as ``latin-1``),
performs the required formatting, then encodes the result to ``latin-1``.
Calling ``render_bytes(source, *args)`` should function roughly the same as
``source % args`` under Python 2.
.. todo::
python >= 3.5 added back limited support for bytes %,
can revisit when 3.3/3.4 is dropped.
"""
if isinstance(source, bytes):
source = source.decode("latin-1")
result = source % tuple(arg.decode("latin-1") if isinstance(arg, bytes)
else arg for arg in args)
return result.encode("latin-1")
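# Quick example:
##
##   >>> render_bytes(b"%s-%d", b"abc", 123)
##   b'abc-123'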
if PY3:
# new in py32
def bytes_to_int(value):
return int.from_bytes(value, 'big')
def int_to_bytes(value, count):
return value.to_bytes(count, 'big')
else:
# XXX: can any of these be sped up?
from binascii import hexlify, unhexlify
def bytes_to_int(value):
return int(hexlify(value),16)
def int_to_bytes(value, count):
return unhexlify(('%%0%dx' % (count<<1)) % value)
add_doc(bytes_to_int, "decode byte string as single big-endian integer")
add_doc(int_to_bytes, "encode integer as single big-endian byte string")
def xor_bytes(left, right):
"""Perform bitwise-xor of two byte strings (must be same size)"""
return int_to_bytes(bytes_to_int(left) ^ bytes_to_int(right), len(left))
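# Quick example:
##
##   >>> xor_bytes(b"\x0f\xf0", b"\xff\x00")
##   b'\xf0\xf0'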
def repeat_string(source, size):
"""
repeat or truncate <source> string, so it has length <size>
"""
mult = 1 + (size - 1) // len(source)
return (source * mult)[:size]
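# Quick example:
##
##   >>> repeat_string("abc", 7)
##   'abcabca'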
def utf8_repeat_string(source, size):
"""
variant of repeat_string() which truncates to nearest UTF8 boundary.
"""
mult = 1 + (size - 1) // len(source)
return utf8_truncate(source * mult, size)
_BNULL = b"\x00"
_UNULL = u("\x00")
def right_pad_string(source, size, pad=None):
"""right-pad or truncate <source> string, so it has length <size>"""
cur = len(source)
if size > cur:
if pad is None:
pad = _UNULL if isinstance(source, unicode) else _BNULL
return source+pad*(size-cur)
else:
return source[:size]
def utf8_truncate(source, index):
"""
helper to truncate UTF8 byte string to nearest character boundary ON OR AFTER <index>.
returned prefix will always have length of at least <index>, and will stop on the
first byte that's not a UTF8 continuation byte (128 - 191 inclusive).
since utf8 should never take more than 4 bytes to encode known unicode values,
we can stop after ``index+3`` is reached.
:param bytes source:
:param int index:
:rtype: bytes
"""
# general approach:
#
# * UTF8 bytes will have high two bits (0xC0) as one of:
# 00 -- ascii char
# 01 -- ascii char
# 10 -- continuation of multibyte char
# 11 -- start of multibyte char.
# thus we can cut on anything where high bits aren't "10" (0x80; continuation byte)
#
# * UTF8 characters SHOULD always be 1 to 4 bytes, though they may be unbounded.
# so we just keep going until first non-continuation byte is encountered, or end of str.
# this should work predictably even for malformed/non UTF8 inputs.
if not isinstance(source, bytes):
raise ExpectedTypeError(source, bytes, "source")
# validate index
end = len(source)
if index < 0:
index = max(0, index + end)
if index >= end:
return source
# can stop search after 4 bytes, won't ever have longer utf8 sequence.
end = min(index + 3, end)
# loop until we find non-continuation byte
while index < end:
if byte_elem_value(source[index]) & 0xC0 != 0x80:
# found single-char byte, or start-char byte.
break
# else: found continuation byte.
index += 1
else:
assert index == end
# truncate at final index
result = source[:index]
def sanity_check():
# try to decode source
try:
text = source.decode("utf-8")
except UnicodeDecodeError:
# if source isn't valid utf8, byte level match is enough
return True
# validate that result was cut on character boundary
assert text.startswith(result.decode("utf-8"))
return True
assert sanity_check()
return result
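# Behavior sketch: truncation never splits a multibyte sequence -- it runs past
# <index> to the end of the character in progress:
##
##   >>> utf8_truncate(u("\u00e9abc").encode("utf-8"), 1)
##   b'\xc3\xa9'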
#=============================================================================
# encoding helpers
#=============================================================================
_ASCII_TEST_BYTES = b"\x00\n aA:#!\x7f"
_ASCII_TEST_UNICODE = _ASCII_TEST_BYTES.decode("ascii")
def is_ascii_codec(codec):
"""Test if codec is compatible with 7-bit ascii (e.g. latin-1, utf-8; but not utf-16)"""
return _ASCII_TEST_UNICODE.encode(codec) == _ASCII_TEST_BYTES
def is_same_codec(left, right):
"""Check if two codec names are aliases for same codec"""
if left == right:
return True
if not (left and right):
return False
return _lookup_codec(left).name == _lookup_codec(right).name
_B80 = b'\x80'[0]
_U80 = u('\x80')
def is_ascii_safe(source):
"""Check if string (bytes or unicode) contains only 7-bit ascii"""
r = _B80 if isinstance(source, bytes) else _U80
return all(c < r for c in source)
def to_bytes(source, encoding="utf-8", param="value", source_encoding=None):
"""Helper to normalize input to bytes.
:arg source:
Source bytes/unicode to process.
:arg encoding:
Target encoding (defaults to ``"utf-8"``).
:param param:
Optional name of variable/noun to reference when raising errors
:param source_encoding:
If this is specified, and the source is bytes,
the source will be transcoded from *source_encoding* to *encoding*
(via unicode).
:raises TypeError: if source is not unicode or bytes.
:returns:
* unicode strings will be encoded using *encoding*, and returned.
* if *source_encoding* is not specified, byte strings will be
returned unchanged.
* if *source_encoding* is specified, byte strings will be transcoded
to *encoding*.
"""
assert encoding
if isinstance(source, bytes):
if source_encoding and not is_same_codec(source_encoding, encoding):
return source.decode(source_encoding).encode(encoding)
else:
return source
elif isinstance(source, unicode):
return source.encode(encoding)
else:
raise ExpectedStringError(source, param)
def to_unicode(source, encoding="utf-8", param="value"):
"""Helper to normalize input to unicode.
:arg source:
source bytes/unicode to process.
:arg encoding:
encoding to use when decoding bytes instances.
:param param:
optional name of variable/noun to reference when raising errors.
:raises TypeError: if source is not unicode or bytes.
:returns:
* returns unicode strings unchanged.
* returns bytes strings decoded using *encoding*
"""
assert encoding
if isinstance(source, unicode):
return source
elif isinstance(source, bytes):
return source.decode(encoding)
else:
raise ExpectedStringError(source, param)
if PY3:
def to_native_str(source, encoding="utf-8", param="value"):
if isinstance(source, bytes):
return source.decode(encoding)
elif isinstance(source, unicode):
return source
else:
raise ExpectedStringError(source, param)
else:
def to_native_str(source, encoding="utf-8", param="value"):
if isinstance(source, bytes):
return source
elif isinstance(source, unicode):
return source.encode(encoding)
else:
raise ExpectedStringError(source, param)
add_doc(to_native_str,
"""Take in unicode or bytes, return native string.
Python 2: encodes unicode using specified encoding, leaves bytes alone.
Python 3: leaves unicode alone, decodes bytes using specified encoding.
:raises TypeError: if source is not unicode or bytes.
:arg source:
source unicode or bytes string.
:arg encoding:
encoding to use when encoding unicode or decoding bytes.
this defaults to ``"utf-8"``.
:param param:
optional name of variable/noun to reference when raising errors.
:returns: :class:`str` instance
""")
@deprecated_function(deprecated="1.6", removed="1.7")
def to_hash_str(source, encoding="ascii"): # pragma: no cover -- deprecated & unused
"""deprecated, use to_native_str() instead"""
return to_native_str(source, encoding, param="hash")
_true_set = set("true t yes y on 1 enable enabled".split())
_false_set = set("false f no n off 0 disable disabled".split())
_none_set = set(["", "none"])
def as_bool(value, none=None, param="boolean"):
"""
helper to convert value to boolean.
recognizes strings such as "true", "false"
"""
assert none in [True, False, None]
if isinstance(value, unicode_or_bytes_types):
clean = value.lower().strip()
if clean in _true_set:
return True
if clean in _false_set:
return False
if clean in _none_set:
return none
raise ValueError("unrecognized %s value: %r" % (param, value))
elif isinstance(value, bool):
return value
elif value is None:
return none
else:
return bool(value)
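# Quick example:
##
##   >>> as_bool("yes"), as_bool("off"), as_bool("", none=False)
##   (True, False, False)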
#=============================================================================
# host OS helpers
#=============================================================================
def is_safe_crypt_input(value):
"""
UT helper --
test if value is safe to pass to crypt.crypt();
under PY3, can't pass non-UTF8 bytes to crypt.crypt.
"""
if crypt_accepts_bytes or not isinstance(value, bytes):
return True
try:
value.decode("utf-8")
return True
except UnicodeDecodeError:
return False
try:
from crypt import crypt as _crypt
except ImportError: # pragma: no cover
_crypt = None
has_crypt = False
crypt_accepts_bytes = False
crypt_needs_lock = False
_safe_crypt_lock = None
def safe_crypt(secret, hash):
return None
else:
has_crypt = True
_NULL = '\x00'
# XXX: replace this with lazy-evaluated bug detection?
if threading and PYPY and (7, 2, 0) <= sys.pypy_version_info <= (7, 3, 3):
#: internal lock used to wrap crypt() calls.
#: WARNING: if non-zdppy_password_hash code invokes crypt(), this lock won't be enough!
_safe_crypt_lock = threading.Lock()
#: detect if crypt.crypt() needs a thread lock around calls.
crypt_needs_lock = True
else:
from zdppy_password_hash.utils.compat import nullcontext
_safe_crypt_lock = nullcontext()
crypt_needs_lock = False
# some crypt() variants will return various constant strings when
# an invalid/unrecognized config string is passed in; instead of
# returning NULL / None. examples include ":", ":0", "*0", etc.
# safe_crypt() returns None for any string starting with one of the
# chars in this string...
_invalid_prefixes = u("*:!")
if PY3:
# * pypy3 (as of v7.3.1) has a crypt which accepts bytes, or ASCII-only unicode.
# * whereas CPython3 (as of v3.9) has a crypt which doesn't take bytes,
# but accepts ANY unicode (which it always encodes to UTF8).
crypt_accepts_bytes = True
try:
_crypt(b"\xEE", "xx")
except TypeError:
# CPython will throw TypeError
crypt_accepts_bytes = False
except: # no pragma
# don't care about other errors this might throw,
# just want to see if we get past initial type-coercion step.
pass
def safe_crypt(secret, hash):
if crypt_accepts_bytes:
# PyPy3 -- all bytes accepted, but unicode encoded to ASCII,
# so handling that ourselves.
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
if _BNULL in secret:
raise ValueError("null character in secret")
if isinstance(hash, unicode):
hash = hash.encode("ascii")
else:
            # CPython3's crypt() doesn't take bytes, only unicode; the unicode is then
            # encoded using utf-8 before being passed to the C-level crypt().
# so we have to decode the secret.
if isinstance(secret, bytes):
orig = secret
try:
secret = secret.decode("utf-8")
except UnicodeDecodeError:
return None
                # sanity check it encodes back to the original byte string;
                # otherwise, when crypt() does its encoding, it'll hash the wrong bytes!
assert secret.encode("utf-8") == orig, \
"utf-8 spec says this can't happen!"
if _NULL in secret:
raise ValueError("null character in secret")
if isinstance(hash, bytes):
hash = hash.decode("ascii")
try:
with _safe_crypt_lock:
result = _crypt(secret, hash)
except OSError:
# new in py39 -- per https://bugs.python.org/issue39289,
# crypt() now throws OSError for various things, mainly unknown hash formats
# translating that to None for now (may revise safe_crypt behavior in future)
return None
# NOTE: per issue 113, crypt() may return bytes in some odd cases.
# assuming it should still return an ASCII hash though,
# or there's a bigger issue at hand.
if isinstance(result, bytes):
result = result.decode("ascii")
if not result or result[0] in _invalid_prefixes:
return None
return result
else:
#: see feature-detection in PY3 fork above
crypt_accepts_bytes = True
# Python 2 crypt handler
def safe_crypt(secret, hash):
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
if _NULL in secret:
raise ValueError("null character in secret")
if isinstance(hash, unicode):
hash = hash.encode("ascii")
with _safe_crypt_lock:
result = _crypt(secret, hash)
if not result:
return None
result = result.decode("ascii")
if result[0] in _invalid_prefixes:
return None
return result
add_doc(safe_crypt, """Wrapper around stdlib's crypt.
This is a wrapper around stdlib's :func:`!crypt.crypt`, which attempts
to provide uniform behavior across Python 2 and 3.
:arg secret:
password, as bytes or unicode (unicode will be encoded as ``utf-8``).
:arg hash:
hash or config string, as ascii bytes or unicode.
:returns:
resulting hash as ascii unicode; or ``None`` if the password
couldn't be hashed due to one of the issues:
* :func:`crypt()` not available on platform.
    * Under Python 3, if *secret* is specified as bytes,
      it must be valid ``utf-8`` or it can't be passed
      to :func:`crypt()`.
* Some OSes will return ``None`` if they don't recognize
the algorithm being used (though most will simply fall
back to des-crypt).
* Some OSes will return an error string if the input config
is recognized but malformed; current code converts these to ``None``
as well.
""")
def test_crypt(secret, hash):
"""check if :func:`crypt.crypt` supports specific hash
:arg secret: password to test
:arg hash: known hash of password to use as reference
:returns: True or False
"""
# safe_crypt() always returns unicode, which means that for py3,
# 'hash' can't be bytes, or "== hash" will never be True.
# under py2 unicode & str(bytes) will compare fine;
# so just enforcing "unicode_or_str" limitation
assert isinstance(hash, unicode_or_str), \
"hash must be unicode_or_str, got %s" % type(hash)
assert hash, "hash must be non-empty"
return safe_crypt(secret, hash) == hash
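# Usage sketch -- result depends on the host's crypt() support; the reference
# hash below is the classic des-crypt digest of "test" under salt "ab":
##
##   >>> test_crypt("test", "abgOeLfPimXQo")
##   True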
timer = timeit.default_timer
# legacy alias, will be removed in zdppy_password_hash 2.0
tick = timer
def parse_version(source):
"""helper to parse version string"""
m = re.search(r"(\d+(?:\.\d+)+)", source)
if m:
return tuple(int(elem) for elem in m.group(1).split("."))
return None
#=============================================================================
# randomness
#=============================================================================
#------------------------------------------------------------------------
# setup rng for generating salts
#------------------------------------------------------------------------
# NOTE:
# generating salts (e.g. h64_gensalt, below) doesn't require cryptographically
# strong randomness. it just requires enough range of possible outputs
# that making a rainbow table is too costly. so it should be ok to
# fall back on python's builtin mersenne twister prng, as long as it's seeded each time
# this module is imported, using a couple of minor entropy sources.
try:
os.urandom(1)
has_urandom = True
except NotImplementedError: # pragma: no cover
has_urandom = False
def genseed(value=None):
"""generate prng seed value from system resources"""
from hashlib import sha512
if hasattr(value, "getstate") and hasattr(value, "getrandbits"):
# caller passed in RNG as seed value
try:
value = value.getstate()
except NotImplementedError:
# this method throws error for e.g. SystemRandom instances,
# so fall back to extracting 4k of state
value = value.getrandbits(1 << 15)
text = u("%s %s %s %.15f %.15f %s") % (
# if caller specified a seed value, mix it in
value,
# add current process id
# NOTE: not available in some environments, e.g. GAE
os.getpid() if hasattr(os, "getpid") else None,
# id of a freshly created object.
# (at least 1 byte of which should be hard to predict)
id(object()),
# the current time, to whatever precision os uses
time.time(),
tick(),
# if urandom available, might as well mix some bytes in.
os.urandom(32).decode("latin-1") if has_urandom else 0,
)
# hash it all up and return it as int/long
return int(sha512(text.encode("utf-8")).hexdigest(), 16)
if has_urandom:
rng = random.SystemRandom()
else: # pragma: no cover -- runtime detection
# NOTE: to reseed use ``rng.seed(genseed(rng))``
# XXX: could reseed on every call
rng = random.Random(genseed())
#------------------------------------------------------------------------
# some rng helpers
#------------------------------------------------------------------------
def getrandbytes(rng, count):
"""return byte-string containing *count* number of randomly generated bytes, using specified rng"""
# NOTE: would be nice if this was present in stdlib Random class
###just in case rng provides this...
##meth = getattr(rng, "getrandbytes", None)
##if meth:
## return meth(count)
if not count:
return _BEMPTY
def helper():
# XXX: break into chunks for large number of bits?
value = rng.getrandbits(count<<3)
i = 0
while i < count:
yield value & 0xff
            value >>= 8  # discard the 8 bits just yielded
i += 1
return join_byte_values(helper())
def getrandstr(rng, charset, count):
"""return string containing *count* number of chars/bytes, whose elements are drawn from specified charset, using specified rng"""
# NOTE: tests determined this is 4x faster than rng.sample(),
# which is why that's not being used here.
# check alphabet & count
if count < 0:
raise ValueError("count must be >= 0")
letters = len(charset)
if letters == 0:
raise ValueError("alphabet must not be empty")
if letters == 1:
return charset * count
# get random value, and write out to buffer
def helper():
# XXX: break into chunks for large number of letters?
value = rng.randrange(0, letters**count)
i = 0
while i < count:
yield charset[value % letters]
value //= letters
i += 1
if isinstance(charset, unicode):
return join_unicode(helper())
else:
return join_byte_elems(helper())
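# Usage sketch (output varies per call; shown value is illustrative only):
##
##   >>> getrandstr(rng, "abcdef", 8)
##   'daebfcca'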
_52charset = '2346789ABCDEFGHJKMNPQRTUVWXYZabcdefghjkmnpqrstuvwxyz'
@deprecated_function(deprecated="1.7", removed="2.0",
replacement="zdppy_password_hash.pwd.genword() / zdppy_password_hash.pwd.genphrase()")
def generate_password(size=10, charset=_52charset):
"""generate random password using given length & charset
:param size:
size of password.
:param charset:
        optional string specifying the set of characters to draw from.
the default charset contains all normal alphanumeric characters,
except for the characters ``1IiLl0OoS5``, which were omitted
due to their visual similarity.
:returns: :class:`!str` containing randomly generated password.
.. note::
        Using the default character set, on an OS with :class:`!SystemRandom` support,
this function should generate passwords with 5.7 bits of entropy per character.
"""
return getrandstr(rng, charset, size)
#=============================================================================
# object type / interface tests
#=============================================================================
_handler_attrs = (
"name",
"setting_kwds", "context_kwds",
"verify", "hash", "identify",
)
def is_crypt_handler(obj):
"""check if object follows the :ref:`password-hash-api`"""
# XXX: change to use isinstance(obj, PasswordHash) under py26+?
return all(hasattr(obj, name) for name in _handler_attrs)
_context_attrs = (
"needs_update",
"genconfig", "genhash",
"verify", "encrypt", "identify",
)
def is_crypt_context(obj):
"""check if object appears to be a :class:`~zdppy_password_hash.context.CryptContext` instance"""
# XXX: change to use isinstance(obj, CryptContext)?
return all(hasattr(obj, name) for name in _context_attrs)
##def has_many_backends(handler):
## "check if handler provides multiple baceknds"
## # NOTE: should also provide get_backend(), .has_backend(), and .backends attr
## return hasattr(handler, "set_backend")
def has_rounds_info(handler):
"""check if handler provides the optional :ref:`rounds information <rounds-attributes>` attributes"""
return ('rounds' in handler.setting_kwds and
getattr(handler, "min_rounds", None) is not None)
def has_salt_info(handler):
"""check if handler provides the optional :ref:`salt information <salt-attributes>` attributes"""
return ('salt' in handler.setting_kwds and
getattr(handler, "min_salt_size", None) is not None)
##def has_raw_salt(handler):
## "check if handler takes in encoded salt as unicode (False), or decoded salt as bytes (True)"
## sc = getattr(handler, "salt_chars", None)
## if sc is None:
## return None
## elif isinstance(sc, unicode):
## return False
## elif isinstance(sc, bytes):
## return True
## else:
## raise TypeError("handler.salt_chars must be None/unicode/bytes")
#=============================================================================
# eof
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/utils/__init__.py | __init__.py |
#=============================================================================
# imports
#=============================================================================
from __future__ import with_statement
# core
import inspect
import logging; log = logging.getLogger(__name__)
import math
import threading
from warnings import warn
# site
# pkg
import zdppy_password_hash.exc as exc, zdppy_password_hash.ifc as ifc
from zdppy_password_hash.exc import MissingBackendError, PasslibConfigWarning, \
PasslibHashWarning
from zdppy_password_hash.ifc import PasswordHash
from zdppy_password_hash.registry import get_crypt_handler
from zdppy_password_hash.utils import (
consteq, getrandstr, getrandbytes,
rng, to_native_str,
is_crypt_handler, to_unicode,
MAX_PASSWORD_SIZE, accepts_keyword, as_bool,
update_mixin_classes)
from zdppy_password_hash.utils.binary import (
BASE64_CHARS, HASH64_CHARS, PADDED_BASE64_CHARS,
HEX_CHARS, UPPER_HEX_CHARS, LOWER_HEX_CHARS,
ALL_BYTE_VALUES,
)
from zdppy_password_hash.utils.compat import join_byte_values, irange, u, native_string_types, \
uascii_to_str, join_unicode, unicode, str_to_uascii, \
join_unicode, unicode_or_bytes_types, PY2, int_types
from zdppy_password_hash.utils.decor import classproperty, deprecated_method
# local
__all__ = [
# helpers for implementing MCF handlers
'parse_mc2',
'parse_mc3',
'render_mc2',
'render_mc3',
# framework for implementing handlers
'GenericHandler',
'StaticHandler',
'HasUserContext',
'HasRawChecksum',
'HasManyIdents',
'HasSalt',
'HasRawSalt',
'HasRounds',
'HasManyBackends',
# other helpers
'PrefixWrapper',
# TODO: a bunch of other things are commonly assumed in this namespace
# (e.g. HEX_CHARS etc); need to audit uses and update this list.
]
#=============================================================================
# constants
#=============================================================================
# deprecated aliases - will be removed after zdppy_password_hash 1.8
H64_CHARS = HASH64_CHARS
B64_CHARS = BASE64_CHARS
PADDED_B64_CHARS = PADDED_BASE64_CHARS
UC_HEX_CHARS = UPPER_HEX_CHARS
LC_HEX_CHARS = LOWER_HEX_CHARS
#=============================================================================
# support functions
#=============================================================================
def _bitsize(count, chars):
"""helper for bitsize() methods"""
if chars and count:
        return int(count * math.log(len(chars), 2))
else:
return 0
def guess_app_stacklevel(start=1):
"""
try to guess stacklevel for application warning.
looks for first frame not part of zdppy_password_hash.
"""
frame = inspect.currentframe()
count = -start
try:
while frame:
name = frame.f_globals.get('__name__', "")
if name.startswith("zdppy_password_hash.tests.") or not name.startswith("zdppy_password_hash."):
return max(1, count)
count += 1
frame = frame.f_back
return start
finally:
del frame
def warn_hash_settings_deprecation(handler, kwds):
warn("passing settings to %(handler)s.hash() is deprecated, and won't be supported in Passlib 2.0; "
"use '%(handler)s.using(**settings).hash(secret)' instead" % dict(handler=handler.name),
DeprecationWarning, stacklevel=guess_app_stacklevel(2))
def extract_settings_kwds(handler, kwds):
"""
helper to extract settings kwds from mix of context & settings kwds.
pops settings keys from kwds, returns them as a dict.
"""
context_keys = set(handler.context_kwds)
return dict((key, kwds.pop(key)) for key in list(kwds) if key not in context_keys)
#=============================================================================
# parsing helpers
#=============================================================================
_UDOLLAR = u("$")
_UZERO = u("0")
def validate_secret(secret):
"""ensure secret has correct type & size"""
if not isinstance(secret, unicode_or_bytes_types):
raise exc.ExpectedStringError(secret, "secret")
if len(secret) > MAX_PASSWORD_SIZE:
raise exc.PasswordSizeError(MAX_PASSWORD_SIZE)
def to_unicode_for_identify(hash):
"""convert hash to unicode for identify method"""
if isinstance(hash, unicode):
return hash
elif isinstance(hash, bytes):
# try as utf-8, but if it fails, use foolproof latin-1,
# since we don't really care about non-ascii chars
# when running identify.
try:
return hash.decode("utf-8")
except UnicodeDecodeError:
return hash.decode("latin-1")
else:
raise exc.ExpectedStringError(hash, "hash")
def parse_mc2(hash, prefix, sep=_UDOLLAR, handler=None):
"""parse hash using 2-part modular crypt format.
this expects a hash of the format :samp:`{prefix}{salt}[${checksum}]`,
such as md5_crypt, and parses it into salt / checksum portions.
:arg hash: the hash to parse (bytes or unicode)
:arg prefix: the identifying prefix (unicode)
:param sep: field separator (unicode, defaults to ``$``).
:param handler: handler class to pass to error constructors.
:returns:
a ``(salt, chk | None)`` tuple.
"""
# detect prefix
hash = to_unicode(hash, "ascii", "hash")
assert isinstance(prefix, unicode)
if not hash.startswith(prefix):
raise exc.InvalidHashError(handler)
# parse 2-part hash or 1-part config string
assert isinstance(sep, unicode)
parts = hash[len(prefix):].split(sep)
if len(parts) == 2:
salt, chk = parts
return salt, chk or None
elif len(parts) == 1:
return parts[0], None
else:
raise exc.MalformedHashError(handler)
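# Illustrative sketch (hypothetical md5_crypt-style string):
##  parse_mc2(u("$1$3azHgidD$SrJPt7B.9rekpmwJwtON31"), u("$1$"))
##      # -> (u'3azHgidD', u'SrJPt7B.9rekpmwJwtON31')
##  parse_mc2(u("$1$3azHgidD"), u("$1$"))
##      # -> (u'3azHgidD', None), i.e. a config string without checksum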
def parse_mc3(hash, prefix, sep=_UDOLLAR, rounds_base=10,
default_rounds=None, handler=None):
"""parse hash using 3-part modular crypt format.
this expects a hash of the format :samp:`{prefix}[{rounds}]${salt}[${checksum}]`,
such as sha1_crypt, and parses it into rounds / salt / checksum portions.
    tries to convert the rounds field to an integer,
    and throws an error if it has zero-padding.
:arg hash: the hash to parse (bytes or unicode)
:arg prefix: the identifying prefix (unicode)
:param sep: field separator (unicode, defaults to ``$``).
:param rounds_base:
the numeric base the rounds are encoded in (defaults to base 10).
:param default_rounds:
the default rounds value to return if the rounds field was omitted.
if this is ``None`` (the default), the rounds field is *required*.
:param handler: handler class to pass to error constructors.
:returns:
a ``(rounds : int, salt, chk | None)`` tuple.
"""
# detect prefix
hash = to_unicode(hash, "ascii", "hash")
assert isinstance(prefix, unicode)
if not hash.startswith(prefix):
raise exc.InvalidHashError(handler)
# parse 3-part hash or 2-part config string
assert isinstance(sep, unicode)
parts = hash[len(prefix):].split(sep)
if len(parts) == 3:
rounds, salt, chk = parts
elif len(parts) == 2:
rounds, salt = parts
chk = None
else:
raise exc.MalformedHashError(handler)
# validate & parse rounds portion
if rounds.startswith(_UZERO) and rounds != _UZERO:
raise exc.ZeroPaddedRoundsError(handler)
elif rounds:
rounds = int(rounds, rounds_base)
elif default_rounds is None:
raise exc.MalformedHashError(handler, "empty rounds field")
else:
rounds = default_rounds
# return result
return rounds, salt, chk or None
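# Illustrative sketch (hypothetical sha1_crypt-style string):
##  parse_mc3(u("$sha1$40000$jtNX3nZ2$hBNaIXkt4EaGT1jmxogh9AGBnGVzzPfj"), u("$sha1$"))
##      # -> (40000, u'jtNX3nZ2', u'hBNaIXkt4EaGT1jmxogh9AGBnGVzzPfj')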
# def parse_mc3_long(hash, prefix, sep=_UDOLLAR, handler=None):
# """
# parse hash using 3-part modular crypt format,
# with complex settings string instead of simple rounds.
# otherwise works same as :func:`parse_mc3`
# """
# # detect prefix
# hash = to_unicode(hash, "ascii", "hash")
# assert isinstance(prefix, unicode)
# if not hash.startswith(prefix):
# raise exc.InvalidHashError(handler)
#
# # parse 3-part hash or 2-part config string
# assert isinstance(sep, unicode)
# parts = hash[len(prefix):].split(sep)
# if len(parts) == 3:
# return parts
# elif len(parts) == 2:
# settings, salt = parts
# return settings, salt, None
# else:
# raise exc.MalformedHashError(handler)
def parse_int(source, base=10, default=None, param="value", handler=None):
"""
helper to parse an integer config field
:arg source: unicode source string
:param base: numeric base
:param default: optional default if source is empty
:param param: name of variable, for error msgs
:param handler: handler class, for error msgs
"""
if source.startswith(_UZERO) and source != _UZERO:
raise exc.MalformedHashError(handler, "zero-padded %s field" % param)
elif source:
return int(source, base)
elif default is None:
raise exc.MalformedHashError(handler, "empty %s field" % param)
else:
return default
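# Illustrative sketch:
##  parse_int(u("40000"), param="rounds")           # -> 40000
##  parse_int(u(""), default=5000, param="rounds")  # -> 5000
##  parse_int(u("0123"), param="rounds")            # raises MalformedHashError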
#=============================================================================
# formatting helpers
#=============================================================================
def render_mc2(ident, salt, checksum, sep=u("$")):
"""format hash using 2-part modular crypt format; inverse of parse_mc2()
returns native string with format :samp:`{ident}{salt}[${checksum}]`,
such as used by md5_crypt.
:arg ident: identifier prefix (unicode)
:arg salt: encoded salt (unicode)
:arg checksum: encoded checksum (unicode or None)
:param sep: separator char (unicode, defaults to ``$``)
:returns:
config or hash (native str)
"""
if checksum:
parts = [ident, salt, sep, checksum]
else:
parts = [ident, salt]
return uascii_to_str(join_unicode(parts))
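# Illustrative sketch -- round-trips with parse_mc2():
##  render_mc2(u("$1$"), u("3azHgidD"), u("SrJPt7B.9rekpmwJwtON31"))
##      # -> '$1$3azHgidD$SrJPt7B.9rekpmwJwtON31'
##  render_mc2(u("$1$"), u("3azHgidD"), None)
##      # -> '$1$3azHgidD' (config string)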
def render_mc3(ident, rounds, salt, checksum, sep=u("$"), rounds_base=10):
"""format hash using 3-part modular crypt format; inverse of parse_mc3()
returns native string with format :samp:`{ident}[{rounds}$]{salt}[${checksum}]`,
such as used by sha1_crypt.
:arg ident: identifier prefix (unicode)
:arg rounds: rounds field (int or None)
:arg salt: encoded salt (unicode)
:arg checksum: encoded checksum (unicode or None)
:param sep: separator char (unicode, defaults to ``$``)
:param rounds_base: base to encode rounds value (defaults to base 10)
:returns:
config or hash (native str)
"""
if rounds is None:
rounds = u('')
elif rounds_base == 16:
rounds = u("%x") % rounds
else:
assert rounds_base == 10
rounds = unicode(rounds)
if checksum:
parts = [ident, rounds, sep, salt, sep, checksum]
else:
parts = [ident, rounds, sep, salt]
return uascii_to_str(join_unicode(parts))
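# Illustrative sketch -- round-trips with parse_mc3():
##  render_mc3(u("$sha1$"), 40000, u("jtNX3nZ2"), u("hBNaIXkt4EaGT1jmxogh9AGBnGVzzPfj"))
##      # -> '$sha1$40000$jtNX3nZ2$hBNaIXkt4EaGT1jmxogh9AGBnGVzzPfj'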
def mask_value(value, show=4, pct=0.125, char=u"*"):
"""
helper to mask contents of sensitive field.
:param value:
raw value (str, bytes, etc)
:param show:
max # of characters to remain visible
:param pct:
don't show more than this % of input.
:param char:
character to use for masking
:rtype: str | None
"""
if value is None:
return None
if not isinstance(value, unicode):
if isinstance(value, bytes):
from zdppy_password_hash.utils.binary import ab64_encode
value = ab64_encode(value).decode("ascii")
else:
value = unicode(value)
size = len(value)
show = min(show, int(size * pct))
return value[:show] + char * (size - show)
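# Illustrative sketch: 'show' is further capped by 'pct', so at most 12.5%
# of a 32-char value stays visible (4 chars here):
##  mask_value(u("a1b2c3d4e5f6g7h8i9j0k1l2m3n4o5p6"))
##      # -> u'a1b2' + u'*' * 28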
#=============================================================================
# parameter helpers
#=============================================================================
def validate_default_value(handler, default, norm, param="value"):
"""
assert helper that quickly validates default value.
designed to get out of the way and reduce overhead when asserts are stripped.
"""
assert default is not None, "%s lacks default %s" % (handler.name, param)
assert norm(default) == default, "%s: invalid default %s: %r" % (handler.name, param, default)
return True
def norm_integer(handler, value, min=1, max=None, # *
param="value", relaxed=False):
"""
helper to normalize and validate an integer value (e.g. rounds, salt_size)
    :arg handler: handler class, for error msgs
    :arg value: value provided to constructor
    :param min: minimum value (defaults to 1)
    :param max: maximum value (default ``None`` means no maximum)
    :param param: name of parameter, for error msgs
    :param relaxed: if ``True``, out-of-range values are clamped (with a warning) instead of raising
    :returns: validated value
"""
# check type
if not isinstance(value, int_types):
raise exc.ExpectedTypeError(value, "integer", param)
# check minimum
if value < min:
msg = "%s: %s (%d) is too low, must be at least %d" % (handler.name, param, value, min)
if relaxed:
warn(msg, exc.PasslibHashWarning)
value = min
else:
raise ValueError(msg)
# check maximum
if max and value > max:
msg = "%s: %s (%d) is too large, cannot be more than %d" % (handler.name, param, value, max)
if relaxed:
warn(msg, exc.PasslibHashWarning)
value = max
else:
raise ValueError(msg)
return value
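# Illustrative sketch: out-of-range values raise by default,
# but are clamped (with a PasslibHashWarning) under relaxed=True:
##  norm_integer(handler, 50000, min=1000, max=10000, param="rounds")
##      # raises ValueError
##  norm_integer(handler, 50000, min=1000, max=10000, param="rounds", relaxed=True)
##      # warns, then returns 10000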
#=============================================================================
# MinimalHandler
#=============================================================================
class MinimalHandler(PasswordHash):
"""
helper class for implementing hash handlers.
provides nothing besides a base implementation of the .using() subclass constructor.
"""
#===================================================================
# class attr
#===================================================================
#: private flag used by using() constructor to detect if this is already a subclass.
_configured = False
#===================================================================
# configuration interface
#===================================================================
@classmethod
def using(cls, relaxed=False):
# NOTE: this provides the base implementation, which takes care of
# creating the newly configured class. Mixins and subclasses
# should wrap this, and modify the returned class to suit their options.
# NOTE: 'relaxed' keyword is ignored here, but parsed so that subclasses
# can check for it as argument, and modify their parsing behavior accordingly.
name = cls.__name__
if not cls._configured:
# TODO: straighten out class naming, repr, and .name attr
name = "<customized %s hasher>" % name
return type(name, (cls,), dict(__module__=cls.__module__, _configured=True))
#===================================================================
# eoc
#===================================================================
class TruncateMixin(MinimalHandler):
"""
PasswordHash mixin which provides a method
that will check if secret would be truncated,
and can be configured to throw an error.
.. warning::
Hashers using this mixin will generally need to override
the default PasswordHash.truncate_error policy of "True",
and will similarly want to override .truncate_verify_reject as well.
TODO: This should be done explicitly, but for now this mixin sets
these flags implicitly.
"""
truncate_error = False
truncate_verify_reject = False
@classmethod
def using(cls, truncate_error=None, **kwds):
subcls = super(TruncateMixin, cls).using(**kwds)
if truncate_error is not None:
truncate_error = as_bool(truncate_error, param="truncate_error")
if truncate_error is not None:
subcls.truncate_error = truncate_error
return subcls
@classmethod
def _check_truncate_policy(cls, secret):
"""
make sure secret won't be truncated.
NOTE: this should only be called for .hash(), not for .verify(),
which should honor the .truncate_verify_reject policy.
"""
assert cls.truncate_size is not None, "truncate_size must be set by subclass"
if cls.truncate_error and len(secret) > cls.truncate_size:
raise exc.PasswordTruncateError(cls)
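# Illustrative sketch (assuming a handler with truncate_size=8, such as
# des_crypt): truncation is silent by default, but can be made an error
# per-configuration via using():
##  strict_crypt = des_crypt.using(truncate_error=True)
##  strict_crypt.hash("longer-than-8-chars")  # raises PasswordTruncateError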
#=============================================================================
# GenericHandler
#=============================================================================
class GenericHandler(MinimalHandler):
"""helper class for implementing hash handlers.
GenericHandler-derived classes will have (at least) the following
constructor options, though others may be added by mixins
and by the class itself:
:param checksum:
this should contain the digest portion of a
parsed hash (mainly provided when the constructor is called
by :meth:`from_string()`).
defaults to ``None``.
:param use_defaults:
If ``False`` (the default), a :exc:`TypeError` should be thrown
if any settings required by the handler were not explicitly provided.
If ``True``, the handler should attempt to provide a default for any
missing values. This means generate missing salts, fill in default
cost parameters, etc.
This is typically only set to ``True`` when the constructor
is called by :meth:`hash`, allowing user-provided values
to be handled in a more permissive manner.
:param relaxed:
If ``False`` (the default), a :exc:`ValueError` should be thrown
if any settings are out of bounds or otherwise invalid.
        If ``True``, they should be corrected if possible, and a warning
        issued. If correction is not possible, only then should an error be raised.
(e.g. under ``relaxed=True``, rounds values will be clamped
to min/max rounds).
        This is mainly used when parsing the config strings of certain
        hashes, whose specifications require implementations to be tolerant
        of incorrect values in salt strings.
Class Attributes
================
.. attribute:: ident
[optional]
If this attribute is filled in, the default :meth:`identify` method will use
        it as an identifying prefix that can be used to recognize instances of this handler's
hash. Filling this out is recommended for speed.
This should be a unicode str.
.. attribute:: _hash_regex
[optional]
If this attribute is filled in, the default :meth:`identify` method
will use it to recognize instances of the hash. If :attr:`ident`
is specified, this will be ignored.
This should be a unique regex object.
.. attribute:: checksum_size
[optional]
Specifies the number of characters that should be expected in the checksum string.
If omitted, no check will be performed.
.. attribute:: checksum_chars
[optional]
A string listing all the characters allowed in the checksum string.
If omitted, no check will be performed.
This should be a unicode str.
.. attribute:: _stub_checksum
Placeholder checksum that will be used by genconfig()
in lieu of actually generating a hash for the empty string.
This should be a string of the same datatype as :attr:`checksum`.
Instance Attributes
===================
.. attribute:: checksum
The checksum string provided to the constructor (after passing it
through :meth:`_norm_checksum`).
Required Subclass Methods
=========================
The following methods must be provided by handler subclass:
.. automethod:: from_string
.. automethod:: to_string
.. automethod:: _calc_checksum
Default Methods
===============
The following methods have default implementations that should work for
most cases, though they may be overridden if the hash subclass needs to:
.. automethod:: _norm_checksum
.. automethod:: genconfig
.. automethod:: genhash
.. automethod:: identify
.. automethod:: hash
.. automethod:: verify
"""
#===================================================================
# class attr
#===================================================================
# this must be provided by the actual class.
setting_kwds = None
# providing default since most classes don't use this at all.
context_kwds = ()
# optional prefix that uniquely identifies hash
ident = None
# optional regexp for recognizing hashes,
# used by default identify() if .ident isn't specified.
_hash_regex = None
# if specified, _norm_checksum will require this length
checksum_size = None
# if specified, _norm_checksum() will validate this
checksum_chars = None
# private flag used by HasRawChecksum
_checksum_is_bytes = False
#===================================================================
# instance attrs
#===================================================================
checksum = None # stores checksum
# use_defaults = False # whether _norm_xxx() funcs should fill in defaults.
# relaxed = False # when _norm_xxx() funcs should be strict about inputs
#===================================================================
# init
#===================================================================
def __init__(self, checksum=None, use_defaults=False, **kwds):
self.use_defaults = use_defaults
super(GenericHandler, self).__init__(**kwds)
if checksum is not None:
# XXX: do we need to set .relaxed for checksum coercion?
self.checksum = self._norm_checksum(checksum)
# NOTE: would like to make this classmethod, but fshp checksum size
    # is dependent on .variant, so leaving this as instance method.
def _norm_checksum(self, checksum, relaxed=False):
"""validates checksum keyword against class requirements,
returns normalized version of checksum.
"""
# NOTE: by default this code assumes checksum should be unicode.
# For classes where the checksum is raw bytes, the HasRawChecksum sets
# the _checksum_is_bytes flag which alters various code paths below.
# normalize to bytes / unicode
raw = self._checksum_is_bytes
if raw:
# NOTE: no clear route to reasonably convert unicode -> raw bytes,
# so 'relaxed' does nothing here
if not isinstance(checksum, bytes):
raise exc.ExpectedTypeError(checksum, "bytes", "checksum")
elif not isinstance(checksum, unicode):
if isinstance(checksum, bytes) and relaxed:
warn("checksum should be unicode, not bytes", PasslibHashWarning)
checksum = checksum.decode("ascii")
else:
raise exc.ExpectedTypeError(checksum, "unicode", "checksum")
# check size
cc = self.checksum_size
if cc and len(checksum) != cc:
raise exc.ChecksumSizeError(self, raw=raw)
# check charset
if not raw:
cs = self.checksum_chars
if cs and any(c not in cs for c in checksum):
raise ValueError("invalid characters in %s checksum" % (self.name,))
return checksum
#===================================================================
# password hash api - formatting interface
#===================================================================
@classmethod
def identify(cls, hash):
# NOTE: subclasses may wish to use faster / simpler identify,
# and raise value errors only when an invalid (but identifiable)
# string is parsed
hash = to_unicode_for_identify(hash)
if not hash:
return False
# does class specify a known unique prefix to look for?
ident = cls.ident
if ident is not None:
return hash.startswith(ident)
# does class provide a regexp to use?
pat = cls._hash_regex
if pat is not None:
return pat.match(hash) is not None
# as fallback, try to parse hash, and see if we succeed.
# inefficient, but works for most cases.
try:
cls.from_string(hash)
return True
except ValueError:
return False
@classmethod
def from_string(cls, hash, **context): # pragma: no cover
r"""
return parsed instance from hash/configuration string
        :param \*\*context:
context keywords to pass to constructor (if applicable).
:raises ValueError: if hash is incorrectly formatted
:returns:
hash parsed into components,
for formatting / calculating checksum.
"""
raise NotImplementedError("%s must implement from_string()" % (cls,))
def to_string(self): # pragma: no cover
"""render instance to hash or configuration string
:returns:
hash string with salt & digest included.
should return native string type (ascii-bytes under python 2,
unicode under python 3)
"""
raise NotImplementedError("%s must implement from_string()" % (self.__class__,))
#===================================================================
# checksum generation
#===================================================================
# NOTE: this is only used by genconfig(), and will be removed in zdppy_password_hash 2.0
@property
def _stub_checksum(self):
"""
placeholder used by default .genconfig() so it can avoid expense of calculating digest.
"""
        # use fixed string if available
if self.checksum_size:
if self._checksum_is_bytes:
return b'\x00' * self.checksum_size
if self.checksum_chars:
return self.checksum_chars[0] * self.checksum_size
# hack to minimize cost of calculating real checksum
if isinstance(self, HasRounds):
orig = self.rounds
self.rounds = self.min_rounds or 1
try:
return self._calc_checksum("")
finally:
self.rounds = orig
# final fallback, generate a real checksum
return self._calc_checksum("")
def _calc_checksum(self, secret): # pragma: no cover
"""given secret; calcuate and return encoded checksum portion of hash
string, taking config from object state
calc checksum implementations may assume secret is always
either unicode or bytes, checks are performed by verify/etc.
"""
raise NotImplementedError("%s must implement _calc_checksum()" %
(self.__class__,))
#===================================================================
    # 'application' interface (default implementation)
#===================================================================
@classmethod
def hash(cls, secret, **kwds):
if kwds:
# Deprecating passing any settings keywords via .hash() as of zdppy_password_hash 1.7; everything
# should use .using().hash() instead. If any keywords are specified, presume they're
# context keywords by default (the common case), and extract out any settings kwds.
# Support for passing settings via .hash() will be removed in Passlib 2.0, along with
# this block of code.
settings = extract_settings_kwds(cls, kwds)
if settings:
warn_hash_settings_deprecation(cls, settings)
return cls.using(**settings).hash(secret, **kwds)
# NOTE: at this point, 'kwds' should just contain context_kwds subset
validate_secret(secret)
self = cls(use_defaults=True, **kwds)
self.checksum = self._calc_checksum(secret)
return self.to_string()
@classmethod
def verify(cls, secret, hash, **context):
# NOTE: classes with multiple checksum encodings should either
# override this method, or ensure that from_string() / _norm_checksum()
# ensures .checksum always uses a single canonical representation.
validate_secret(secret)
self = cls.from_string(hash, **context)
chk = self.checksum
if chk is None:
raise exc.MissingDigestError(cls)
return consteq(self._calc_checksum(secret), chk)
#===================================================================
# legacy crypt interface
#===================================================================
@deprecated_method(deprecated="1.7", removed="2.0")
@classmethod
def genconfig(cls, **kwds):
# NOTE: 'kwds' should generally always be settings, so after this completes, *should* be empty.
settings = extract_settings_kwds(cls, kwds)
if settings:
return cls.using(**settings).genconfig(**kwds)
# NOTE: this uses optional stub checksum to bypass potentially expensive digest generation,
# when caller just wants the config string.
self = cls(use_defaults=True, **kwds)
self.checksum = self._stub_checksum
return self.to_string()
@deprecated_method(deprecated="1.7", removed="2.0")
@classmethod
def genhash(cls, secret, config, **context):
if config is None:
raise TypeError("config must be string")
validate_secret(secret)
self = cls.from_string(config, **context)
self.checksum = self._calc_checksum(secret)
return self.to_string()
#===================================================================
    # migration interface (base implementation)
#===================================================================
@classmethod
def needs_update(cls, hash, secret=None, **kwds):
# NOTE: subclasses should generally just wrap _calc_needs_update()
# to check their particular keywords.
self = cls.from_string(hash)
assert isinstance(self, cls)
return self._calc_needs_update(secret=secret, **kwds)
def _calc_needs_update(self, secret=None):
"""
internal helper for :meth:`needs_update`.
"""
# NOTE: this just provides a stub, subclasses & mixins
# should override this with their own tests.
return False
#===================================================================
# experimental - the following methods are not finished or tested,
    # but may work correctly for some hashes
#===================================================================
#: internal helper for forcing settings to be included, even if default matches
_always_parse_settings = ()
#: internal helper for excluding certain setting_kwds from parsehash() result
_unparsed_settings = ("salt_size", "relaxed")
#: parsehash() keys that need to be sanitized
_unsafe_settings = ("salt", "checksum")
@classproperty
def _parsed_settings(cls):
"""
helper for :meth:`parsehash` --
        returns list of attributes which should be extracted by parsehash() from hasher object.
default implementation just takes setting_kwds, and excludes _unparsed_settings
"""
return tuple(key for key in cls.setting_kwds if key not in cls._unparsed_settings)
@classmethod
def parsehash(cls, hash, checksum=True, sanitize=False):
"""[experimental method] parse hash into dictionary of settings.
this essentially acts as the inverse of :meth:`hash`: for most
cases, if ``hash = cls.hash(secret, **opts)``, then
``cls.parsehash(hash)`` will return a dict matching the original options
(with the extra keyword *checksum*).
this method may not work correctly for all hashes,
        and may not be available for some. its interface may
change in future releases, if it's kept around at all.
:arg hash: hash to parse
:param checksum: include checksum keyword? (defaults to True)
:param sanitize: mask data for sensitive fields? (defaults to False)
"""
# FIXME: this may not work for hashes with non-standard settings.
# XXX: how should this handle checksum/salt encoding?
# need to work that out for hash() anyways.
self = cls.from_string(hash)
# XXX: could split next few lines out as self._parsehash() for subclassing
        # XXX: could try to resolve ident/variant to publicly suitable alias.
        # XXX: for v1.8, consider making "always" the default policy, and compare to class default
        #      only for whitelisted attrs? or make this whole method obsolete by reworking
        #      so "hasher" object & its attrs are public?
UNSET = object()
always = self._always_parse_settings
kwds = dict((key, getattr(self, key)) for key in self._parsed_settings
if key in always or getattr(self, key) != getattr(cls, key, UNSET))
if checksum and self.checksum is not None:
kwds['checksum'] = self.checksum
if sanitize:
if sanitize is True:
sanitize = mask_value
for key in cls._unsafe_settings:
if key in kwds:
kwds[key] = sanitize(kwds[key])
return kwds
@classmethod
def bitsize(cls, **kwds):
"""[experimental method] return info about bitsizes of hash"""
try:
info = super(GenericHandler, cls).bitsize(**kwds)
except AttributeError:
info = {}
cc = ALL_BYTE_VALUES if cls._checksum_is_bytes else cls.checksum_chars
if cls.checksum_size and cc:
# FIXME: this may overestimate size due to padding bits (e.g. bcrypt)
# FIXME: this will be off by 1 for case-insensitive hashes.
info['checksum'] = _bitsize(cls.checksum_size, cc)
return info
#===================================================================
# eoc
#===================================================================
class StaticHandler(GenericHandler):
"""GenericHandler mixin for classes which have no settings.
    This mixin assumes the entirety of the hash is stored in the
:attr:`checksum` attribute; that the hash has no rounds, salt,
etc. This class provides the following:
* a default :meth:`genconfig` that always returns None.
* a default :meth:`from_string` and :meth:`to_string`
that store the entire hash within :attr:`checksum`,
after optionally stripping a constant prefix.
All that is required by subclasses is an implementation of
the :meth:`_calc_checksum` method.
"""
# TODO: document _norm_hash()
setting_kwds = ()
# optional constant prefix subclasses can specify
_hash_prefix = u("")
@classmethod
def from_string(cls, hash, **context):
# default from_string() which strips optional prefix,
# and passes rest unchanged as checksum value.
hash = to_unicode(hash, "ascii", "hash")
hash = cls._norm_hash(hash)
# could enable this for extra strictness
##pat = cls._hash_regex
##if pat and pat.match(hash) is None:
## raise ValueError("not a valid %s hash" % (cls.name,))
prefix = cls._hash_prefix
if prefix:
if hash.startswith(prefix):
hash = hash[len(prefix):]
else:
raise exc.InvalidHashError(cls)
return cls(checksum=hash, **context)
@classmethod
def _norm_hash(cls, hash):
"""helper for subclasses to normalize case if needed"""
return hash
def to_string(self):
return uascii_to_str(self._hash_prefix + self.checksum)
# per-subclass: stores dynamically created subclass used by _calc_checksum() stub
__cc_compat_hack = None
def _calc_checksum(self, secret):
"""given secret; calcuate and return encoded checksum portion of hash
string, taking config from object state
"""
# NOTE: prior to 1.6, StaticHandler required classes implement genhash
# instead of this method. so if we reach here, we try calling genhash.
# if that succeeds, we issue deprecation warning. if it fails,
# we'll just recurse back to here, but in a different instance.
# so before we call genhash, we create a subclass which handles
# throwing the NotImplementedError.
cls = self.__class__
assert cls.__module__ != __name__
wrapper_cls = cls.__cc_compat_hack
if wrapper_cls is None:
def inner(self, secret):
raise NotImplementedError("%s must implement _calc_checksum()" %
(cls,))
wrapper_cls = cls.__cc_compat_hack = type(cls.__name__ + "_wrapper",
(cls,), dict(_calc_checksum=inner, __module__=cls.__module__))
context = dict((k,getattr(self,k)) for k in self.context_kwds)
# NOTE: passing 'config=None' here even though not currently allowed by ifc,
# since it *is* allowed under the old 1.5 ifc we're checking for here.
try:
hash = wrapper_cls.genhash(secret, None, **context)
except TypeError as err:
if str(err) == "config must be string":
raise NotImplementedError("%s must implement _calc_checksum()" %
(cls,))
else:
raise
warn("%r should be updated to implement StaticHandler._calc_checksum() "
"instead of StaticHandler.genhash(), support for the latter "
"style will be removed in Passlib 1.8" % cls,
DeprecationWarning)
return str_to_uascii(hash)
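# Illustrative sketch of a minimal StaticHandler subclass (hypothetical
# 'hex_md5_demo' hasher, not part of this module; requires 'import hashlib').
# only a name, checksum policy, and _calc_checksum() are needed:
##  class hex_md5_demo(StaticHandler):
##      name = "hex_md5_demo"
##      checksum_size = 32
##      checksum_chars = LOWER_HEX_CHARS
##
##      def _calc_checksum(self, secret):
##          if isinstance(secret, unicode):
##              secret = secret.encode("utf-8")
##          return str_to_uascii(hashlib.md5(secret).hexdigest())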
#=============================================================================
# GenericHandler mixin classes
#=============================================================================
class HasEncodingContext(GenericHandler):
"""helper for classes which require knowledge of the encoding used"""
context_kwds = ("encoding",)
default_encoding = "utf-8"
def __init__(self, encoding=None, **kwds):
super(HasEncodingContext, self).__init__(**kwds)
self.encoding = encoding or self.default_encoding
class HasUserContext(GenericHandler):
"""helper for classes which require a user context keyword"""
context_kwds = ("user",)
def __init__(self, user=None, **kwds):
super(HasUserContext, self).__init__(**kwds)
self.user = user
# XXX: would like to validate user input here, but calls to from_string()
# which lack context keywords would then fail; so leaving code per-handler.
# wrap funcs to accept 'user' as positional arg for ease of use.
@classmethod
def hash(cls, secret, user=None, **context):
return super(HasUserContext, cls).hash(secret, user=user, **context)
@classmethod
def verify(cls, secret, hash, user=None, **context):
return super(HasUserContext, cls).verify(secret, hash, user=user, **context)
@deprecated_method(deprecated="1.7", removed="2.0")
@classmethod
def genhash(cls, secret, config, user=None, **context):
return super(HasUserContext, cls).genhash(secret, config, user=user, **context)
# XXX: how to guess the entropy of a username?
# most of these hashes are for a system (e.g. Oracle)
# which has a few *very common* names and thus really low entropy;
# while the rest are slightly less predictable.
# need to find good reference about this.
##@classmethod
##def bitsize(cls, **kwds):
## info = super(HasUserContext, cls).bitsize(**kwds)
## info['user'] = xxx
## return info
#------------------------------------------------------------------------
# checksum mixins
#------------------------------------------------------------------------
class HasRawChecksum(GenericHandler):
"""mixin for classes which work with decoded checksum bytes
.. todo::
document this class's usage
"""
# NOTE: GenericHandler.checksum_chars is ignored by this implementation.
# NOTE: all HasRawChecksum code is currently part of GenericHandler,
# using private '_checksum_is_bytes' flag.
# this arrangement may be changed in the future.
_checksum_is_bytes = True
#------------------------------------------------------------------------
# ident mixins
#------------------------------------------------------------------------
class HasManyIdents(GenericHandler):
"""mixin for hashes which use multiple prefix identifiers
For the hashes which may use multiple identifier prefixes,
this mixin adds an ``ident`` keyword to constructor.
Any value provided is passed through the :meth:`norm_idents` method,
which takes care of validating the identifier,
as well as allowing aliases for easier specification
of the identifiers by the user.
.. todo::
document this class's usage
Class Methods
=============
.. todo:: document using() and needs_update() options
"""
#===================================================================
# class attrs
#===================================================================
default_ident = None # should be unicode
ident_values = None # should be list of unicode strings
ident_aliases = None # should be dict of unicode -> unicode
# NOTE: any aliases provided to norm_ident() as bytes
# will have been converted to unicode before
# comparing against this dictionary.
# NOTE: relying on test_06_HasManyIdents() to verify
# these are configured correctly.
#===================================================================
# instance attrs
#===================================================================
ident = None
#===================================================================
# variant constructor
#===================================================================
@classmethod
def using(cls, # keyword only...
default_ident=None, ident=None, **kwds):
"""
This mixin adds support for the following :meth:`~zdppy_password_hash.ifc.PasswordHash.using` keywords:
:param default_ident:
default identifier that will be used by resulting customized hasher.
:param ident:
supported as alternate alias for **default_ident**.
"""
# resolve aliases
if ident is not None:
if default_ident is not None:
raise TypeError("'default_ident' and 'ident' are mutually exclusive")
default_ident = ident
# create subclass
subcls = super(HasManyIdents, cls).using(**kwds)
# add custom default ident
# (NOTE: creates instance to run value through _norm_ident())
if default_ident is not None:
subcls.default_ident = cls(ident=default_ident, use_defaults=True).ident
return subcls
#===================================================================
# init
#===================================================================
def __init__(self, ident=None, **kwds):
super(HasManyIdents, self).__init__(**kwds)
# init ident
if ident is not None:
ident = self._norm_ident(ident)
elif self.use_defaults:
ident = self.default_ident
assert validate_default_value(self, ident, self._norm_ident, param="default_ident")
else:
raise TypeError("no ident specified")
self.ident = ident
@classmethod
def _norm_ident(cls, ident):
"""
helper which normalizes & validates 'ident' value.
"""
# handle bytes
assert ident is not None
if isinstance(ident, bytes):
ident = ident.decode('ascii')
# check if identifier is valid
iv = cls.ident_values
if ident in iv:
return ident
# resolve aliases, and recheck against ident_values
ia = cls.ident_aliases
if ia:
try:
value = ia[ident]
except KeyError:
pass
else:
if value in iv:
return value
# failure!
        # XXX: give this its own error type?
raise ValueError("invalid ident: %r" % (ident,))
#===================================================================
# password hash api
#===================================================================
@classmethod
def identify(cls, hash):
hash = to_unicode_for_identify(hash)
return hash.startswith(cls.ident_values)
@classmethod
def _parse_ident(cls, hash):
"""extract ident prefix from hash, helper for subclasses' from_string()"""
hash = to_unicode(hash, "ascii", "hash")
for ident in cls.ident_values:
if hash.startswith(ident):
return ident, hash[len(ident):]
raise exc.InvalidHashError(cls)
# XXX: implement a needs_update() helper that marks everything but default_ident as deprecated?
#===================================================================
# eoc
#===================================================================
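# Illustrative sketch of HasManyIdents configuration (hypothetical
# bcrypt-like handler): subclasses list the valid prefixes, plus optional
# aliases accepted by the 'ident' keyword:
##  class demo_handler(HasManyIdents, GenericHandler):
##      name = "demo_handler"
##      default_ident = u("$2b$")
##      ident_values = (u("$2a$"), u("$2b$"))
##      ident_aliases = {u("2a"): u("$2a$"), u("2b"): u("$2b$")}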
#------------------------------------------------------------------------
# salt mixins
#------------------------------------------------------------------------
class HasSalt(GenericHandler):
"""mixin for validating salts.
    This :class:`GenericHandler` mixin adds a ``salt`` keyword to the class constructor;
any value provided is passed through the :meth:`_norm_salt` method,
which takes care of validating salt length and content,
    as well as generating a new salt if one is not provided.
:param salt:
optional salt string
:param salt_size:
optional size of salt (only used if no salt provided);
defaults to :attr:`default_salt_size`.
Class Attributes
================
In order for :meth:`!_norm_salt` to do its job, the following
attributes should be provided by the handler subclass:
.. attribute:: min_salt_size
The minimum number of characters allowed in a salt string.
        A :exc:`ValueError` will be thrown if the provided salt is too small.
Defaults to ``0``.
.. attribute:: max_salt_size
The maximum number of characters allowed in a salt string.
        By default a :exc:`ValueError` will be thrown if the provided salt is
too large; but if ``relaxed=True``, it will be clipped and a warning
issued instead. Defaults to ``None``, for no maximum.
.. attribute:: default_salt_size
[required]
If no salt is provided, this should specify the size of the salt
that will be generated by :meth:`_generate_salt`. By default
this will fall back to :attr:`max_salt_size`.
.. attribute:: salt_chars
A string containing all the characters which are allowed in the salt
        string. A :exc:`ValueError` will be thrown if any other characters
are encountered. May be set to ``None`` to skip this check (but see
in :attr:`default_salt_chars`).
.. attribute:: default_salt_chars
[required]
        This attribute controls the set of characters used to generate
*new* salt strings. By default, it mirrors :attr:`salt_chars`.
If :attr:`!salt_chars` is ``None``, this attribute must be specified
in order to generate new salts. Aside from that purpose,
the main use of this attribute is for hashes which wish to generate
salts from a restricted subset of :attr:`!salt_chars`; such as
accepting all characters, but only using a-z.
Instance Attributes
===================
.. attribute:: salt
This instance attribute will be filled in with the salt provided
to the constructor (as adapted by :meth:`_norm_salt`)
Subclassable Methods
====================
.. automethod:: _norm_salt
.. automethod:: _generate_salt
"""
# TODO: document _truncate_salt()
# XXX: allow providing raw salt to this class, and encoding it?
#===================================================================
# class attrs
#===================================================================
min_salt_size = 0
max_salt_size = None
salt_chars = None
@classproperty
def default_salt_size(cls):
"""default salt size (defaults to *max_salt_size*)"""
return cls.max_salt_size
@classproperty
def default_salt_chars(cls):
"""charset used to generate new salt strings (defaults to *salt_chars*)"""
return cls.salt_chars
# private helpers for HasRawSalt, shouldn't be used by subclasses
_salt_is_bytes = False
_salt_unit = "chars"
# TODO: could support using(min/max_desired_salt_size) via using() and needs_update()
#===================================================================
# instance attrs
#===================================================================
salt = None
#===================================================================
# variant constructor
#===================================================================
@classmethod
def using(cls, # keyword only...
default_salt_size=None,
salt_size=None, # aliases used by CryptContext
salt=None,
**kwds):
# check for aliases used by CryptContext
if salt_size is not None:
if default_salt_size is not None:
raise TypeError("'salt_size' and 'default_salt_size' aliases are mutually exclusive")
default_salt_size = salt_size
# generate new subclass
subcls = super(HasSalt, cls).using(**kwds)
        # replace default_salt_size
relaxed = kwds.get("relaxed")
if default_salt_size is not None:
if isinstance(default_salt_size, native_string_types):
default_salt_size = int(default_salt_size)
subcls.default_salt_size = subcls._clip_to_valid_salt_size(default_salt_size,
param="salt_size",
relaxed=relaxed)
# if salt specified, replace _generate_salt() with fixed output.
# NOTE: this is mainly useful for testing / debugging.
if salt is not None:
salt = subcls._norm_salt(salt, relaxed=relaxed)
subcls._generate_salt = staticmethod(lambda: salt)
return subcls
# XXX: would like to combine w/ _norm_salt() code below, but doesn't quite fit.
@classmethod
def _clip_to_valid_salt_size(cls, salt_size, param="salt_size", relaxed=True):
"""
internal helper --
clip salt size value to handler's absolute limits (min_salt_size / max_salt_size)
:param relaxed:
            if ``True`` (the default), issues PasslibHashWarning if salt size is outside allowed range.
            if ``False``, raises a ValueError instead.
        :param param:
            optional name of parameter to insert into error/warning messages.
        :returns:
            clipped salt size value
"""
mn = cls.min_salt_size
mx = cls.max_salt_size
# check if salt size is fixed
if mn == mx:
if salt_size != mn:
msg = "%s: %s (%d) must be exactly %d" % (cls.name, param, salt_size, mn)
if relaxed:
warn(msg, PasslibHashWarning)
else:
raise ValueError(msg)
return mn
# check min size
if salt_size < mn:
msg = "%s: %s (%r) below min_salt_size (%d)" % (cls.name, param, salt_size, mn)
if relaxed:
warn(msg, PasslibHashWarning)
salt_size = mn
else:
raise ValueError(msg)
# check max size
if mx and salt_size > mx:
msg = "%s: %s (%r) above max_salt_size (%d)" % (cls.name, param, salt_size, mx)
if relaxed:
warn(msg, PasslibHashWarning)
salt_size = mx
else:
raise ValueError(msg)
return salt_size
#===================================================================
# init
#===================================================================
def __init__(self, salt=None, **kwds):
super(HasSalt, self).__init__(**kwds)
if salt is not None:
salt = self._parse_salt(salt)
elif self.use_defaults:
salt = self._generate_salt()
assert self._norm_salt(salt) == salt, "generated invalid salt: %r" % (salt,)
else:
raise TypeError("no salt specified")
self.salt = salt
# NOTE: split out mainly so sha256_crypt can subclass this
def _parse_salt(self, salt):
return self._norm_salt(salt)
@classmethod
def _norm_salt(cls, salt, relaxed=False):
"""helper to normalize & validate user-provided salt string
:arg salt:
salt string
:raises TypeError:
If salt not correct type.
:raises ValueError:
* if salt contains chars that aren't in :attr:`salt_chars`.
* if salt contains less than :attr:`min_salt_size` characters.
* if ``relaxed=False`` and salt has more than :attr:`max_salt_size`
characters (if ``relaxed=True``, the salt is truncated
and a warning is issued instead).
:returns:
normalized salt
"""
# check type
if cls._salt_is_bytes:
if not isinstance(salt, bytes):
raise exc.ExpectedTypeError(salt, "bytes", "salt")
else:
if not isinstance(salt, unicode):
# NOTE: allowing bytes under py2 so salt can be native str.
if isinstance(salt, bytes) and (PY2 or relaxed):
salt = salt.decode("ascii")
else:
raise exc.ExpectedTypeError(salt, "unicode", "salt")
# check charset
sc = cls.salt_chars
if sc is not None and any(c not in sc for c in salt):
raise ValueError("invalid characters in %s salt" % cls.name)
# check min size
mn = cls.min_salt_size
if mn and len(salt) < mn:
msg = "salt too small (%s requires %s %d %s)" % (cls.name,
"exactly" if mn == cls.max_salt_size else ">=", mn, cls._salt_unit)
raise ValueError(msg)
# check max size
mx = cls.max_salt_size
if mx and len(salt) > mx:
msg = "salt too large (%s requires %s %d %s)" % (cls.name,
"exactly" if mx == mn else "<=", mx, cls._salt_unit)
if relaxed:
warn(msg, PasslibHashWarning)
salt = cls._truncate_salt(salt, mx)
else:
raise ValueError(msg)
return salt
@staticmethod
def _truncate_salt(salt, mx):
        # NOTE: some hashes (e.g. bcrypt) have structure within their
# salt string. this provides a method to override to perform
# the truncation properly
return salt[:mx]
@classmethod
def _generate_salt(cls):
"""
        helper method for __init__(); generates a new random salt string.
"""
return getrandstr(rng, cls.default_salt_chars, cls.default_salt_size)
@classmethod
def bitsize(cls, salt_size=None, **kwds):
"""[experimental method] return info about bitsizes of hash"""
info = super(HasSalt, cls).bitsize(**kwds)
if salt_size is None:
salt_size = cls.default_salt_size
# FIXME: this may overestimate size due to padding bits
# FIXME: this will be off by 1 for case-insensitive hashes.
info['salt'] = _bitsize(salt_size, cls.default_salt_chars)
return info
#===================================================================
# eoc
#===================================================================
class HasRawSalt(HasSalt):
"""mixin for classes which use decoded salt parameter
A variant of :class:`!HasSalt` which takes in decoded bytes instead of an encoded string.
.. todo::
document this class's usage
"""
salt_chars = ALL_BYTE_VALUES
# NOTE: all HasRawSalt code is currently part of HasSalt, using private
# '_salt_is_bytes' flag. this arrangement may be changed in the future.
_salt_is_bytes = True
_salt_unit = "bytes"
@classmethod
def _generate_salt(cls):
assert cls.salt_chars in [None, ALL_BYTE_VALUES]
return getrandbytes(rng, cls.default_salt_size)
#------------------------------------------------------------------------
# rounds mixin
#------------------------------------------------------------------------
class HasRounds(GenericHandler):
"""mixin for validating rounds parameter
This :class:`GenericHandler` mixin adds a ``rounds`` keyword to the class
    constructor; any value provided is passed through the :meth:`_norm_rounds`
method, which takes care of validating the number of rounds.
:param rounds: optional number of rounds hash should use
Class Attributes
================
In order for :meth:`!_norm_rounds` to do its job, the following
attributes must be provided by the handler subclass:
.. attribute:: min_rounds
The minimum number of rounds allowed. A :exc:`ValueError` will be
thrown if the rounds value is too small. Defaults to ``0``.
.. attribute:: max_rounds
The maximum number of rounds allowed. A :exc:`ValueError` will be
thrown if the rounds value is larger than this. Defaults to ``None``
which indicates no limit to the rounds value.
.. attribute:: default_rounds
If no rounds value is provided to constructor, this value will be used.
If this is not specified, a rounds value *must* be specified by the
application.
.. attribute:: rounds_cost
[required]
The ``rounds`` parameter typically encodes a cpu-time cost
for calculating a hash. This should be set to ``"linear"``
(the default) or ``"log2"``, depending on how the rounds value relates
to the actual amount of time that will be required.
Class Methods
=============
.. todo:: document using() and needs_update() options
Instance Attributes
===================
.. attribute:: rounds
This instance attribute will be filled in with the rounds value provided
to the constructor (as adapted by :meth:`_norm_rounds`)
Subclassable Methods
====================
.. automethod:: _norm_rounds
"""
#===================================================================
# class attrs
#===================================================================
#-----------------
# algorithm options -- not application configurable
#-----------------
# XXX: rename to min_valid_rounds / max_valid_rounds,
# to clarify role compared to min_desired_rounds / max_desired_rounds?
min_rounds = 0
max_rounds = None
rounds_cost = "linear" # default to the common case
# hack to pass info to _CryptRecord (will be removed in zdppy_password_hash 2.0)
using_rounds_kwds = ("min_desired_rounds", "max_desired_rounds",
"min_rounds", "max_rounds",
"default_rounds", "vary_rounds")
#-----------------
# desired & default rounds -- configurable via .using() classmethod
#-----------------
min_desired_rounds = None
max_desired_rounds = None
default_rounds = None
vary_rounds = None
#===================================================================
# instance attrs
#===================================================================
rounds = None
#===================================================================
# variant constructor
#===================================================================
@classmethod
def using(cls, # keyword only...
min_desired_rounds=None, max_desired_rounds=None,
default_rounds=None, vary_rounds=None,
min_rounds=None, max_rounds=None, rounds=None, # aliases used by CryptContext
**kwds):
# check for aliases used by CryptContext
if min_rounds is not None:
if min_desired_rounds is not None:
raise TypeError("'min_rounds' and 'min_desired_rounds' aliases are mutually exclusive")
min_desired_rounds = min_rounds
if max_rounds is not None:
if max_desired_rounds is not None:
raise TypeError("'max_rounds' and 'max_desired_rounds' aliases are mutually exclusive")
max_desired_rounds = max_rounds
# use 'rounds' as fallback for min, max, AND default
# XXX: would it be better to make 'default_rounds' and 'rounds'
# aliases, and have a separate 'require_rounds' parameter for this behavior?
if rounds is not None:
if min_desired_rounds is None:
min_desired_rounds = rounds
if max_desired_rounds is None:
max_desired_rounds = rounds
if default_rounds is None:
default_rounds = rounds
# generate new subclass
subcls = super(HasRounds, cls).using(**kwds)
# replace min_desired_rounds
relaxed = kwds.get("relaxed")
if min_desired_rounds is None:
explicit_min_rounds = False
min_desired_rounds = cls.min_desired_rounds
else:
explicit_min_rounds = True
if isinstance(min_desired_rounds, native_string_types):
min_desired_rounds = int(min_desired_rounds)
subcls.min_desired_rounds = subcls._norm_rounds(min_desired_rounds,
param="min_desired_rounds",
relaxed=relaxed)
# replace max_desired_rounds
if max_desired_rounds is None:
max_desired_rounds = cls.max_desired_rounds
else:
if isinstance(max_desired_rounds, native_string_types):
max_desired_rounds = int(max_desired_rounds)
if min_desired_rounds and max_desired_rounds < min_desired_rounds:
msg = "%s: max_desired_rounds (%r) below min_desired_rounds (%r)" % \
(subcls.name, max_desired_rounds, min_desired_rounds)
if explicit_min_rounds:
raise ValueError(msg)
else:
warn(msg, PasslibConfigWarning)
max_desired_rounds = min_desired_rounds
subcls.max_desired_rounds = subcls._norm_rounds(max_desired_rounds,
param="max_desired_rounds",
relaxed=relaxed)
# replace default_rounds
if default_rounds is not None:
if isinstance(default_rounds, native_string_types):
default_rounds = int(default_rounds)
if min_desired_rounds and default_rounds < min_desired_rounds:
raise ValueError("%s: default_rounds (%r) below min_desired_rounds (%r)" %
(subcls.name, default_rounds, min_desired_rounds))
elif max_desired_rounds and default_rounds > max_desired_rounds:
raise ValueError("%s: default_rounds (%r) above max_desired_rounds (%r)" %
(subcls.name, default_rounds, max_desired_rounds))
subcls.default_rounds = subcls._norm_rounds(default_rounds,
param="default_rounds",
relaxed=relaxed)
# clip default rounds to new limits.
if subcls.default_rounds is not None:
subcls.default_rounds = subcls._clip_to_desired_rounds(subcls.default_rounds)
# replace / set vary_rounds
if vary_rounds is not None:
if isinstance(vary_rounds, native_string_types):
if vary_rounds.endswith("%"):
vary_rounds = float(vary_rounds[:-1]) * 0.01
elif "." in vary_rounds:
vary_rounds = float(vary_rounds)
else:
vary_rounds = int(vary_rounds)
if vary_rounds < 0:
raise ValueError("%s: vary_rounds (%r) below 0" %
(subcls.name, vary_rounds))
elif isinstance(vary_rounds, float):
# TODO: deprecate / disallow vary_rounds=1.0
if vary_rounds > 1:
raise ValueError("%s: vary_rounds (%r) above 1.0" %
(subcls.name, vary_rounds))
elif not isinstance(vary_rounds, int):
raise TypeError("vary_rounds must be int or float")
if vary_rounds:
warn("The 'vary_rounds' option is deprecated as of Passlib 1.7, "
"and will be removed in Passlib 2.0", PasslibConfigWarning)
subcls.vary_rounds = vary_rounds
# XXX: could cache _calc_vary_rounds_range() here if needed,
# but would need to handle user manually changing .default_rounds
return subcls
@classmethod
def _clip_to_desired_rounds(cls, rounds):
"""
helper for :meth:`_generate_rounds` --
clips rounds value to desired min/max set by class (if any)
"""
# NOTE: min/max_desired_rounds are None if unset.
# check minimum
mnd = cls.min_desired_rounds or 0
if rounds < mnd:
return mnd
# check maximum
mxd = cls.max_desired_rounds
if mxd and rounds > mxd:
return mxd
return rounds
@classmethod
def _calc_vary_rounds_range(cls, default_rounds):
"""
helper for :meth:`_generate_rounds` --
returns range for vary rounds generation.
:returns:
(lower, upper) limits suitable for random.randint()
"""
# XXX: could precalculate output of this in using() method, and save per-hash cost.
# but then users patching cls.vary_rounds / cls.default_rounds would get wrong value.
assert default_rounds
vary_rounds = cls.vary_rounds
# if vary_rounds specified as % of default, convert it to actual rounds
def linear_to_native(value, upper):
return value
if isinstance(vary_rounds, float):
assert 0 <= vary_rounds <= 1 # TODO: deprecate vary_rounds==1
if cls.rounds_cost == "log2":
# special case -- have to convert default_rounds to linear scale,
# apply +/- vary_rounds to that, and convert back to log scale again.
# linear_to_native() takes care of the "convert back" step.
default_rounds = 1 << default_rounds
def linear_to_native(value, upper):
if value <= 0: # log() undefined for <= 0
return 0
elif upper: # use smallest upper bound for start of range
return int(math.log(value, 2))
else: # use greatest lower bound for end of range
return int(math.ceil(math.log(value, 2)))
# calculate integer vary rounds based on current default_rounds
vary_rounds = int(default_rounds * vary_rounds)
# calculate bounds based on default_rounds +/- vary_rounds
assert vary_rounds >= 0 and isinstance(vary_rounds, int_types)
lower = linear_to_native(default_rounds - vary_rounds, False)
upper = linear_to_native(default_rounds + vary_rounds, True)
return cls._clip_to_desired_rounds(lower), cls._clip_to_desired_rounds(upper)
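    # Worked example (illustrative): for a log2-cost hash with
    # default_rounds=12 and vary_rounds=0.1:
    #   linear default = 2**12 = 4096, vary = int(4096 * 0.1) = 409,
    #   lower = int(log2(4096 - 409))  = int(11.84) = 11,
    #   upper = ceil(log2(4096 + 409)) = ceil(12.13) = 13,
    # so _generate_rounds() picks rounds via rng.randint(11, 13),
    # subject to _clip_to_desired_rounds().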
#===================================================================
# init
#===================================================================
def __init__(self, rounds=None, **kwds):
super(HasRounds, self).__init__(**kwds)
if rounds is not None:
rounds = self._parse_rounds(rounds)
elif self.use_defaults:
rounds = self._generate_rounds()
assert self._norm_rounds(rounds) == rounds, "generated invalid rounds: %r" % (rounds,)
else:
raise TypeError("no rounds specified")
self.rounds = rounds
# NOTE: split out mainly so sha256_crypt & bsdi_crypt can subclass this
def _parse_rounds(self, rounds):
return self._norm_rounds(rounds)
@classmethod
def _norm_rounds(cls, rounds, relaxed=False, param="rounds"):
"""
helper for normalizing rounds value.
:arg rounds:
an integer cost parameter.
:param relaxed:
            if ``True``, issues a PasslibHashWarning if rounds is outside the allowed range.
            if ``False`` (the default), raises a ValueError instead.
:param param:
optional name of parameter to insert into error/warning messages.
:raises TypeError:
* if ``use_defaults=False`` and no rounds is specified
* if rounds is not an integer.
:raises ValueError:
* if rounds is ``None`` and class does not specify a value for
:attr:`default_rounds`.
* if ``relaxed=False`` and rounds is outside bounds of
:attr:`min_rounds` and :attr:`max_rounds` (if ``relaxed=True``,
the rounds value will be clamped, and a warning issued).
:returns:
normalized rounds value
"""
return norm_integer(cls, rounds, cls.min_rounds, cls.max_rounds,
param=param, relaxed=relaxed)
@classmethod
def _generate_rounds(cls):
"""
internal helper for :meth:`_norm_rounds` --
returns default rounds value, incorporating vary_rounds,
and any other limitations hash may place on rounds parameter.
"""
# load default rounds
rounds = cls.default_rounds
if rounds is None:
raise TypeError("%s rounds value must be specified explicitly" % (cls.name,))
        # randomly vary the rounds slightly based on the vary_rounds parameter.
# reads default_rounds internally.
if cls.vary_rounds:
lower, upper = cls._calc_vary_rounds_range(rounds)
assert lower <= rounds <= upper
if lower < upper:
rounds = rng.randint(lower, upper)
return rounds
#===================================================================
# migration interface
#===================================================================
def _calc_needs_update(self, **kwds):
"""
mark hash as needing update if rounds is outside desired bounds.
"""
min_desired_rounds = self.min_desired_rounds
if min_desired_rounds and self.rounds < min_desired_rounds:
return True
max_desired_rounds = self.max_desired_rounds
if max_desired_rounds and self.rounds > max_desired_rounds:
return True
return super(HasRounds, self)._calc_needs_update(**kwds)
#===================================================================
# experimental methods
#===================================================================
@classmethod
def bitsize(cls, rounds=None, vary_rounds=.1, **kwds):
"""[experimental method] return info about bitsizes of hash"""
info = super(HasRounds, cls).bitsize(**kwds)
# NOTE: this essentially estimates how many bits of "salt"
# can be added by varying the rounds value just a little bit.
if cls.rounds_cost != "log2":
# assume rounds can be randomized within the range
# rounds*(1-vary_rounds) ... rounds*(1+vary_rounds)
# then this can be used to encode
# log2(rounds*(1+vary_rounds)-rounds*(1-vary_rounds))
# worth of salt-like bits. this works out to
# 1+log2(rounds*vary_rounds)
import math
if rounds is None:
rounds = cls.default_rounds
info['rounds'] = max(0, int(1+math.log(rounds*vary_rounds,2)))
## else: # log2 rounds
# all bits of the rounds value are critical to choosing
# the time-cost, and can't be randomized.
return info
#===================================================================
# eoc
#===================================================================
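# NOTE: illustrative sketch (not part of the library): standalone
# re-derivation of the "log2" branch of _calc_vary_rounds_range() above,
# to show the math. The function name is hypothetical, and the clipping
# to min/max_desired_rounds that the real method performs is omitted.
def _demo_log2_vary_rounds_range(default_rounds, vary_rounds):
    import math
    linear = 1 << default_rounds            # log2 cost -> linear scale
    delta = int(linear * vary_rounds)       # +/- band in linear space
    # round the lower bound up & the upper bound down, so both stay inside the band
    lower = int(math.ceil(math.log(linear - delta, 2))) if linear > delta else 0
    upper = int(math.log(linear + delta, 2))
    return lower, upper
# e.g. _demo_log2_vary_rounds_range(12, 0.5) -> (11, 12);
# with a small vary_rounds such as 0.1, a log2 hash usually gets no variance.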
#------------------------------------------------------------------------
# other common parameters
#------------------------------------------------------------------------
class ParallelismMixin(GenericHandler):
"""
mixin which provides common behavior for 'parallelism' setting
"""
#===================================================================
# class attrs
#===================================================================
# NOTE: subclasses should add "parallelism" to their settings_kwds
#===================================================================
# instance attrs
#===================================================================
#: parallelism setting (class-level value used as default)
parallelism = 1
#===================================================================
# variant constructor
#===================================================================
@classmethod
def using(cls, parallelism=None, **kwds):
subcls = super(ParallelismMixin, cls).using(**kwds)
if parallelism is not None:
if isinstance(parallelism, native_string_types):
parallelism = int(parallelism)
subcls.parallelism = subcls._norm_parallelism(parallelism, relaxed=kwds.get("relaxed"))
return subcls
#===================================================================
# init
#===================================================================
def __init__(self, parallelism=None, **kwds):
super(ParallelismMixin, self).__init__(**kwds)
# init parallelism
if parallelism is None:
assert validate_default_value(self, self.parallelism, self._norm_parallelism,
param="parallelism")
else:
self.parallelism = self._norm_parallelism(parallelism)
@classmethod
def _norm_parallelism(cls, parallelism, relaxed=False):
return norm_integer(cls, parallelism, min=1, param="parallelism", relaxed=relaxed)
#===================================================================
# hash migration
#===================================================================
def _calc_needs_update(self, **kwds):
"""
        mark hash as needing update if its parallelism setting doesn't match the class's current default.
"""
# XXX: for now, marking all hashes which don't have matching parallelism setting
if self.parallelism != type(self).parallelism:
return True
return super(ParallelismMixin, self)._calc_needs_update(**kwds)
#===================================================================
# eoc
#===================================================================
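# NOTE: illustrative sketch (not part of the library): the effect of
# ParallelismMixin._calc_needs_update() above, using a made-up stand-in
# class rather than a real handler.
def _demo_parallelism_needs_update():
    class FakeHash(object):
        parallelism = 4                     # class default, e.g. set via .using()
        def __init__(self, parallelism):
            self.parallelism = parallelism  # value parsed from a stored hash
        def needs_update(self):
            # same test as _calc_needs_update() above
            return self.parallelism != type(self).parallelism
    assert FakeHash(2).needs_update() is True   # stale hash -> rehash on login
    assert FakeHash(4).needs_update() is False  # matches policy -> keep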
#------------------------------------------------------------------------
# backend mixin & helpers
#------------------------------------------------------------------------
#: global lock that must be held when changing backends.
#: not bothering to make this more granular, as backend switching
#: isn't a speed-critical path. lock is needed since there is some
#: class-level state that may be modified during a "dry run"
_backend_lock = threading.RLock()
class BackendMixin(PasswordHash):
"""
PasswordHash mixin which provides generic framework for supporting multiple backends
within the class.
Public API
----------
.. attribute:: backends
This attribute should be a tuple containing the names of the backends
which are supported. Two common names are ``"os_crypt"`` (if backend
uses :mod:`crypt`), and ``"builtin"`` (if the backend is a pure-python
fallback).
.. automethod:: get_backend
.. automethod:: set_backend
.. automethod:: has_backend
.. warning::
:meth:`set_backend` is intended to be called during application startup --
it affects global state, and switching backends is not guaranteed threadsafe.
Private API (Subclass Hooks)
----------------------------
Subclasses should set the :attr:`!backends` attribute to a tuple of the backends
they wish to support. They should also define one method:
.. classmethod:: _load_backend_{name}(dryrun=False)
One copy of this method should be defined for each :samp:`name` within :attr:`!backends`.
It will be called in order to load the backend, and should take care of whatever
is needed to enable the backend. This may include importing modules, running tests,
issuing warnings, etc.
:param name:
[Optional] name of backend.
:param dryrun:
[Optional] True/False if currently performing a "dry run".
if True, the method should perform all setup actions *except*
switching the class over to the new backend.
:raises zdppy_password_hash.exc.PasslibSecurityError:
if the backend is available, but cannot be loaded due to a security issue.
:returns:
False if backend not available, True if backend loaded.
.. warning::
Due to the way zdppy_password_hash's internals are arranged,
backends should generally store stateful data at the class level
(not the module level), and be prepared to be called on subclasses
which may be set to a different backend from their parent.
(Idempotent module-level data such as lazy imports are fine).
.. automethod:: _finalize_backend
.. versionadded:: 1.7
"""
#===================================================================
# class attrs
#===================================================================
#: list of backend names, provided by subclass.
backends = None
#: private attr mixin uses to hold currently loaded backend (or ``None``)
__backend = None
#: optional class-specific text containing suggestion about what to do
#: when no backends are available.
_no_backend_suggestion = None
#: shared attr used by set_backend() to indicate what backend it's loaded;
#: meaningless while not in set_backend().
_pending_backend = None
#: shared attr used by set_backend() to indicate if it's in "dry run" mode;
#: meaningless while not in set_backend().
_pending_dry_run = False
#===================================================================
# public api
#===================================================================
@classmethod
def get_backend(cls):
"""
Return name of currently active backend.
if no backend has been loaded, loads and returns name of default backend.
:raises zdppy_password_hash.exc.MissingBackendError:
if no backends are available.
:returns:
name of active backend
"""
if not cls.__backend:
cls.set_backend()
assert cls.__backend, "set_backend() failed to load a default backend"
return cls.__backend
@classmethod
def has_backend(cls, name="any"):
"""
Check if support is currently available for specified backend.
:arg name:
name of backend to check for.
can be any string accepted by :meth:`set_backend`.
:raises ValueError:
if backend name is unknown
:returns:
* ``True`` if backend is available.
            * ``False`` if it's unavailable / can't be loaded.
* ``None`` if it's present, but won't load due to a security issue.
"""
try:
cls.set_backend(name, dryrun=True)
return True
except (exc.MissingBackendError, exc.PasslibSecurityError):
return False
@classmethod
def set_backend(cls, name="any", dryrun=False):
"""
Load specified backend.
:arg name:
name of backend to load, can be any of the following:
* ``"any"`` -- use current backend if one is loaded,
otherwise load the first available backend.
* ``"default"`` -- use the first available backend.
* any string in :attr:`backends`, loads specified backend.
:param dryrun:
            If True, this performs all setup actions *except* switching over to the new backend.
(this flag is used to implement :meth:`has_backend`).
.. versionadded:: 1.7
:raises ValueError:
If backend name is unknown.
:raises zdppy_password_hash.exc.MissingBackendError:
If specific backend is missing;
or in the case of ``"any"`` / ``"default"``, if *no* backends are available.
:raises zdppy_password_hash.exc.PasslibSecurityError:
If ``"any"`` or ``"default"`` was specified,
but the only backend available has a PasslibSecurityError.
"""
# check if active backend is acceptable
if (name == "any" and cls.__backend) or (name and name == cls.__backend):
return cls.__backend
# if this isn't the final subclass, whose bases we can modify,
# find that class, and recursively call this method for the proper class.
owner = cls._get_backend_owner()
if owner is not cls:
return owner.set_backend(name, dryrun=dryrun)
# pick first available backend
if name == "any" or name == "default":
default_error = None
for name in cls.backends:
try:
return cls.set_backend(name, dryrun=dryrun)
except exc.MissingBackendError:
continue
except exc.PasslibSecurityError as err:
# backend is available, but refuses to load due to security issue.
if default_error is None:
default_error = err
continue
if default_error is None:
msg = "%s: no backends available" % cls.name
if cls._no_backend_suggestion:
msg += cls._no_backend_suggestion
default_error = exc.MissingBackendError(msg)
raise default_error
# validate name
if name not in cls.backends:
raise exc.UnknownBackendError(cls, name)
# hand off to _set_backend()
with _backend_lock:
orig = cls._pending_backend, cls._pending_dry_run
try:
cls._pending_backend = name
cls._pending_dry_run = dryrun
cls._set_backend(name, dryrun)
finally:
cls._pending_backend, cls._pending_dry_run = orig
if not dryrun:
cls.__backend = name
return name
#===================================================================
# subclass hooks
#===================================================================
@classmethod
def _get_backend_owner(cls):
"""
return class that set_backend() should actually be modifying.
for SubclassBackendMixin, this may not always be the class that was invoked.
"""
return cls
@classmethod
def _set_backend(cls, name, dryrun):
"""
Internal method invoked by :meth:`set_backend`.
handles actual loading of specified backend.
global _backend_lock will be held for duration of this method,
and _pending_dry_run & _pending_backend will also be set.
should return True / False.
"""
loader = cls._get_backend_loader(name)
kwds = {}
if accepts_keyword(loader, "name"):
kwds['name'] = name
if accepts_keyword(loader, "dryrun"):
kwds['dryrun'] = dryrun
ok = loader(**kwds)
if ok is False:
raise exc.MissingBackendError("%s: backend not available: %s" %
(cls.name, name))
elif ok is not True:
raise AssertionError("backend loaders must return True or False"
": %r" % (ok,))
@classmethod
def _get_backend_loader(cls, name):
"""
Hook called to get the specified backend's loader.
Should return callable which optionally takes ``"name"`` and/or
``"dryrun"`` keywords.
Callable should return True if backend initialized successfully.
If backend can't be loaded, callable should return False
OR raise MissingBackendError directly.
"""
raise NotImplementedError("implement in subclass")
@classmethod
def _stub_requires_backend(cls):
"""
helper for subclasses to create stub methods which auto-load backend.
"""
if cls.__backend:
raise AssertionError("%s: _finalize_backend(%r) failed to replace lazy loader" %
(cls.name, cls.__backend))
cls.set_backend()
if not cls.__backend:
raise AssertionError("%s: set_backend() failed to load a default backend" %
(cls.name))
#===================================================================
# eoc
#===================================================================
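# NOTE: illustrative sketch (not part of the library): the minimal hooks a
# BackendMixin subclass provides. "fastlib" is a made-up optional module;
# no real zdppy_password_hash hash is defined this way verbatim.
def _demo_backend_mixin_sketch():
    class demo_hash(BackendMixin):
        name = "demo_hash"
        backends = ("fastlib", "builtin")
        @classmethod
        def _get_backend_loader(cls, name):
            # resolve the documented per-backend loader hook
            return getattr(cls, "_load_backend_" + name)
        @classmethod
        def _load_backend_fastlib(cls, dryrun=False):
            try:
                import fastlib  # hypothetical optional dependency
            except ImportError:
                return False    # signals MissingBackendError upstream
            cls._fastlib = fastlib
            return True
        @classmethod
        def _load_backend_builtin(cls, dryrun=False):
            return True         # pure-python fallback always loads
    # demo_hash.set_backend() would pick "fastlib" if importable, else "builtin"
    return demo_hash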
class SubclassBackendMixin(BackendMixin):
"""
variant of BackendMixin which allows backends to be implemented
as separate mixin classes, and dynamically switches them out.
backend classes should implement a _load_backend() classmethod,
which will be invoked with an optional 'dryrun' keyword,
and should return True or False.
_load_backend() will be invoked with ``cls`` equal to the mixin,
*not* the overall class.
.. versionadded:: 1.7
"""
#===================================================================
# class attrs
#===================================================================
# 'backends' required by BackendMixin
#: NON-INHERITED flag that this class's bases should be modified by SubclassBackendMixin.
#: should only be set to True in *one* subclass in hierarchy.
_backend_mixin_target = False
#: map of backend name -> mixin class
_backend_mixin_map = None
#===================================================================
# backend loading
#===================================================================
@classmethod
def _get_backend_owner(cls):
"""
return base class that we're actually switching backends on
        (needed since backends frequently modify class attrs,
and .set_backend may be called from a subclass).
"""
if not cls._backend_mixin_target:
raise AssertionError("_backend_mixin_target not set")
for base in cls.__mro__:
if base.__dict__.get("_backend_mixin_target"):
return base
raise AssertionError("expected to find class w/ '_backend_mixin_target' set")
@classmethod
def _set_backend(cls, name, dryrun):
# invoke backend loader (will throw error if fails)
super(SubclassBackendMixin, cls)._set_backend(name, dryrun)
# sanity check call args (should trust .set_backend, but will really
# foul things up if this isn't the owner)
assert cls is cls._get_backend_owner(), "_finalize_backend() not invoked on owner"
# pick mixin class
mixin_map = cls._backend_mixin_map
assert mixin_map, "_backend_mixin_map not specified"
mixin_cls = mixin_map[name]
assert issubclass(mixin_cls, SubclassBackendMixin), "invalid mixin class"
# modify <cls> to remove existing backend mixins, and insert the new one
update_mixin_classes(cls,
add=mixin_cls,
remove=mixin_map.values(),
append=True, before=SubclassBackendMixin,
dryrun=dryrun,
)
@classmethod
def _get_backend_loader(cls, name):
assert cls._backend_mixin_map, "_backend_mixin_map not specified"
return cls._backend_mixin_map[name]._load_backend_mixin
#===================================================================
# eoc
#===================================================================
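# NOTE: illustrative sketch (not part of the library): how a handler is
# typically wired up with SubclassBackendMixin -- one mixin class per
# backend, spliced into the owner's bases by set_backend(). All names
# below are hypothetical.
def _demo_subclass_backend_sketch():
    class _fast_mixin(SubclassBackendMixin):
        @classmethod
        def _load_backend_mixin(cls, name, dryrun):
            return True  # real code would import / feature-test here
    class _builtin_mixin(SubclassBackendMixin):
        @classmethod
        def _load_backend_mixin(cls, name, dryrun):
            return True
    class demo_hash(SubclassBackendMixin):
        name = "demo_hash"
        backends = ("fast", "builtin")
        _backend_mixin_target = True
        _backend_mixin_map = dict(fast=_fast_mixin, builtin=_builtin_mixin)
    # demo_hash.set_backend("fast") would insert _fast_mixin into
    # demo_hash's bases via update_mixin_classes() above.
    return demo_hash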
# XXX: rename to ChecksumBackendMixin?
class HasManyBackends(BackendMixin, GenericHandler):
"""
GenericHandler mixin which provides selecting from multiple backends.
.. todo::
finish documenting this class's usage
For hashes which need to select from multiple backends,
depending on the host environment, this class
offers a way to specify alternate :meth:`_calc_checksum` methods,
    and will dynamically choose the best one at runtime.
.. versionchanged:: 1.7
This class now derives from :class:`BackendMixin`, which abstracts
out a more generic framework for supporting multiple backends.
The public api (:meth:`!get_backend`, :meth:`!has_backend`, :meth:`!set_backend`)
is roughly the same.
Private API (Subclass Hooks)
----------------------------
    As of version 1.7, classes should implement :meth:`!_load_backend_{name}`, per
    :class:`BackendMixin`. This hook should invoke :meth:`!_set_calc_checksum_backend`
    to install its backend method.
.. deprecated:: 1.7
The following api is deprecated, and will be removed in Passlib 2.0:
.. attribute:: _has_backend_{name}
private class attribute checked by :meth:`has_backend` to see if a
specific backend is available, it should be either ``True``
or ``False``. One of these should be provided by
the subclass for each backend listed in :attr:`backends`.
.. classmethod:: _calc_checksum_{name}
private class method that should implement :meth:`_calc_checksum`
for a given backend. it will only be called if the backend has
been selected by :meth:`set_backend`. One of these should be provided
by the subclass for each backend listed in :attr:`backends`.
"""
#===================================================================
# digest calculation
#===================================================================
def _calc_checksum(self, secret):
        """wrapper for backend, for common code"""
# NOTE: not overwriting _calc_checksum() directly, so that classes can provide
# common behavior in that method,
# and then invoke _calc_checksum_backend() to do the work.
return self._calc_checksum_backend(secret)
def _calc_checksum_backend(self, secret):
"""
stub for _calc_checksum_backend() --
should load backend if one hasn't been loaded;
if one has been loaded, this method should have been monkeypatched by _finalize_backend().
"""
self._stub_requires_backend()
return self._calc_checksum_backend(secret)
#===================================================================
# BackendMixin hooks
#===================================================================
@classmethod
def _get_backend_loader(cls, name):
"""
subclassed to support legacy 1.6 HasManyBackends api.
(will be removed in zdppy_password_hash 2.0)
"""
# check for 1.7 loader
loader = getattr(cls, "_load_backend_" + name, None)
if loader is None:
# fallback to pre-1.7 _has_backend_xxx + _calc_checksum_xxx() api
def loader():
return cls.__load_legacy_backend(name)
else:
# make sure 1.6 api isn't defined at same time
assert not hasattr(cls, "_has_backend_" + name), (
"%s: can't specify both ._load_backend_%s() "
"and ._has_backend_%s" % (cls.name, name, name)
)
return loader
@classmethod
def __load_legacy_backend(cls, name):
value = getattr(cls, "_has_backend_" + name)
warn("%s: support for ._has_backend_%s is deprecated as of Passlib 1.7, "
"and will be removed in Passlib 1.9/2.0, please implement "
"._load_backend_%s() instead" % (cls.name, name, name),
DeprecationWarning,
)
if value:
func = getattr(cls, "_calc_checksum_" + name)
cls._set_calc_checksum_backend(func)
return True
else:
return False
@classmethod
def _set_calc_checksum_backend(cls, func):
"""
helper used by subclasses to validate & set backend-specific
calc checksum helper.
"""
backend = cls._pending_backend
assert backend, "should only be called during set_backend()"
if not callable(func):
raise RuntimeError("%s: backend %r returned invalid callable: %r" %
(cls.name, backend, func))
if not cls._pending_dry_run:
cls._calc_checksum_backend = func
#===================================================================
# eoc
#===================================================================
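# NOTE: illustrative sketch (not part of the library): the 1.7-style hooks
# a HasManyBackends subclass defines. Each loader installs its checksum
# routine via _set_calc_checksum_backend(); "fastlib" and its digest()
# call are made up.
def _demo_has_many_backends_sketch():
    class demo_hash(HasManyBackends):
        name = "demo_hash"
        backends = ("fastlib", "builtin")
        @classmethod
        def _load_backend_fastlib(cls):
            try:
                import fastlib  # hypothetical optional dependency
            except ImportError:
                return False
            def calc(self, secret):
                return fastlib.digest(secret)  # hypothetical API
            cls._set_calc_checksum_backend(calc)
            return True
        @classmethod
        def _load_backend_builtin(cls):
            def calc(self, secret):
                raise NotImplementedError("pure-python digest goes here")
            cls._set_calc_checksum_backend(calc)
            return True
    return demo_hash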
#=============================================================================
# wrappers
#=============================================================================
# XXX: should this inherit from PasswordHash?
class PrefixWrapper(object):
"""wraps another handler, adding a constant prefix.
instances of this class wrap another password hash handler,
altering the constant prefix that's prepended to the wrapped
handlers' hashes.
this is used mainly by the :doc:`ldap crypt <zdppy_password_hash.hash.ldap_crypt>` handlers;
such as :class:`~zdppy_password_hash.hash.ldap_md5_crypt` which wraps :class:`~zdppy_password_hash.hash.md5_crypt` and adds a ``{CRYPT}`` prefix.
usage::
myhandler = PrefixWrapper("myhandler", "md5_crypt", prefix="$mh$", orig_prefix="$1$")
:param name: name to assign to handler
:param wrapped: handler object or name of registered handler
:param prefix: identifying prefix to prepend to all hashes
:param orig_prefix: prefix to strip (defaults to '').
:param lazy: if True and wrapped handler is specified by name, don't look it up until needed.
"""
#: list of attributes which should be cloned by .using()
_using_clone_attrs = ()
def __init__(self, name, wrapped, prefix=u(''), orig_prefix=u(''), lazy=False,
doc=None, ident=None):
self.name = name
if isinstance(prefix, bytes):
prefix = prefix.decode("ascii")
self.prefix = prefix
if isinstance(orig_prefix, bytes):
orig_prefix = orig_prefix.decode("ascii")
self.orig_prefix = orig_prefix
if doc:
self.__doc__ = doc
if hasattr(wrapped, "name"):
self._set_wrapped(wrapped)
else:
self._wrapped_name = wrapped
if not lazy:
self._get_wrapped()
if ident is not None:
if ident is True:
# signal that prefix is identifiable in itself.
if prefix:
ident = prefix
else:
raise ValueError("no prefix specified")
if isinstance(ident, bytes):
ident = ident.decode("ascii")
# XXX: what if ident includes parts of wrapped hash's ident?
if ident[:len(prefix)] != prefix[:len(ident)]:
raise ValueError("ident must agree with prefix")
self._ident = ident
_wrapped_name = None
_wrapped_handler = None
def _set_wrapped(self, handler):
# check this is a valid handler
if 'ident' in handler.setting_kwds and self.orig_prefix:
# TODO: look into way to fix the issues.
warn("PrefixWrapper: 'orig_prefix' option may not work correctly "
"for handlers which have multiple identifiers: %r" %
(handler.name,), exc.PasslibRuntimeWarning)
# store reference
self._wrapped_handler = handler
def _get_wrapped(self):
handler = self._wrapped_handler
if handler is None:
handler = get_crypt_handler(self._wrapped_name)
self._set_wrapped(handler)
return handler
wrapped = property(_get_wrapped)
_ident = False
@property
def ident(self):
value = self._ident
if value is False:
value = None
# XXX: how will this interact with orig_prefix ?
# not exposing attrs for now if orig_prefix is set.
if not self.orig_prefix:
wrapped = self.wrapped
ident = getattr(wrapped, "ident", None)
if ident is not None:
value = self._wrap_hash(ident)
self._ident = value
return value
_ident_values = False
@property
def ident_values(self):
value = self._ident_values
if value is False:
value = None
# XXX: how will this interact with orig_prefix ?
# not exposing attrs for now if orig_prefix is set.
if not self.orig_prefix:
wrapped = self.wrapped
idents = getattr(wrapped, "ident_values", None)
if idents:
value = tuple(self._wrap_hash(ident) for ident in idents)
##else:
## ident = self.ident
## if ident is not None:
## value = [ident]
self._ident_values = value
return value
# attrs that should be proxied
# XXX: change this to proxy everything that doesn't start with "_"?
_proxy_attrs = (
"setting_kwds", "context_kwds",
"default_rounds", "min_rounds", "max_rounds", "rounds_cost",
"min_desired_rounds", "max_desired_rounds", "vary_rounds",
"default_salt_size", "min_salt_size", "max_salt_size",
"salt_chars", "default_salt_chars",
"backends", "has_backend", "get_backend", "set_backend",
"is_disabled", "truncate_size", "truncate_error",
"truncate_verify_reject",
# internal info attrs needed for test inspection
"_salt_is_bytes",
)
def __repr__(self):
args = [ repr(self._wrapped_name or self._wrapped_handler) ]
if self.prefix:
args.append("prefix=%r" % self.prefix)
if self.orig_prefix:
args.append("orig_prefix=%r" % self.orig_prefix)
args = ", ".join(args)
return 'PrefixWrapper(%r, %s)' % (self.name, args)
def __dir__(self):
attrs = set(dir(self.__class__))
attrs.update(self.__dict__)
wrapped = self.wrapped
attrs.update(
attr for attr in self._proxy_attrs
if hasattr(wrapped, attr)
)
return list(attrs)
def __getattr__(self, attr):
"""proxy most attributes from wrapped class (e.g. rounds, salt size, etc)"""
if attr in self._proxy_attrs:
return getattr(self.wrapped, attr)
raise AttributeError("missing attribute: %r" % (attr,))
def __setattr__(self, attr, value):
# if proxy attr present on wrapped object,
# and we own it, modify *it* instead.
# TODO: needs UTs
# TODO: any other cases where wrapped is "owned"?
# currently just if created via .using()
if attr in self._proxy_attrs and self._derived_from:
wrapped = self.wrapped
if hasattr(wrapped, attr):
setattr(wrapped, attr, value)
return
return object.__setattr__(self, attr, value)
def _unwrap_hash(self, hash):
"""given hash belonging to wrapper, return orig version"""
# NOTE: assumes hash has been validated as unicode already
prefix = self.prefix
if not hash.startswith(prefix):
raise exc.InvalidHashError(self)
# NOTE: always passing to handler as unicode, to save reconversion
return self.orig_prefix + hash[len(prefix):]
def _wrap_hash(self, hash):
"""given orig hash; return one belonging to wrapper"""
# NOTE: should usually be native string.
# (which does mean extra work under py2, but not py3)
if isinstance(hash, bytes):
hash = hash.decode("ascii")
orig_prefix = self.orig_prefix
if not hash.startswith(orig_prefix):
raise exc.InvalidHashError(self.wrapped)
wrapped = self.prefix + hash[len(orig_prefix):]
return uascii_to_str(wrapped)
#: set by _using(), helper for test harness' handler_derived_from()
_derived_from = None
def using(self, **kwds):
# generate subclass of wrapped handler
subcls = self.wrapped.using(**kwds)
assert subcls is not self.wrapped
# then create identical wrapper which wraps the new subclass.
wrapper = PrefixWrapper(self.name, subcls, prefix=self.prefix, orig_prefix=self.orig_prefix)
wrapper._derived_from = self
for attr in self._using_clone_attrs:
setattr(wrapper, attr, getattr(self, attr))
return wrapper
def needs_update(self, hash, **kwds):
hash = self._unwrap_hash(hash)
return self.wrapped.needs_update(hash, **kwds)
def identify(self, hash):
hash = to_unicode_for_identify(hash)
if not hash.startswith(self.prefix):
return False
hash = self._unwrap_hash(hash)
return self.wrapped.identify(hash)
@deprecated_method(deprecated="1.7", removed="2.0")
def genconfig(self, **kwds):
config = self.wrapped.genconfig(**kwds)
if config is None:
raise RuntimeError(".genconfig() must return a string, not None")
return self._wrap_hash(config)
@deprecated_method(deprecated="1.7", removed="2.0")
def genhash(self, secret, config, **kwds):
# TODO: under 2.0, throw TypeError if config is None, rather than passing it through
if config is not None:
config = to_unicode(config, "ascii", "config/hash")
config = self._unwrap_hash(config)
return self._wrap_hash(self.wrapped.genhash(secret, config, **kwds))
@deprecated_method(deprecated="1.7", removed="2.0", replacement=".hash()")
def encrypt(self, secret, **kwds):
return self.hash(secret, **kwds)
def hash(self, secret, **kwds):
return self._wrap_hash(self.wrapped.hash(secret, **kwds))
def verify(self, secret, hash, **kwds):
hash = to_unicode(hash, "ascii", "hash")
hash = self._unwrap_hash(hash)
return self.wrapped.verify(secret, hash, **kwds)
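# NOTE: illustrative sketch (not part of the library): the pure string
# translation performed by _wrap_hash() / _unwrap_hash() above. The hash
# value is a made-up placeholder.
def _demo_prefix_wrapper_translation():
    prefix, orig_prefix = "{CRYPT}", ""     # ldap_md5_crypt-style wrapper
    inner = "$1$salt$checksum"              # hypothetical md5_crypt hash
    public = prefix + inner[len(orig_prefix):]   # what _wrap_hash() emits
    assert public == "{CRYPT}$1$salt$checksum"
    # _unwrap_hash() strips the prefix again before delegating
    # verify() / identify() / needs_update() to the wrapped handler.
    assert orig_prefix + public[len(prefix):] == inner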
#=============================================================================
# eof -- zdppy_password_hash/utils/handlers.py
#=============================================================================
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
class OrderedDict(dict):
"""Dictionary that remembers insertion order"""
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
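    # Illustrative sketch (not from the original recipe): after
    #   od = OrderedDict(); od['a'] = 1; od['b'] = 2
    # the internals look like:
    #   root   = [link_b, link_a, None]   # sentinel [PREV, NEXT, KEY]
    #   link_a = [root, link_b, 'a']
    #   link_b = [link_a, root, 'b']
    #   od.__map == {'a': link_a, 'b': link_b}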
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
        return not self == other

#=============================================================================
# eof -- zdppy_password_hash/utils/compat/_ordered_dict.py
#=============================================================================
#=============================================================================
# figure out what we're running
#=============================================================================
#------------------------------------------------------------------------
# python version
#------------------------------------------------------------------------
import sys
PY2 = sys.version_info < (3,0)
PY3 = sys.version_info >= (3,0)
# make sure it's not an unsupported version, even if we somehow got this far
if sys.version_info < (2,6) or (3,0) <= sys.version_info < (3,2):
raise RuntimeError("Passlib requires Python 2.6, 2.7, or >= 3.2 (as of zdppy_password_hash 1.7)")
PY26 = sys.version_info < (2,7)
#------------------------------------------------------------------------
# python implementation
#------------------------------------------------------------------------
JYTHON = sys.platform.startswith('java')
PYPY = hasattr(sys, "pypy_version_info")
if PYPY and sys.pypy_version_info < (2,0):
raise RuntimeError("zdppy_password_hash requires pypy >= 2.0 (as of zdppy_password_hash 1.7)")
# e.g. '2.7.7\n[Pyston 0.5.1]'
# NOTE: deprecated support 2019-11
PYSTON = "Pyston" in sys.version
#=============================================================================
# common imports
#=============================================================================
import logging; log = logging.getLogger(__name__)
if PY3:
import builtins
else:
import __builtin__ as builtins
def add_doc(obj, doc):
"""add docstring to an object"""
obj.__doc__ = doc
#=============================================================================
# the default exported vars
#=============================================================================
__all__ = [
# python versions
'PY2', 'PY3', 'PY26',
# io
'BytesIO', 'StringIO', 'NativeStringIO', 'SafeConfigParser',
'print_',
# type detection
## 'is_mapping',
'int_types',
'num_types',
'unicode_or_bytes_types',
'native_string_types',
# unicode/bytes types & helpers
'u',
'unicode',
'uascii_to_str', 'bascii_to_str',
'str_to_uascii', 'str_to_bascii',
'join_unicode', 'join_bytes',
'join_byte_values', 'join_byte_elems',
'byte_elem_value',
'iter_byte_values',
# iteration helpers
'irange', #'lrange',
'imap', 'lmap',
'iteritems', 'itervalues',
'next',
# collections
'OrderedDict',
# context helpers
'nullcontext',
# introspection
'get_method_function', 'add_doc',
]
# begin accumulating mapping of lazy-loaded attrs,
# 'merged' into module at bottom
_lazy_attrs = dict()
#=============================================================================
# unicode & bytes types
#=============================================================================
if PY3:
unicode = str
# TODO: once we drop python 3.2 support, can use u'' again!
def u(s):
assert isinstance(s, str)
return s
unicode_or_bytes_types = (str, bytes)
native_string_types = (unicode,)
else:
unicode = builtins.unicode
def u(s):
assert isinstance(s, str)
return s.decode("unicode_escape")
unicode_or_bytes_types = (basestring,)
native_string_types = (basestring,)
# shorter preferred aliases
unicode_or_bytes = unicode_or_bytes_types
unicode_or_str = native_string_types
# unicode -- unicode type, regardless of python version
# bytes -- bytes type, regardless of python version
# unicode_or_bytes_types -- types that text can occur in, whether encoded or not
# native_string_types -- types that native python strings (dict keys etc) can occur in.
#=============================================================================
# unicode & bytes helpers
#=============================================================================
# function to join list of unicode strings
join_unicode = u('').join
# function to join list of byte strings
join_bytes = b''.join
if PY3:
def uascii_to_str(s):
assert isinstance(s, unicode)
return s
def bascii_to_str(s):
assert isinstance(s, bytes)
return s.decode("ascii")
def str_to_uascii(s):
assert isinstance(s, str)
return s
def str_to_bascii(s):
assert isinstance(s, str)
return s.encode("ascii")
join_byte_values = join_byte_elems = bytes
def byte_elem_value(elem):
assert isinstance(elem, int)
return elem
def iter_byte_values(s):
assert isinstance(s, bytes)
return s
def iter_byte_chars(s):
assert isinstance(s, bytes)
# FIXME: there has to be a better way to do this
return (bytes([c]) for c in s)
else:
def uascii_to_str(s):
assert isinstance(s, unicode)
return s.encode("ascii")
def bascii_to_str(s):
assert isinstance(s, bytes)
return s
def str_to_uascii(s):
assert isinstance(s, str)
return s.decode("ascii")
def str_to_bascii(s):
assert isinstance(s, str)
return s
def join_byte_values(values):
return join_bytes(chr(v) for v in values)
join_byte_elems = join_bytes
byte_elem_value = ord
def iter_byte_values(s):
assert isinstance(s, bytes)
return (ord(c) for c in s)
def iter_byte_chars(s):
assert isinstance(s, bytes)
return s
add_doc(uascii_to_str, "helper to convert ascii unicode -> native str")
add_doc(bascii_to_str, "helper to convert ascii bytes -> native str")
add_doc(str_to_uascii, "helper to convert ascii native str -> unicode")
add_doc(str_to_bascii, "helper to convert ascii native str -> bytes")
# join_byte_values -- function to convert list of ordinal integers to byte string.
# join_byte_elems -- function to convert list of byte elements to byte string;
# i.e. what's returned by ``b('a')[0]``...
# this is b('a') under PY2, but 97 under PY3.
# byte_elem_value -- function to convert byte element to integer -- a noop under PY3
add_doc(iter_byte_values, "iterate over byte string as sequence of ints 0-255")
add_doc(iter_byte_chars, "iterate over byte string as sequence of 1-byte strings")
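# NOTE: illustrative sketch (not part of the library): the helpers above
# let byte-level code behave identically across py2 & py3, e.g.:
def _demo_byte_helpers():
    data = b"abc"
    values = list(iter_byte_values(data))
    assert values == [97, 98, 99]            # ints under both py2 & py3
    assert join_byte_values(values) == data  # round-trips back to bytes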
#=============================================================================
# numeric
#=============================================================================
if PY3:
int_types = (int,)
num_types = (int, float)
else:
int_types = (int, long)
num_types = (int, long, float)
#=============================================================================
# iteration helpers
#
# irange - range iterable / view (xrange under py2, range under py3)
# lrange - range list (range under py2, list(range()) under py3)
#
# imap - map to iterator
# lmap - map to list
#=============================================================================
if PY3:
irange = range
##def lrange(*a,**k):
## return list(range(*a,**k))
def lmap(*a, **k):
return list(map(*a,**k))
imap = map
def iteritems(d):
return d.items()
def itervalues(d):
return d.values()
def nextgetter(obj):
return obj.__next__
izip = zip
else:
irange = xrange
##lrange = range
lmap = map
from itertools import imap, izip
def iteritems(d):
return d.iteritems()
def itervalues(d):
return d.itervalues()
def nextgetter(obj):
return obj.next
add_doc(nextgetter, "return function that yields successive values from iterable")
#=============================================================================
# typing
#=============================================================================
##def is_mapping(obj):
## # non-exhaustive check, enough to distinguish from lists, etc
## return hasattr(obj, "items")
#=============================================================================
# introspection
#=============================================================================
if PY3:
method_function_attr = "__func__"
else:
method_function_attr = "im_func"
def get_method_function(func):
"""given (potential) method, return underlying function"""
return getattr(func, method_function_attr, func)
def get_unbound_method_function(func):
"""given unbound method, return underlying function"""
return func if PY3 else func.__func__
def error_from(exc, # *,
cause=None):
"""
backward compat hack to suppress exception cause in python3.3+
    once python < 3.3 support is dropped, can replace all uses with "raise exc from None"
"""
exc.__cause__ = cause
exc.__suppress_context__ = True
return exc
# legacy alias
suppress_cause = error_from
#=============================================================================
# input/output
#=============================================================================
if PY3:
_lazy_attrs = dict(
BytesIO="io.BytesIO",
UnicodeIO="io.StringIO",
NativeStringIO="io.StringIO",
SafeConfigParser="configparser.ConfigParser",
)
print_ = getattr(builtins, "print")
else:
_lazy_attrs = dict(
BytesIO="cStringIO.StringIO",
UnicodeIO="StringIO.StringIO",
NativeStringIO="cStringIO.StringIO",
SafeConfigParser="ConfigParser.SafeConfigParser",
)
def print_(*args, **kwds):
"""The new-style print function."""
# extract kwd args
fp = kwds.pop("file", sys.stdout)
sep = kwds.pop("sep", None)
end = kwds.pop("end", None)
if kwds:
raise TypeError("invalid keyword arguments")
# short-circuit if no target
if fp is None:
return
# use unicode or bytes ?
want_unicode = isinstance(sep, unicode) or isinstance(end, unicode) or \
any(isinstance(arg, unicode) for arg in args)
# pick default end sequence
if end is None:
end = u("\n") if want_unicode else "\n"
elif not isinstance(end, unicode_or_bytes_types):
raise TypeError("end must be None or a string")
# pick default separator
if sep is None:
sep = u(" ") if want_unicode else " "
elif not isinstance(sep, unicode_or_bytes_types):
raise TypeError("sep must be None or a string")
# write to buffer
first = True
write = fp.write
for arg in args:
if first:
first = False
else:
write(sep)
if not isinstance(arg, basestring):
arg = str(arg)
write(arg)
write(end)
#=============================================================================
# collections
#=============================================================================
if PY26:
_lazy_attrs['OrderedDict'] = 'zdppy_password_hash.utils.compat._ordered_dict.OrderedDict'
else:
_lazy_attrs['OrderedDict'] = 'collections.OrderedDict'
#=============================================================================
# context managers
#=============================================================================
try:
# new in py37
from contextlib import nullcontext
except ImportError:
class nullcontext(object):
"""
Context manager that does no additional processing.
"""
def __init__(self, enter_result=None):
self.enter_result = enter_result
def __enter__(self):
return self.enter_result
def __exit__(self, *exc_info):
pass
#=============================================================================
# lazy overlay module
#=============================================================================
from types import ModuleType
def _import_object(source):
    """helper to import an object from a module; accepts the format `path.to.object`"""
modname, modattr = source.rsplit(".",1)
mod = __import__(modname, fromlist=[modattr], level=0)
return getattr(mod, modattr)
class _LazyOverlayModule(ModuleType):
"""proxy module which overlays original module,
and lazily imports specified attributes.
this is mainly used to prevent importing of resources
that are only needed by certain password hashes,
yet allow them to be imported from a single location.
used by :mod:`zdppy_password_hash.utils`, :mod:`zdppy_password_hash.crypto`,
and :mod:`zdppy_password_hash.utils.compat`.
"""
@classmethod
def replace_module(cls, name, attrmap):
orig = sys.modules[name]
self = cls(name, attrmap, orig)
sys.modules[name] = self
return self
def __init__(self, name, attrmap, proxy=None):
ModuleType.__init__(self, name)
self.__attrmap = attrmap
self.__proxy = proxy
self.__log = logging.getLogger(name)
def __getattr__(self, attr):
proxy = self.__proxy
if proxy and hasattr(proxy, attr):
return getattr(proxy, attr)
attrmap = self.__attrmap
if attr in attrmap:
source = attrmap[attr]
if callable(source):
value = source()
else:
value = _import_object(source)
setattr(self, attr, value)
self.__log.debug("loaded lazy attr %r: %r", attr, value)
return value
raise AttributeError("'module' object has no attribute '%s'" % (attr,))
def __repr__(self):
proxy = self.__proxy
if proxy:
return repr(proxy)
else:
return ModuleType.__repr__(self)
def __dir__(self):
attrs = set(dir(self.__class__))
attrs.update(self.__dict__)
attrs.update(self.__attrmap)
proxy = self.__proxy
if proxy is not None:
attrs.update(dir(proxy))
return list(attrs)
# replace this module with overlay that will lazily import attributes.
_LazyOverlayModule.replace_module(__name__, _lazy_attrs)
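# Illustrative usage (hypothetical session): once the overlay is installed,
# the first attribute access triggers the real import, e.g.
#   from zdppy_password_hash.utils import compat
#   compat.OrderedDict   # lazily resolved per _lazy_attrs above
#   compat.BytesIO       # io.BytesIO on py3, cStringIO.StringIO on py2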
#=============================================================================
# eof -- zdppy_password_hash/utils/compat/__init__.py
#=============================================================================
#=============================================================================
# imports
#=============================================================================
from __future__ import absolute_import, division, print_function
# core
import datetime
from distutils.dist import Distribution
import os
import re
import subprocess
import time
# pkg
# local
__all__ = [
"stamp_source",
"stamp_distutils_output",
"append_hg_revision",
"as_bool",
]
#=============================================================================
# helpers
#=============================================================================
def get_command_class(opts, name):
return opts['cmdclass'].get(name) or Distribution().get_command_class(name)
def get_command_options(opts, command):
return opts.setdefault("options", {}).setdefault(command, {})
def set_command_options(opts, command, **kwds):
get_command_options(opts, command).update(kwds)
def _get_file(path):
with open(path, "r") as fh:
return fh.read()
def _replace_file(path, content, dry_run=False):
if dry_run:
return
if os.path.exists(path):
# sdist likes to use hardlinks, have to remove them first,
# or we modify *source* file
os.unlink(path)
with open(path, "w") as fh:
fh.write(content)
def stamp_source(base_dir, version, dry_run=False):
"""
update version info in zdppy_password_hash source
"""
#
# update version string in toplevel package source
#
path = os.path.join(base_dir, "zdppy_password_hash", "__init__.py")
content = _get_file(path)
    content, count = re.subn(r'(?m)^__version__\s*=.*$',
'__version__ = ' + repr(version),
content)
assert count == 1, "failed to replace version string"
_replace_file(path, content, dry_run=dry_run)
#
# update flag in setup.py
# (not present when called from bdist_wheel, etc)
#
path = os.path.join(base_dir, "setup.py")
if os.path.exists(path):
content = _get_file(path)
        content, count = re.subn(r'(?m)^stamp_build\s*=.*$',
'stamp_build = False', content)
assert count == 1, "failed to update 'stamp_build' flag"
_replace_file(path, content, dry_run=dry_run)
def stamp_distutils_output(opts, version):
# subclass buildpy to update version string in source
_build_py = get_command_class(opts, "build_py")
class build_py(_build_py):
def build_packages(self):
_build_py.build_packages(self)
stamp_source(self.build_lib, version, self.dry_run)
opts['cmdclass']['build_py'] = build_py
# subclass sdist to do same thing
_sdist = get_command_class(opts, "sdist")
class sdist(_sdist):
def make_release_tree(self, base_dir, files):
_sdist.make_release_tree(self, base_dir, files)
stamp_source(base_dir, version, self.dry_run)
opts['cmdclass']['sdist'] = sdist
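# Illustrative sketch (hypothetical setup.py wiring, not taken from this file):
#   opts = dict(cmdclass={})
#   version = append_hg_revision("1.7.0.dev0")
#   stamp_distutils_output(opts, version)   # patches build_py & sdist
#   setup(name="...", version=version, **opts)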
def as_bool(value):
return (value or "").lower() in "yes y true t 1".split()
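# e.g. as_bool("Yes") is True; as_bool("0"), as_bool(""), and as_bool(None)
# are all False.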
def append_hg_revision(version):
# call HG via subprocess
# NOTE: for py26 compat, using Popen() instead of check_output()
try:
proc = subprocess.Popen(["hg", "tip", "--template", "{date(date, '%Y%m%d%H%M%S')}+hg.{node|short}"],
stdout=subprocess.PIPE)
stamp, _ = proc.communicate()
if proc.returncode:
raise subprocess.CalledProcessError(1, [])
stamp = stamp.decode("ascii")
except (OSError, subprocess.CalledProcessError):
# fallback - just use build date
now = int(os.environ.get('SOURCE_DATE_EPOCH') or time.time())
build_date = datetime.datetime.utcfromtimestamp(now)
stamp = build_date.strftime("%Y%m%d%H%M%S")
# modify version
if version.endswith((".dev0", ".post0")):
version = version[:-1] + stamp
else:
version += ".post" + stamp
return version
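# Illustrative outputs (stamp values are made up):
#   "1.7.0.dev0"  -> "1.7.0.dev20190101120000+hg.abcdef012345"
#   "1.7.0"       -> "1.7.0.post20190101120000+hg.abcdef012345"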
def install_build_py_exclude(opts):
_build_py = get_command_class(opts, "build_py")
class build_py(_build_py):
user_options = _build_py.user_options + [
("exclude-packages=", None,
"exclude packages from builds"),
]
exclude_packages = None
def finalize_options(self):
_build_py.finalize_options(self)
target = self.packages
for package in self.exclude_packages or []:
if package in target:
target.remove(package)
opts['cmdclass']['build_py'] = build_py
#=============================================================================
# eof -- zdppy_password_hash/_setup/stamp.py
#=============================================================================
#=============================================================================
# imports
#=============================================================================
# core
from functools import update_wrapper, wraps
import logging; log = logging.getLogger(__name__)
import sys
import weakref
from warnings import warn
# site
try:
from django import VERSION as DJANGO_VERSION
log.debug("found django %r installation", DJANGO_VERSION)
except ImportError:
log.debug("django installation not found")
DJANGO_VERSION = ()
# pkg
from zdppy_password_hash import exc, registry
from zdppy_password_hash.context import CryptContext
from zdppy_password_hash.exc import PasslibRuntimeWarning
from zdppy_password_hash.utils.compat import get_method_function, iteritems, OrderedDict, unicode
from zdppy_password_hash.utils.decor import memoized_property
# local
__all__ = [
"DJANGO_VERSION",
"MIN_DJANGO_VERSION",
"get_preset_config",
"quirks",
]
#: minimum version supported by zdppy_password_hash.ext.django
MIN_DJANGO_VERSION = (1, 8)
#=============================================================================
# quirk detection
#=============================================================================
class quirks:
#: django check_password() started throwing error on encoded=None
#: (really identify_hasher did)
none_causes_check_password_error = DJANGO_VERSION >= (2, 1)
#: django is_usable_password() started returning True for password = {None, ""} values.
empty_is_usable_password = DJANGO_VERSION >= (2, 1)
#: django is_usable_password() started returning True for non-hash strings in 2.1
invalid_is_usable_password = DJANGO_VERSION >= (2, 1)
#=============================================================================
# default policies
#=============================================================================
# map preset names -> zdppy_password_hash.app attrs
_preset_map = {
"django-1.0": "django10_context",
"django-1.4": "django14_context",
"django-1.6": "django16_context",
"django-latest": "django_context",
}
def get_preset_config(name):
"""Returns configuration string for one of the preset strings
supported by the ``PASSLIB_CONFIG`` setting.
Currently supported presets:
* ``"zdppy_password_hash-default"`` - default config used by this release of zdppy_password_hash.
* ``"django-default"`` - config matching currently installed django version.
* ``"django-latest"`` - config matching newest django version (currently same as ``"django-1.6"``).
* ``"django-1.0"`` - config used by stock Django 1.0 - 1.3 installs
* ``"django-1.4"`` - config used by stock Django 1.4 installs
* ``"django-1.6"`` - config used by stock Django 1.6 installs
"""
# TODO: add preset which includes HASHERS + PREFERRED_HASHERS,
# after having imported any custom hashers. e.g. "django-current"
if name == "django-default":
if not DJANGO_VERSION:
raise ValueError("can't resolve django-default preset, "
"django not installed")
name = "django-1.6"
if name == "zdppy_password_hash-default":
return PASSLIB_DEFAULT
try:
attr = _preset_map[name]
except KeyError:
raise ValueError("unknown preset config name: %r" % name)
import zdppy_password_hash.apps
return getattr(zdppy_password_hash.apps, attr).to_string()
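# Illustrative usage: get_preset_config("django-1.4") returns the config
# string for zdppy_password_hash.apps.django14_context (per _preset_map above);
# unknown names raise ValueError.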
# default context used by zdppy_password_hash 1.6
PASSLIB_DEFAULT = """
[zdppy_password_hash]
; list of schemes supported by configuration
; currently all django 1.6, 1.4, and 1.0 hashes,
; and three common modular crypt format hashes.
schemes =
django_pbkdf2_sha256, django_pbkdf2_sha1, django_bcrypt, django_bcrypt_sha256,
django_salted_sha1, django_salted_md5, django_des_crypt, hex_md5,
sha512_crypt, bcrypt, phpass
; default scheme to use for new hashes
default = django_pbkdf2_sha256
; hashes using these schemes will automatically be re-hashed
; when the user logs in (currently all django 1.0 hashes)
deprecated =
django_pbkdf2_sha1, django_salted_sha1, django_salted_md5,
django_des_crypt, hex_md5
; sets some common options, including minimum rounds for two primary hashes.
; if a hash has less than this number of rounds, it will be re-hashed.
sha512_crypt__min_rounds = 80000
django_pbkdf2_sha256__min_rounds = 10000
; set somewhat stronger iteration counts for ``User.is_staff``
staff__sha512_crypt__default_rounds = 100000
staff__django_pbkdf2_sha256__default_rounds = 12500
; and even stronger ones for ``User.is_superuser``
superuser__sha512_crypt__default_rounds = 120000
superuser__django_pbkdf2_sha256__default_rounds = 15000
"""
#=============================================================================
# helpers
#=============================================================================
#: prefix used to shoehorn zdppy_password_hash's handler names into django hasher namespace
PASSLIB_WRAPPER_PREFIX = "zdppy_password_hash_"
#: prefix used by all the django-specific hash formats in zdppy_password_hash;
#: all of these hashes should have a ``.django_name`` attribute.
DJANGO_COMPAT_PREFIX = "django_"
#: set of hashes w/o "django_" prefix, but which also expose ``.django_name``.
_other_django_hashes = set(["hex_md5"])
def _wrap_method(method):
"""wrap method object in bare function"""
@wraps(method)
def wrapper(*args, **kwds):
return method(*args, **kwds)
return wrapper
#=============================================================================
# translator
#=============================================================================
class DjangoTranslator(object):
"""
Object which helps translate zdppy_password_hash hasher objects / names
to and from django hasher objects / names.
These methods are wrapped in a class so that results can be cached,
    but with the ability to have independent caches, since django hasher
names may / may not correspond to the same instance (or even class).
"""
#=============================================================================
# instance attrs
#=============================================================================
#: CryptContext instance
#: (if any -- generally only set by DjangoContextAdapter subclass)
context = None
#: internal cache of zdppy_password_hash hasher -> django hasher instance.
#: key stores weakref to zdppy_password_hash hasher.
_django_hasher_cache = None
#: special case -- unsalted_sha1
_django_unsalted_sha1 = None
#: internal cache of django name -> zdppy_password_hash hasher
#: value stores weakrefs to zdppy_password_hash hasher.
_zdppy_password_hash_hasher_cache = None
#=============================================================================
# init
#=============================================================================
def __init__(self, context=None, **kwds):
super(DjangoTranslator, self).__init__(**kwds)
if context is not None:
self.context = context
self._django_hasher_cache = weakref.WeakKeyDictionary()
self._zdppy_password_hash_hasher_cache = weakref.WeakValueDictionary()
def reset_hashers(self):
self._django_hasher_cache.clear()
self._zdppy_password_hash_hasher_cache.clear()
self._django_unsalted_sha1 = None
def _get_zdppy_password_hash_hasher(self, zdppy_password_hash_name):
"""
resolve zdppy_password_hash hasher by name, using context if available.
"""
context = self.context
if context is None:
return registry.get_crypt_handler(zdppy_password_hash_name)
else:
return context.handler(zdppy_password_hash_name)
#=============================================================================
# resolve zdppy_password_hash hasher -> django hasher
#=============================================================================
def zdppy_password_hash_to_django_name(self, zdppy_password_hash_name):
"""
Convert zdppy_password_hash hasher / name to Django hasher name.
"""
return self.zdppy_password_hash_to_django(zdppy_password_hash_name).algorithm
# XXX: add option (in class, or call signature) to always return a wrapper,
# rather than native builtin -- would let HashersTest check that
# our own wrapper + implementations are matching up with their tests.
def zdppy_password_hash_to_django(self, zdppy_password_hash_hasher, cached=True):
"""
Convert zdppy_password_hash hasher / name to Django hasher.
:param zdppy_password_hash_hasher:
zdppy_password_hash hasher / name
:returns:
django hasher instance
"""
# resolve names to hasher
if not hasattr(zdppy_password_hash_hasher, "name"):
zdppy_password_hash_hasher = self._get_zdppy_password_hash_hasher(zdppy_password_hash_hasher)
# check cache
if cached:
cache = self._django_hasher_cache
try:
return cache[zdppy_password_hash_hasher]
except KeyError:
pass
result = cache[zdppy_password_hash_hasher] = \
self.zdppy_password_hash_to_django(zdppy_password_hash_hasher, cached=False)
return result
# find native equivalent, and return wrapper if there isn't one
django_name = getattr(zdppy_password_hash_hasher, "django_name", None)
if django_name:
return self._create_django_hasher(django_name)
else:
return _PasslibHasherWrapper(zdppy_password_hash_hasher)
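    # Example (illustrative sketch, assuming django is importable and the
    # default registry is used): hashes with a native django equivalent
    # resolve to the real django hasher, everything else gets wrapped:
    #
    #   t = DjangoTranslator()
    #   t.zdppy_password_hash_to_django_name("django_pbkdf2_sha256")  # -> "pbkdf2_sha256"
    #   t.zdppy_password_hash_to_django("phpass")  # -> _PasslibHasherWrapper instance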
_builtin_django_hashers = dict(
md5="MD5PasswordHasher",
)
if DJANGO_VERSION > (2, 1):
        # present but disabled by default as of django 2.1; not sure when it
        # was added, so only listing it for versions we know have it.
_builtin_django_hashers.update(
bcrypt="BCryptPasswordHasher",
)
def _create_django_hasher(self, django_name):
"""
helper to create new django hasher by name.
wraps underlying django methods.
"""
# if we haven't patched django, can use it directly
module = sys.modules.get("zdppy_password_hash.ext.django.models")
if module is None or not module.adapter.patched:
from django.contrib.auth.hashers import get_hasher
try:
return get_hasher(django_name)
except ValueError as err:
if not str(err).startswith("Unknown password hashing algorithm"):
raise
else:
# We've patched django's get_hashers(), so calling django's get_hasher()
# or get_hashers_by_algorithm() would only land us back here.
            # As a non-ideal workaround, fall back to the original get_hashers():
get_hashers = module.adapter._manager.getorig("django.contrib.auth.hashers:get_hashers").__wrapped__
for hasher in get_hashers():
if hasher.algorithm == django_name:
return hasher
# hardcode a few for cases where get_hashers() lookup won't work
# (mainly, hashers that are present in django, but disabled by their default config)
path = self._builtin_django_hashers.get(django_name)
if path:
if "." not in path:
path = "django.contrib.auth.hashers." + path
from django.utils.module_loading import import_string
return import_string(path)()
raise ValueError("unknown hasher: %r" % django_name)
#=============================================================================
# reverse django -> zdppy_password_hash
#=============================================================================
def django_to_zdppy_password_hash_name(self, django_name):
"""
Convert Django hasher / name to Passlib hasher name.
"""
return self.django_to_zdppy_password_hash(django_name).name
def django_to_zdppy_password_hash(self, django_name, cached=True):
"""
        Convert Django hasher / name to Passlib hasher.
If present, CryptContext will be checked instead of main registry.
:param django_name:
Django hasher class or algorithm name.
"default" allowed if context provided.
:raises ValueError:
if can't resolve hasher.
:returns:
            zdppy_password_hash hasher
"""
# check for django hasher
if hasattr(django_name, "algorithm"):
# check for zdppy_password_hash adapter
if isinstance(django_name, _PasslibHasherWrapper):
return django_name.zdppy_password_hash_handler
# resolve django hasher -> name
django_name = django_name.algorithm
# check cache
if cached:
cache = self._zdppy_password_hash_hasher_cache
try:
return cache[django_name]
except KeyError:
pass
result = cache[django_name] = \
self.django_to_zdppy_password_hash(django_name, cached=False)
return result
# check if it's an obviously-wrapped name
if django_name.startswith(PASSLIB_WRAPPER_PREFIX):
zdppy_password_hash_name = django_name[len(PASSLIB_WRAPPER_PREFIX):]
return self._get_zdppy_password_hash_hasher(zdppy_password_hash_name)
# resolve default
if django_name == "default":
context = self.context
if context is None:
raise TypeError("can't determine default scheme w/ context")
return context.handler()
# special case: Django uses a separate hasher for "sha1$$digest"
# hashes (unsalted_sha1) and "sha1$salt$digest" (sha1);
# but zdppy_password_hash uses "django_salted_sha1" for both of these.
if django_name == "unsalted_sha1":
django_name = "sha1"
# resolve name
# XXX: bother caching these lists / mapping?
# not needed in long-term due to cache above.
context = self.context
if context is None:
# check registry
# TODO: should make iteration via registry easier
candidates = (
registry.get_crypt_handler(zdppy_password_hash_name)
for zdppy_password_hash_name in registry.list_crypt_handlers()
if zdppy_password_hash_name.startswith(DJANGO_COMPAT_PREFIX) or
zdppy_password_hash_name in _other_django_hashes
)
else:
# check context
candidates = context.schemes(resolve=True)
for handler in candidates:
if getattr(handler, "django_name", None) == django_name:
return handler
# give up
# NOTE: this should only happen for custom django hashers that we don't
# know the equivalents for. _HasherHandler (below) is work in
# progress that would allow us to at least return a wrapper.
raise ValueError("can't translate django name to zdppy_password_hash name: %r" %
(django_name,))
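    # Example (illustrative sketch, assuming the default registry):
    #
    #   t = DjangoTranslator()
    #   t.django_to_zdppy_password_hash_name("pbkdf2_sha256")  # -> "django_pbkdf2_sha256"
    #   t.django_to_zdppy_password_hash("unsalted_sha1")  # -> django_salted_sha1 handler
    #   t.django_to_zdppy_password_hash("zdppy_password_hash_phpass")  # prefix stripped -> phpass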
#=============================================================================
# django hasher lookup
#=============================================================================
def resolve_django_hasher(self, django_name, cached=True):
"""
Take in a django algorithm name, return django hasher.
"""
# check for django hasher
if hasattr(django_name, "algorithm"):
return django_name
# resolve to zdppy_password_hash hasher
zdppy_password_hash_hasher = self.django_to_zdppy_password_hash(django_name, cached=cached)
# special case: Django uses a separate hasher for "sha1$$digest"
# hashes (unsalted_sha1) and "sha1$salt$digest" (sha1);
# but zdppy_password_hash uses "django_salted_sha1" for both of these.
        # XXX: this isn't an ideal way to handle this. would like to do something
        #      like pass "django_variant=django_name" into zdppy_password_hash_to_django(),
        #      and have it cache the separate hasher there.
        #      but that creates a LOT of complication in its cache structure,
        #      for what is just one special case.
if django_name == "unsalted_sha1" and zdppy_password_hash_hasher.name == "django_salted_sha1":
if not cached:
return self._create_django_hasher(django_name)
result = self._django_unsalted_sha1
if result is None:
result = self._django_unsalted_sha1 = self._create_django_hasher(django_name)
return result
# lookup corresponding django hasher
return self.zdppy_password_hash_to_django(zdppy_password_hash_hasher, cached=cached)
#=============================================================================
# eoc
#=============================================================================
#=============================================================================
# adapter
#=============================================================================
class DjangoContextAdapter(DjangoTranslator):
"""
Object which tries to adapt a Passlib CryptContext object,
using a Django-hasher compatible API.
When installed in django, :mod:`!zdppy_password_hash.ext.django` will create
an instance of this class, and then monkeypatch the appropriate
    methods into :mod:`!django.contrib.auth` and other relevant places.
"""
#=============================================================================
# instance attrs
#=============================================================================
#: CryptContext instance we're wrapping
context = None
#: ref to original make_password(),
    #: needed to generate usable passwords that match django
_orig_make_password = None
#: ref to django helper of this name -- not monkeypatched
is_password_usable = None
#: PatchManager instance used to track installation
_manager = None
#: whether config=disabled flag was set
enabled = True
#: patch status
patched = False
#=============================================================================
# init
#=============================================================================
def __init__(self, context=None, get_user_category=None, **kwds):
# init log
self.log = logging.getLogger(__name__ + ".DjangoContextAdapter")
# init parent, filling in default context object
if context is None:
context = CryptContext()
super(DjangoContextAdapter, self).__init__(context=context, **kwds)
# setup user category
if get_user_category:
assert callable(get_user_category)
self.get_user_category = get_user_category
# install lru cache wrappers
try:
from functools import lru_cache # new py32
except ImportError:
from django.utils.lru_cache import lru_cache # py2 compat, removed in django 3 (or earlier?)
self.get_hashers = lru_cache()(self.get_hashers)
# get copy of original make_password
from django.contrib.auth.hashers import make_password
if make_password.__module__.startswith("zdppy_password_hash."):
make_password = _PatchManager.peek_unpatched_func(make_password)
self._orig_make_password = make_password
# get other django helpers
from django.contrib.auth.hashers import is_password_usable
self.is_password_usable = is_password_usable
# init manager
mlog = logging.getLogger(__name__ + ".DjangoContextAdapter._manager")
self._manager = _PatchManager(log=mlog)
def reset_hashers(self):
"""
Wrapper to manually reset django's hasher lookup cache
"""
# resets cache for .get_hashers() & .get_hashers_by_algorithm()
from django.contrib.auth.hashers import reset_hashers
reset_hashers(setting="PASSWORD_HASHERS")
# reset internal caches
super(DjangoContextAdapter, self).reset_hashers()
#=============================================================================
# django hashers helpers -- hasher lookup
#=============================================================================
# lru_cache()'ed by init
def get_hashers(self):
"""
Passlib replacement for get_hashers() --
Return list of available django hasher classes
"""
zdppy_password_hash_to_django = self.zdppy_password_hash_to_django
return [zdppy_password_hash_to_django(hasher)
for hasher in self.context.schemes(resolve=True)]
def get_hasher(self, algorithm="default"):
"""
Passlib replacement for get_hasher() --
Return django hasher by name
"""
return self.resolve_django_hasher(algorithm)
def identify_hasher(self, encoded):
"""
Passlib replacement for identify_hasher() --
Identify django hasher based on hash.
"""
handler = self.context.identify(encoded, resolve=True, required=True)
if handler.name == "django_salted_sha1" and encoded.startswith("sha1$$"):
# Django uses a separate hasher for "sha1$$digest" hashes, but
# zdppy_password_hash identifies it as belonging to "sha1$salt$digest" handler.
# We want to resolve to correct django hasher.
return self.get_hasher("unsalted_sha1")
return self.zdppy_password_hash_to_django(handler)
#=============================================================================
# django.contrib.auth.hashers helpers -- password helpers
#=============================================================================
def make_password(self, password, salt=None, hasher="default"):
"""
Passlib replacement for make_password()
"""
if password is None:
return self._orig_make_password(None)
# NOTE: relying on hasher coming from context, and thus having
# context-specific config baked into it.
zdppy_password_hash_hasher = self.django_to_zdppy_password_hash(hasher)
if "salt" not in zdppy_password_hash_hasher.setting_kwds:
# ignore salt param even if preset
pass
elif hasher.startswith("unsalted_"):
# Django uses a separate 'unsalted_sha1' hasher for "sha1$$digest",
            # but zdppy_password_hash just reuses its "sha1" handler ("sha1$salt$digest"). To make
# this work, have to explicitly tell the sha1 handler to use an empty salt.
zdppy_password_hash_hasher = zdppy_password_hash_hasher.using(salt="")
elif salt:
# Django make_password() autogenerates a salt if salt is bool False (None / ''),
# so we only pass the keyword on if there's actually a fixed salt.
zdppy_password_hash_hasher = zdppy_password_hash_hasher.using(salt=salt)
return zdppy_password_hash_hasher.hash(password)
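    # Example (illustrative sketch, assuming ``adapter`` is an installed
    # DjangoContextAdapter): behaves like django's make_password(), but hashes
    # via the wrapped CryptContext:
    #
    #   adapter.make_password("secret")                          # default scheme
    #   adapter.make_password("secret", hasher="pbkdf2_sha256")  # explicit scheme
    #   adapter.make_password(None)                              # unusable-password marker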
def check_password(self, password, encoded, setter=None, preferred="default"):
"""
Passlib replacement for check_password()
"""
# XXX: this currently ignores "preferred" keyword, since its purpose
# was for hash migration, and that's handled by the context.
# XXX: honor "none_causes_check_password_error" quirk for django 2.2+?
# seems safer to return False.
if password is None or not self.is_password_usable(encoded):
return False
# verify password
context = self.context
try:
correct = context.verify(password, encoded)
except exc.UnknownHashError:
            # As of django 1.5, unidentifiable hashes return False
# (side-effect of django issue 18453)
return False
if not (correct and setter):
return correct
# check if we need to rehash
if preferred == "default":
if not context.needs_update(encoded, secret=password):
return correct
else:
# Django's check_password() won't call setter() on a
# 'preferred' alg, even if it's otherwise deprecated. To try and
# replicate this behavior if preferred is set, we look up the
            # zdppy_password_hash hasher, and call its original needs_update() method.
            # TODO: remove this redundancy -- the verify() call
            #       above has already identified the hash.
hasher = self.django_to_zdppy_password_hash(preferred)
if (hasher.identify(encoded) and
not hasher.needs_update(encoded, secret=password)):
# alg is 'preferred' and hash itself doesn't need updating,
# so nothing to do.
return correct
# else: either hash isn't preferred, or it needs updating.
# call setter to rehash
setter(password)
return correct
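    # Example (illustrative sketch): replicating django's auto-upgrade hook --
    # if the hash verifies but is deprecated, setter() is invoked to re-hash:
    #
    #   def setter(raw_password):
    #       user.password = adapter.make_password(raw_password)
    #   adapter.check_password("secret", user.password, setter=setter)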
#=============================================================================
# django users helpers
#=============================================================================
def user_check_password(self, user, password):
"""
Passlib replacement for User.check_password()
"""
if password is None:
return False
hash = user.password
if not self.is_password_usable(hash):
return False
cat = self.get_user_category(user)
try:
ok, new_hash = self.context.verify_and_update(password, hash, category=cat)
except exc.UnknownHashError:
            # As of django 1.5, unidentifiable hashes return False
# (side-effect of django issue 18453)
return False
if ok and new_hash is not None:
# migrate to new hash if needed.
user.password = new_hash
user.save()
return ok
def user_set_password(self, user, password):
"""
Passlib replacement for User.set_password()
"""
if password is None:
user.set_unusable_password()
else:
cat = self.get_user_category(user)
user.password = self.context.hash(password, category=cat)
def get_user_category(self, user):
"""
Helper for hashing passwords per-user --
figure out the CryptContext category for specified Django user object.
.. note::
This may be overridden via PASSLIB_GET_CATEGORY django setting
"""
if user.is_superuser:
return "superuser"
elif user.is_staff:
return "staff"
else:
return None
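    # Example (illustrative sketch): a custom hook can be supplied via the
    # PASSLIB_GET_CATEGORY django setting; it just maps user -> category name:
    #
    #   # settings.py
    #   def PASSLIB_GET_CATEGORY(user):
    #       return "superuser" if user.is_superuser else None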
#=============================================================================
# patch control
#=============================================================================
HASHERS_PATH = "django.contrib.auth.hashers"
MODELS_PATH = "django.contrib.auth.models"
USER_CLASS_PATH = MODELS_PATH + ":User"
FORMS_PATH = "django.contrib.auth.forms"
#: list of locations to patch
patch_locations = [
#
# User object
# NOTE: could leave defaults alone, but want to have user available
# so that we can support get_user_category()
#
(USER_CLASS_PATH + ".check_password", "user_check_password", dict(method=True)),
(USER_CLASS_PATH + ".set_password", "user_set_password", dict(method=True)),
#
# Hashers module
#
(HASHERS_PATH + ":", "check_password"),
(HASHERS_PATH + ":", "make_password"),
(HASHERS_PATH + ":", "get_hashers"),
(HASHERS_PATH + ":", "get_hasher"),
(HASHERS_PATH + ":", "identify_hasher"),
#
# Patch known imports from hashers module
#
(MODELS_PATH + ":", "check_password"),
(MODELS_PATH + ":", "make_password"),
(FORMS_PATH + ":", "get_hasher"),
(FORMS_PATH + ":", "identify_hasher"),
]
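    # NOTE: in the table above, a target ending in ":" is shorthand --
    # install_patch() appends the source attribute name, so e.g.
    # (HASHERS_PATH + ":", "check_password") expands to patching
    # "django.contrib.auth.hashers:check_password".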
def install_patch(self):
"""
Install monkeypatch to replace django hasher framework.
"""
# don't reapply
log = self.log
if self.patched:
log.warning("monkeypatching already applied, refusing to reapply")
return False
# version check
if DJANGO_VERSION < MIN_DJANGO_VERSION:
raise RuntimeError("zdppy_password_hash.ext.django requires django >= %s" %
(MIN_DJANGO_VERSION,))
# log start
log.debug("preparing to monkeypatch django ...")
# run through patch locations
manager = self._manager
for record in self.patch_locations:
if len(record) == 2:
record += ({},)
target, source, opts = record
if target.endswith((":", ",")):
target += source
value = getattr(self, source)
if opts.get("method"):
# have to wrap our method in a function,
# since we're installing it in a class *as* a method
# XXX: make this a flag for .patch()?
value = _wrap_method(value)
manager.patch(target, value)
# reset django's caches (e.g. get_hash_by_algorithm)
self.reset_hashers()
# done!
self.patched = True
log.debug("... finished monkeypatching django")
return True
def remove_patch(self):
"""
Remove monkeypatch from django hasher framework.
        As a precaution, in case there are lingering refs to the context,
        the context object will be wiped.
.. warning::
This may cause problems if any other Django modules have imported
their own copies of the patched functions, though the patched
code has been designed to throw an error as soon as possible in
this case.
"""
log = self.log
manager = self._manager
if self.patched:
log.debug("removing django monkeypatching...")
manager.unpatch_all(unpatch_conflicts=True)
self.context.load({})
self.patched = False
self.reset_hashers()
log.debug("...finished removing django monkeypatching")
return True
if manager.isactive(): # pragma: no cover -- sanity check
log.warning("reverting partial monkeypatching of django...")
manager.unpatch_all()
self.context.load({})
self.reset_hashers()
log.debug("...finished removing django monkeypatching")
return True
log.debug("django not monkeypatched")
return False
#=============================================================================
# loading config
#=============================================================================
def load_model(self):
"""
Load configuration from django, and install patch.
"""
self._load_settings()
if self.enabled:
try:
self.install_patch()
except:
# try to undo what we can
self.remove_patch()
raise
else:
if self.patched: # pragma: no cover -- sanity check
log.error("didn't expect monkeypatching would be applied!")
self.remove_patch()
log.debug("zdppy_password_hash.ext.django loaded")
def _load_settings(self):
"""
Update settings from django
"""
from django.conf import settings
# TODO: would like to add support for inheriting config from a preset
# (or from existing hasher state) and letting PASSLIB_CONFIG
# be an update, not a replacement.
# TODO: wrap and import any custom hashers as zdppy_password_hash handlers,
# so they could be used in the zdppy_password_hash config.
# load config from settings
_UNSET = object()
config = getattr(settings, "PASSLIB_CONFIG", _UNSET)
if config is _UNSET:
# XXX: should probably deprecate this alias
config = getattr(settings, "PASSLIB_CONTEXT", _UNSET)
if config is _UNSET:
config = "zdppy_password_hash-default"
if config is None:
warn("setting PASSLIB_CONFIG=None is deprecated, "
"and support will be removed in Passlib 1.8, "
"use PASSLIB_CONFIG='disabled' instead.",
DeprecationWarning)
config = "disabled"
elif not isinstance(config, (unicode, bytes, dict)):
raise exc.ExpectedTypeError(config, "str or dict", "PASSLIB_CONFIG")
# load custom category func (if any)
get_category = getattr(settings, "PASSLIB_GET_CATEGORY", None)
if get_category and not callable(get_category):
raise exc.ExpectedTypeError(get_category, "callable", "PASSLIB_GET_CATEGORY")
# check if we've been disabled
if config == "disabled":
self.enabled = False
return
else:
self.__dict__.pop("enabled", None)
# resolve any preset aliases
if isinstance(config, str) and '\n' not in config:
config = get_preset_config(config)
# setup category func
if get_category:
self.get_user_category = get_category
else:
self.__dict__.pop("get_category", None)
# setup context
self.context.load(config)
self.reset_hashers()
#=============================================================================
# eof
#=============================================================================
#=============================================================================
# wrapping zdppy_password_hash handlers as django hashers
#=============================================================================
_GEN_SALT_SIGNAL = "--!!!generate-new-salt!!!--"
class ProxyProperty(object):
"""helper that proxies another attribute"""
def __init__(self, attr):
self.attr = attr
    def __get__(self, obj, cls):
        if obj is None:
            # class-level access -- proxy the attribute lookup via the class
            obj = cls
        return getattr(obj, self.attr)
def __set__(self, obj, value):
setattr(obj, self.attr, value)
def __delete__(self, obj):
delattr(obj, self.attr)
class _PasslibHasherWrapper(object):
"""
    adapter which wraps a :cls:`zdppy_password_hash.ifc.PasswordHash` class,
and provides an interface compatible with the Django hasher API.
:param zdppy_password_hash_handler:
        zdppy_password_hash hash handler (e.g. :cls:`zdppy_password_hash.hash.sha256_crypt`).
"""
#=====================================================================
# instance attrs
#=====================================================================
#: zdppy_password_hash handler that we're adapting.
zdppy_password_hash_handler = None
# NOTE: 'rounds' attr will store variable rounds, IF handler supports it.
# 'iterations' will act as proxy, for compatibility with django pbkdf2 hashers.
# rounds = None
# iterations = None
#=====================================================================
# init
#=====================================================================
def __init__(self, zdppy_password_hash_handler):
# init handler
if getattr(zdppy_password_hash_handler, "django_name", None):
raise ValueError("handlers that reflect an official django "
"hasher shouldn't be wrapped: %r" %
(zdppy_password_hash_handler.name,))
if zdppy_password_hash_handler.is_disabled:
# XXX: could this be implemented?
raise ValueError("can't wrap disabled-hash handlers: %r" %
(zdppy_password_hash_handler.name))
self.zdppy_password_hash_handler = zdppy_password_hash_handler
# init rounds support
if self._has_rounds:
self.rounds = zdppy_password_hash_handler.default_rounds
self.iterations = ProxyProperty("rounds")
#=====================================================================
# internal methods
#=====================================================================
def __repr__(self):
return "<PasslibHasherWrapper handler=%r>" % self.zdppy_password_hash_handler
#=====================================================================
# internal properties
#=====================================================================
@memoized_property
def __name__(self):
return "Passlib_%s_PasswordHasher" % self.zdppy_password_hash_handler.name.title()
@memoized_property
def _has_rounds(self):
return "rounds" in self.zdppy_password_hash_handler.setting_kwds
@memoized_property
def _translate_kwds(self):
"""
internal helper for safe_summary() --
used to translate zdppy_password_hash hash options -> django keywords
"""
out = dict(checksum="hash")
if self._has_rounds and "pbkdf2" in self.zdppy_password_hash_handler.name:
out['rounds'] = 'iterations'
return out
#=====================================================================
# hasher properties
#=====================================================================
@memoized_property
def algorithm(self):
return PASSLIB_WRAPPER_PREFIX + self.zdppy_password_hash_handler.name
#=====================================================================
# hasher api
#=====================================================================
def salt(self):
# NOTE: zdppy_password_hash's handler.hash() should generate new salt each time,
# so this just returns a special constant which tells
# encode() (below) not to pass a salt keyword along.
return _GEN_SALT_SIGNAL
def verify(self, password, encoded):
return self.zdppy_password_hash_handler.verify(password, encoded)
def encode(self, password, salt=None, rounds=None, iterations=None):
kwds = {}
if salt is not None and salt != _GEN_SALT_SIGNAL:
kwds['salt'] = salt
if self._has_rounds:
if rounds is not None:
kwds['rounds'] = rounds
elif iterations is not None:
kwds['rounds'] = iterations
else:
kwds['rounds'] = self.rounds
elif rounds is not None or iterations is not None:
warn("%s.hash(): 'rounds' and 'iterations' are ignored" % self.__name__)
handler = self.zdppy_password_hash_handler
if kwds:
handler = handler.using(**kwds)
return handler.hash(password)
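    # Example (illustrative sketch, assuming the default registry): the wrapper
    # quacks like a django hasher --
    #
    #   hasher = _PasslibHasherWrapper(registry.get_crypt_handler("phpass"))
    #   encoded = hasher.encode("secret", hasher.salt())
    #   assert hasher.verify("secret", encoded)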
def safe_summary(self, encoded):
from django.contrib.auth.hashers import mask_hash
from django.utils.translation import ugettext_noop as _
handler = self.zdppy_password_hash_handler
items = [
# since this is user-facing, we're reporting zdppy_password_hash's name,
            # without the distracting PASSLIB_WRAPPER_PREFIX prepended.
(_('algorithm'), handler.name),
]
if hasattr(handler, "parsehash"):
kwds = handler.parsehash(encoded, sanitize=mask_hash)
for key, value in iteritems(kwds):
key = self._translate_kwds.get(key, key)
items.append((_(key), value))
return OrderedDict(items)
def must_update(self, encoded):
        # TODO: would like access to the CryptContext; would need caller to pass it to get_zdppy_password_hash_hasher().
# for now (as of zdppy_password_hash 1.6.6), replicating django policy that this returns True
# if 'encoded' hash has different rounds value from self.rounds
if self._has_rounds:
# XXX: could cache this subclass somehow (would have to intercept writes to self.rounds)
# TODO: always call subcls/handler.needs_update() in case there's other things to check
subcls = self.zdppy_password_hash_handler.using(min_rounds=self.rounds, max_rounds=self.rounds)
if subcls.needs_update(encoded):
return True
return False
#=====================================================================
# eoc
#=====================================================================
#=============================================================================
# adapting django hashers -> zdppy_password_hash handlers
#=============================================================================
# TODO: this code probably halfway works, mainly just needs
# a routine to read HASHERS and PREFERRED_HASHER.
##from zdppy_password_hash.registry import register_crypt_handler
##from zdppy_password_hash.utils import classproperty, to_native_str, to_unicode
##from zdppy_password_hash.utils.compat import unicode
##
##
##class _HasherHandler(object):
## "helper for wrapping Hasher instances as zdppy_password_hash handlers"
## # FIXME: this generic wrapper doesn't handle custom settings
## # FIXME: genconfig / genhash not supported.
##
## def __init__(self, hasher):
## self.django_hasher = hasher
## if hasattr(hasher, "iterations"):
## # assume encode() accepts an "iterations" parameter.
## # fake min/max rounds
## self.min_rounds = 1
## self.max_rounds = 0xFFFFffff
## self.default_rounds = self.django_hasher.iterations
## self.setting_kwds += ("rounds",)
##
## # hasher instance - filled in by constructor
## django_hasher = None
##
## setting_kwds = ("salt",)
## context_kwds = ()
##
## @property
## def name(self):
##        # XXX: need to make sure this won't collide w/ builtin django hashes.
## # maybe by renaming this to django compatible aliases?
## return DJANGO_PASSLIB_PREFIX + self.django_name
##
## @property
## def django_name(self):
## # expose this so hasher_to_zdppy_password_hash_name() extracts original name
## return self.django_hasher.algorithm
##
## @property
## def ident(self):
## # this should always be correct, as django relies on ident prefix.
## return unicode(self.django_name + "$")
##
##    def identify(self, hash):
## # this should always work, as django relies on ident prefix.
## return to_unicode(hash, "latin-1", "hash").startswith(self.ident)
##
##    def hash(self, secret, salt=None, **kwds):
## # NOTE: from how make_password() is coded, all hashers
## # should have salt param. but only some will have
## # 'iterations' parameter.
## opts = {}
## if 'rounds' in self.setting_kwds and 'rounds' in kwds:
## opts['iterations'] = kwds.pop("rounds")
## if kwds:
## raise TypeError("unexpected keyword arguments: %r" % list(kwds))
## if isinstance(secret, unicode):
## secret = secret.encode("utf-8")
## if salt is None:
## salt = self.django_hasher.salt()
## return to_native_str(self.django_hasher(secret, salt, **opts))
##
##    def verify(self, secret, hash):
## hash = to_native_str(hash, "utf-8", "hash")
## if isinstance(secret, unicode):
## secret = secret.encode("utf-8")
## return self.django_hasher.verify(secret, hash)
##
##def register_hasher(hasher):
## handler = _HasherHandler(hasher)
## register_crypt_handler(handler)
## return handler
#=============================================================================
# monkeypatch helpers
#=============================================================================
# private singleton indicating lack-of-value
_UNSET = object()
class _PatchManager(object):
"""helper to manage monkeypatches and run sanity checks"""
# NOTE: this could easily use a dict interface,
# but keeping it distinct to make clear that it's not a dict,
# since it has important side-effects.
#===================================================================
# init and support
#===================================================================
def __init__(self, log=None):
# map of key -> (original value, patched value)
# original value may be _UNSET
self.log = log or logging.getLogger(__name__ + "._PatchManager")
self._state = {}
def isactive(self):
return bool(self._state)
# bool value tests if any patches are currently applied.
# NOTE: this behavior is deprecated in favor of .isactive
__bool__ = __nonzero__ = isactive
def _import_path(self, path):
"""retrieve obj and final attribute name from resource path"""
name, attr = path.split(":")
obj = __import__(name, fromlist=[attr], level=0)
while '.' in attr:
head, attr = attr.split(".", 1)
obj = getattr(obj, head)
return obj, attr
@staticmethod
def _is_same_value(left, right):
"""check if two values are the same (stripping method wrappers, etc)"""
return get_method_function(left) == get_method_function(right)
#===================================================================
# reading
#===================================================================
def _get_path(self, key, default=_UNSET):
obj, attr = self._import_path(key)
return getattr(obj, attr, default)
def get(self, path, default=None):
"""return current value for path"""
return self._get_path(path, default)
def getorig(self, path, default=None):
"""return original (unpatched) value for path"""
try:
            value, _ = self._state[path]
except KeyError:
value = self._get_path(path)
return default if value is _UNSET else value
def check_all(self, strict=False):
"""run sanity check on all keys, issue warning if out of sync"""
same = self._is_same_value
for path, (orig, expected) in iteritems(self._state):
if same(self._get_path(path), expected):
continue
msg = "another library has patched resource: %r" % path
if strict:
raise RuntimeError(msg)
else:
warn(msg, PasslibRuntimeWarning)
#===================================================================
# patching
#===================================================================
def _set_path(self, path, value):
obj, attr = self._import_path(path)
if value is _UNSET:
if hasattr(obj, attr):
delattr(obj, attr)
else:
setattr(obj, attr, value)
def patch(self, path, value, wrap=False):
"""monkeypatch object+attr at <path> to have <value>, stores original"""
        assert value is not _UNSET
current = self._get_path(path)
try:
orig, expected = self._state[path]
except KeyError:
self.log.debug("patching resource: %r", path)
orig = current
else:
self.log.debug("modifying resource: %r", path)
if not self._is_same_value(current, expected):
warn("overridding resource another library has patched: %r"
% path, PasslibRuntimeWarning)
if wrap:
assert callable(value)
wrapped = orig
wrapped_by = value
def wrapper(*args, **kwds):
return wrapped_by(wrapped, *args, **kwds)
update_wrapper(wrapper, value)
value = wrapper
if callable(value):
# needed by DjangoContextAdapter init
get_method_function(value)._patched_original_value = orig
self._set_path(path, value)
self._state[path] = (orig, value)
@classmethod
def peek_unpatched_func(cls, value):
return value._patched_original_value
##def patch_many(self, **kwds):
## "override specified resources with new values"
## for path, value in iteritems(kwds):
## self.patch(path, value)
def monkeypatch(self, parent, name=None, enable=True, wrap=False):
"""function decorator which patches function of same name in <parent>"""
def builder(func):
if enable:
sep = "." if ":" in parent else ":"
path = parent + sep + (name or func.__name__)
self.patch(path, func, wrap=wrap)
return func
if callable(name):
# called in non-decorator mode
func = name
name = None
builder(func)
return None
return builder
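    # Example (illustrative sketch): decorator form -- patches the function of
    # the same name inside the named module:
    #
    #   manager = _PatchManager()
    #
    #   @manager.monkeypatch("django.contrib.auth.hashers")
    #   def make_password(password, salt=None, hasher="default"):
    #       ...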
#===================================================================
# unpatching
#===================================================================
def unpatch(self, path, unpatch_conflicts=True):
try:
orig, expected = self._state[path]
except KeyError:
return
current = self._get_path(path)
self.log.debug("unpatching resource: %r", path)
if not self._is_same_value(current, expected):
if unpatch_conflicts:
warn("reverting resource another library has patched: %r"
% path, PasslibRuntimeWarning)
else:
warn("not reverting resource another library has patched: %r"
% path, PasslibRuntimeWarning)
del self._state[path]
return
self._set_path(path, orig)
del self._state[path]
def unpatch_all(self, **kwds):
for key in list(self._state):
self.unpatch(key, **kwds)
#===================================================================
# eoc
#===================================================================
#=============================================================================
# eof
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/ext/django/utils.py | utils.py |
from __future__ import division
# core
import hashlib
import logging; log = logging.getLogger(__name__)
try:
# new in py3.4
from hashlib import pbkdf2_hmac as _stdlib_pbkdf2_hmac
if _stdlib_pbkdf2_hmac.__module__ == "hashlib":
# builtin pure-python backends are slightly faster than stdlib's pure python fallback,
# so only using stdlib's version if it's backed by openssl's pbkdf2_hmac()
log.debug("ignoring pure-python hashlib.pbkdf2_hmac()")
_stdlib_pbkdf2_hmac = None
except ImportError:
_stdlib_pbkdf2_hmac = None
import re
import os
from struct import Struct
from warnings import warn
# site
try:
# https://pypi.python.org/pypi/fastpbkdf2/
from fastpbkdf2 import pbkdf2_hmac as _fast_pbkdf2_hmac
except ImportError:
_fast_pbkdf2_hmac = None
# pkg
from zdppy_password_hash import exc
from zdppy_password_hash.utils import join_bytes, to_native_str, join_byte_values, to_bytes, \
SequenceMixin, as_bool
from zdppy_password_hash.utils.compat import irange, int_types, unicode_or_bytes_types, PY3, error_from
from zdppy_password_hash.utils.decor import memoized_property
# local
__all__ = [
# hash utils
"lookup_hash",
"HashInfo",
"norm_hash_name",
# hmac utils
"compile_hmac",
# kdfs
"pbkdf1",
"pbkdf2_hmac",
]
#=============================================================================
# generic constants
#=============================================================================
#: max 32-bit value
MAX_UINT32 = (1 << 32) - 1
#: max 64-bit value
MAX_UINT64 = (1 << 64) - 1
#=============================================================================
# hash utils
#=============================================================================
#: list of known hash names, used by lookup_hash()'s _norm_hash_name() helper
_known_hash_names = [
# format: (hashlib/ssl name, iana name or standin, other known aliases ...)
#----------------------------------------------------
# hashes with official IANA-assigned names
# (as of 2012-03 - http://www.iana.org/assignments/hash-function-text-names)
#----------------------------------------------------
("md2", "md2"), # NOTE: openssl dropped md2 support in v1.0.0
("md5", "md5"),
("sha1", "sha-1"),
("sha224", "sha-224", "sha2-224"),
("sha256", "sha-256", "sha2-256"),
("sha384", "sha-384", "sha2-384"),
("sha512", "sha-512", "sha2-512"),
# TODO: add sha3 to this table.
#----------------------------------------------------
# hashlib/ssl-supported hashes without official IANA names,
# (hopefully-) compatible stand-ins have been chosen.
#----------------------------------------------------
("blake2b", "blake-2b"),
("blake2s", "blake-2s"),
("md4", "md4"),
# NOTE: there was an older "ripemd" and "ripemd-128",
# but python 2.7+ resolves "ripemd" -> "ripemd160",
# so treating "ripemd" as alias here.
("ripemd160", "ripemd-160", "ripemd"),
]
#: dict mapping hashlib names to hardcoded digest info;
#: so this is available even when hashes aren't present.
_fallback_info = {
# name: (digest_size, block_size)
'blake2b': (64, 128),
'blake2s': (32, 64),
'md4': (16, 64),
'md5': (16, 64),
'sha1': (20, 64),
'sha224': (28, 64),
'sha256': (32, 64),
'sha384': (48, 128),
'sha3_224': (28, 144),
'sha3_256': (32, 136),
'sha3_384': (48, 104),
'sha3_512': (64, 72),
'sha512': (64, 128),
'shake128': (16, 168),
'shake256': (32, 136),
}
def _gen_fallback_info():
"""
internal helper used to generate ``_fallback_info`` dict.
currently only run manually to update the above list;
not invoked at runtime.
"""
out = {}
for alg in sorted(hashlib.algorithms_available | set(["md4"])):
info = lookup_hash(alg)
out[info.name] = (info.digest_size, info.block_size)
return out
#: cache of hash info instances used by lookup_hash()
_hash_info_cache = {}
def _get_hash_aliases(name):
"""
internal helper used by :func:`lookup_hash` --
normalize arbitrary hash name to hashlib format.
if name not recognized, returns dummy record and issues a warning.
:arg name:
unnormalized name
:returns:
tuple with 2+ elements: ``(hashlib_name, iana_name|None, ... 0+ aliases)``.
"""
# normalize input
orig = name
if not isinstance(name, str):
name = to_native_str(name, 'utf-8', 'hash name')
name = re.sub("[_ /]", "-", name.strip().lower())
if name.startswith("scram-"): # helper for SCRAM protocol (see zdppy_password_hash.handlers.scram)
name = name[6:]
if name.endswith("-plus"):
name = name[:-5]
# look through standard names and known aliases
def check_table(name):
for row in _known_hash_names:
if name in row:
return row
result = check_table(name)
if result:
return result
# try to clean name up some more
m = re.match(r"(?i)^(?P<name>[a-z]+)-?(?P<rev>\d)?-?(?P<size>\d{3,4})?$", name)
if m:
        # roughly follows "SHA2-256" style format; normalize the representation,
        # and check the table again.
iana_name, rev, size = m.group("name", "rev", "size")
if rev:
iana_name += rev
hashlib_name = iana_name
if size:
iana_name += "-" + size
if rev:
hashlib_name += "_"
hashlib_name += size
result = check_table(iana_name)
if result:
return result
        # not found in table, but format is roughly recognizable. use the names we built up as a fallback.
log.info("normalizing unrecognized hash name %r => %r / %r",
orig, hashlib_name, iana_name)
else:
# just can't make sense of it. return something
iana_name = name
hashlib_name = name.replace("-", "_")
log.warning("normalizing unrecognized hash name and format %r => %r / %r",
orig, hashlib_name, iana_name)
return hashlib_name, iana_name
def _get_hash_const(name):
"""
internal helper used by :func:`lookup_hash` --
lookup hash constructor by name
:arg name:
name (normalized to hashlib format, e.g. ``"sha256"``)
:returns:
hash constructor, e.g. ``hashlib.sha256()``;
or None if hash can't be located.
"""
# check hashlib.<attr> for an efficient constructor
if not name.startswith("_") and name not in ("new", "algorithms"):
try:
return getattr(hashlib, name)
except AttributeError:
pass
# check hashlib.new() in case SSL supports the digest
new_ssl_hash = hashlib.new
try:
# new() should throw ValueError if alg is unknown
new_ssl_hash(name, b"")
except ValueError:
pass
else:
# create wrapper function
# XXX: is there a faster way to wrap this?
def const(msg=b""):
return new_ssl_hash(name, msg)
const.__name__ = name
const.__module__ = "hashlib"
const.__doc__ = ("wrapper for hashlib.new(%r),\n"
"generated by zdppy_password_hash.crypto.digest.lookup_hash()") % name
return const
# use builtin md4 as fallback when not supported by hashlib
if name == "md4":
from zdppy_password_hash.crypto._md4 import md4
return md4
# XXX: any other modules / registries we should check?
# TODO: add pysha3 support.
return None
def lookup_hash(digest, # *,
return_unknown=False, required=True):
"""
Returns a :class:`HashInfo` record containing information about a given hash function.
Can be used to look up a hash constructor by name, normalize hash name representation, etc.
:arg digest:
This can be any of:
* A string containing a :mod:`!hashlib` digest name (e.g. ``"sha256"``),
* A string containing an IANA-assigned hash name,
* A digest constructor function (e.g. ``hashlib.sha256``).
Case is ignored, underscores are converted to hyphens,
and various other cleanups are made.
:param required:
By default (True), this function will throw an :exc:`~zdppy_password_hash.exc.UnknownHashError` if no hash constructor
can be found, or if the hash is not actually available.
If this flag is False, it will instead return a dummy :class:`!HashInfo` record
        which will defer throwing the error until its constructor function is called.
This is mainly used by :func:`norm_hash_name`.
:param return_unknown:
.. deprecated:: 1.7.3
deprecated, and will be removed in zdppy_password_hash 2.0.
this acts like inverse of **required**.
:returns HashInfo:
:class:`HashInfo` instance containing information about specified digest.
Multiple calls resolving to the same hash should always
return the same :class:`!HashInfo` instance.
"""
# check for cached entry
cache = _hash_info_cache
try:
return cache[digest]
except (KeyError, TypeError):
# NOTE: TypeError is to catch 'TypeError: unhashable type' (e.g. HashInfo)
pass
# legacy alias
if return_unknown:
required = False
# resolve ``digest`` to ``const`` & ``name_record``
cache_by_name = True
if isinstance(digest, unicode_or_bytes_types):
# normalize name
name_list = _get_hash_aliases(digest)
name = name_list[0]
assert name
# if name wasn't normalized to hashlib format,
# get info for normalized name and reuse it.
if name != digest:
info = lookup_hash(name, required=required)
cache[digest] = info
return info
# else look up constructor
# NOTE: may return None, which is handled by HashInfo constructor
const = _get_hash_const(name)
# if mock fips mode is enabled, replace with dummy constructor
# (to replicate how it would behave on a real fips system).
if const and mock_fips_mode and name not in _fips_algorithms:
def const(source=b""):
raise ValueError("%r disabled for fips by zdppy_password_hash set_mock_fips_mode()" % name)
elif isinstance(digest, HashInfo):
# handle border case where HashInfo is passed in.
return digest
elif callable(digest):
        # try to look up digest based on its self-reported name
# (which we trust to be the canonical "hashlib" name)
const = digest
name_list = _get_hash_aliases(const().name)
name = name_list[0]
other_const = _get_hash_const(name)
if other_const is None:
# this is probably a third-party digest we don't know about,
            # so just pass it on through, and register a reverse lookup for its name.
pass
elif other_const is const:
# if we got back same constructor, this is just a known stdlib constructor,
# which was passed in before we had cached it by name. proceed normally.
pass
else:
# if we got back different object, then ``const`` is something else
# (such as a mock object), in which case we want to skip caching it by name,
# as that would conflict with real hash.
cache_by_name = False
else:
raise exc.ExpectedTypeError(digest, "digest name or constructor", "digest")
# create new instance
info = HashInfo(const=const, names=name_list, required=required)
# populate cache
if const is not None:
cache[const] = info
if cache_by_name:
for name in name_list:
if name: # (skips iana name if it's empty)
assert cache.get(name) in [None, info], "%r already in cache" % name
cache[name] = info
return info
#: UT helper for clearing internal cache
lookup_hash.clear_cache = _hash_info_cache.clear
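# Example (illustrative sketch): all spellings resolve to one cached record --
#
#   info = lookup_hash("SHA-256")
#   assert info is lookup_hash("sha256") is lookup_hash(hashlib.sha256)
#   info.name, info.iana_name, info.digest_size  # -> ('sha256', 'sha-256', 32)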
def norm_hash_name(name, format="hashlib"):
"""Normalize hash function name (convenience wrapper for :func:`lookup_hash`).
:arg name:
Original hash function name.
This name can be a Python :mod:`~hashlib` digest name,
a SCRAM mechanism name, IANA assigned hash name, etc.
Case is ignored, and underscores are converted to hyphens.
:param format:
Naming convention to normalize to.
Possible values are:
* ``"hashlib"`` (the default) - normalizes name to be compatible
with Python's :mod:`!hashlib`.
* ``"iana"`` - normalizes name to IANA-assigned hash function name.
For hashes which IANA hasn't assigned a name for, this issues a warning,
and then uses a heuristic to return a "best guess" name.
:returns:
Hash name, returned as native :class:`!str`.
"""
info = lookup_hash(name, required=False)
if info.unknown:
warn("norm_hash_name(): " + info.error_text, exc.PasslibRuntimeWarning)
if format == "hashlib":
return info.name
elif format == "iana":
return info.iana_name
else:
raise ValueError("unknown format: %r" % (format,))
class HashInfo(SequenceMixin):
"""
Record containing information about a given hash algorithm, as returned :func:`lookup_hash`.
This class exposes the following attributes:
.. autoattribute:: const
.. autoattribute:: digest_size
.. autoattribute:: block_size
.. autoattribute:: name
.. autoattribute:: iana_name
.. autoattribute:: aliases
.. autoattribute:: supported
This object can also be treated a 3-element sequence
containing ``(const, digest_size, block_size)``.
"""
#=========================================================================
# instance attrs
#=========================================================================
#: Canonical / hashlib-compatible name (e.g. ``"sha256"``).
name = None
#: IANA assigned name (e.g. ``"sha-256"``), may be ``None`` if unknown.
iana_name = None
#: Tuple of other known aliases (may be empty)
aliases = ()
#: Hash constructor function (e.g. :func:`hashlib.sha256`)
const = None
#: Hash's digest size
digest_size = None
#: Hash's block size
block_size = None
#: set when hash isn't available, will be filled in with string containing error text
#: that const() will raise.
error_text = None
#: set when error_text is due to hash algorithm being completely unknown
#: (not just unavailable on current system)
unknown = False
#=========================================================================
# init
#=========================================================================
def __init__(self, # *,
const, names, required=True):
"""
initialize new instance.
:arg const:
hash constructor
:arg names:
list of 2+ names. should be list of ``(name, iana_name, ... 0+ aliases)``.
names must be lower-case. only iana name may be None.
"""
# init names
name = self.name = names[0]
self.iana_name = names[1]
self.aliases = names[2:]
def use_stub_const(msg):
"""
helper that installs stub constructor which throws specified error <msg>.
"""
def const(source=b""):
raise exc.UnknownHashError(msg, name)
if required:
# if caller only wants supported digests returned,
# just throw error immediately...
const()
assert "shouldn't get here"
self.error_text = msg
self.const = const
try:
self.digest_size, self.block_size = _fallback_info[name]
except KeyError:
pass
# handle "constructor not available" case
if const is None:
if names in _known_hash_names:
msg = "unsupported hash: %r" % name
else:
msg = "unknown hash: %r" % name
self.unknown = True
use_stub_const(msg)
# TODO: load in preset digest size info for known hashes.
return
# create hash instance to inspect
try:
hash = const()
except ValueError as err:
# per issue 116, FIPS compliant systems will have a constructor;
# but it will throw a ValueError with this message. As of 1.7.3,
# translating this into DisabledHashError.
# "ValueError: error:060800A3:digital envelope routines:EVP_DigestInit_ex:disabled for fips"
if "disabled for fips" in str(err).lower():
msg = "%r hash disabled for fips" % name
else:
msg = "internal error in %r constructor\n(%s: %s)" % (name, type(err).__name__, err)
use_stub_const(msg)
return
# store stats about hash
self.const = const
self.digest_size = hash.digest_size
self.block_size = hash.block_size
# do sanity check on digest size
if len(hash.digest()) != hash.digest_size:
raise RuntimeError("%r constructor failed sanity check" % self.name)
# do sanity check on name.
if hash.name != self.name:
warn("inconsistent digest name: %r resolved to %r, which reports name as %r" %
(self.name, const, hash.name), exc.PasslibRuntimeWarning)
#=========================================================================
# methods
#=========================================================================
def __repr__(self):
return "<lookup_hash(%r): digest_size=%r block_size=%r)" % \
(self.name, self.digest_size, self.block_size)
def _as_tuple(self):
return self.const, self.digest_size, self.block_size
@memoized_property
def supported(self):
"""
whether hash is available for use
(if False, constructor will throw UnknownHashError if called)
"""
return self.error_text is None
@memoized_property
def supported_by_fastpbkdf2(self):
"""helper to detect if hash is supported by fastpbkdf2()"""
if not _fast_pbkdf2_hmac:
return None
try:
_fast_pbkdf2_hmac(self.name, b"p", b"s", 1)
return True
except ValueError:
# "unsupported hash type"
return False
@memoized_property
def supported_by_hashlib_pbkdf2(self):
"""helper to detect if hash is supported by hashlib.pbkdf2_hmac()"""
if not _stdlib_pbkdf2_hmac:
return None
try:
_stdlib_pbkdf2_hmac(self.name, b"p", b"s", 1)
return True
except ValueError:
# "unsupported hash type"
return False
#=========================================================================
# eoc
#=========================================================================
#---------------------------------------------------------------------
# mock fips mode monkeypatch
#---------------------------------------------------------------------
#: flag for detecting if mock fips mode is enabled.
mock_fips_mode = False
#: algorithms allowed under FIPS mode (subset of hashlib.algorithms_available);
#: per https://csrc.nist.gov/Projects/Hash-Functions FIPS 202 list.
_fips_algorithms = set([
# FIPS 180-4 and FIPS 202
'sha1',
'sha224',
'sha256',
'sha384',
'sha512',
# 'sha512/224',
# 'sha512/256',
# FIPS 202 only
'sha3_224',
'sha3_256',
'sha3_384',
'sha3_512',
'shake_128',
'shake_256',
])
def _set_mock_fips_mode(enable=True):
"""
UT helper which monkeypatches lookup_hash() internals to replicate FIPS mode.
"""
global mock_fips_mode
mock_fips_mode = enable
lookup_hash.clear_cache()
# helper for UTs
if as_bool(os.environ.get("PASSLIB_MOCK_FIPS_MODE")):
_set_mock_fips_mode()
#=============================================================================
# hmac utils
#=============================================================================
#: translation tables used by compile_hmac()
_TRANS_5C = join_byte_values((x ^ 0x5C) for x in irange(256))
_TRANS_36 = join_byte_values((x ^ 0x36) for x in irange(256))
def compile_hmac(digest, key, multipart=False):
"""
This function returns an efficient HMAC function, hardcoded with a specific digest & key.
It can be used via ``hmac = compile_hmac(digest, key)``.
:arg digest:
digest name or constructor.
:arg key:
secret key as :class:`!bytes` or :class:`!unicode` (unicode will be encoded using utf-8).
:param multipart:
request a multipart constructor instead (see return description).
:returns:
By default, the returned function has the signature ``hmac(msg) -> digest output``.
However, if ``multipart=True``, the returned function has the signature
``hmac() -> update, finalize``, where ``update(msg)`` may be called multiple times,
and ``finalize() -> digest_output`` may be repeatedly called at any point to
calculate the HMAC digest so far.
The returned object will also have a ``digest_info`` attribute, containing
a :class:`lookup_hash` instance for the specified digest.
    This function exists, and has the weird signature it does, in order to squeeze out
    as much efficiency as possible, by omitting much of the setup cost
    and features of the stdlib :mod:`hmac` module.
"""
# all the following was adapted from stdlib's hmac module
# resolve digest (cached)
digest_info = lookup_hash(digest)
const, digest_size, block_size = digest_info
assert block_size >= 16, "block size too small"
# prepare key
if not isinstance(key, bytes):
key = to_bytes(key, param="key")
klen = len(key)
if klen > block_size:
key = const(key).digest()
klen = digest_size
if klen < block_size:
key += b'\x00' * (block_size - klen)
# create pre-initialized hash constructors
_inner_copy = const(key.translate(_TRANS_36)).copy
_outer_copy = const(key.translate(_TRANS_5C)).copy
if multipart:
# create multi-part function
# NOTE: this is slightly slower than the single-shot version,
# and should only be used if needed.
def hmac():
"""generated by compile_hmac(multipart=True)"""
inner = _inner_copy()
def finalize():
outer = _outer_copy()
outer.update(inner.digest())
return outer.digest()
return inner.update, finalize
else:
# single-shot function
def hmac(msg):
"""generated by compile_hmac()"""
inner = _inner_copy()
inner.update(msg)
outer = _outer_copy()
outer.update(inner.digest())
return outer.digest()
# add info attr
hmac.digest_info = digest_info
return hmac
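# Example (illustrative sketch): single-shot vs multipart forms --
#
#   hmac = compile_hmac("sha256", b"key")
#   digest = hmac(b"message")
#
#   update, finalize = compile_hmac("sha256", b"key", multipart=True)()
#   update(b"mes"); update(b"sage")
#   assert finalize() == digest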
#=============================================================================
# pbkdf1
#=============================================================================
def pbkdf1(digest, secret, salt, rounds, keylen=None):
"""pkcs#5 password-based key derivation v1.5
:arg digest:
digest name or constructor.
:arg secret:
secret to use when generating the key.
may be :class:`!bytes` or :class:`unicode` (encoded using UTF-8).
:arg salt:
salt string to use when generating key.
may be :class:`!bytes` or :class:`unicode` (encoded using UTF-8).
:param rounds:
number of rounds to use to generate key.
:arg keylen:
number of bytes to generate (if omitted / ``None``, uses digest's native size)
:returns:
raw :class:`bytes` of generated key
.. note::
This algorithm has been deprecated, new code should use PBKDF2.
Among other limitations, ``keylen`` cannot be larger
than the digest size of the specified hash.
"""
# resolve digest
const, digest_size, block_size = lookup_hash(digest)
# validate secret & salt
secret = to_bytes(secret, param="secret")
salt = to_bytes(salt, param="salt")
# validate rounds
if not isinstance(rounds, int_types):
raise exc.ExpectedTypeError(rounds, "int", "rounds")
if rounds < 1:
raise ValueError("rounds must be at least 1")
# validate keylen
if keylen is None:
keylen = digest_size
elif not isinstance(keylen, int_types):
raise exc.ExpectedTypeError(keylen, "int or None", "keylen")
elif keylen < 0:
raise ValueError("keylen must be at least 0")
elif keylen > digest_size:
raise ValueError("keylength too large for digest: %r > %r" %
(keylen, digest_size))
# main pbkdf1 loop
block = secret + salt
for _ in irange(rounds):
block = const(block).digest()
return block[:keylen]
#=============================================================================
# pbkdf2
#=============================================================================
_pack_uint32 = Struct(">L").pack
def pbkdf2_hmac(digest, secret, salt, rounds, keylen=None):
"""pkcs#5 password-based key derivation v2.0 using HMAC + arbitrary digest.
:arg digest:
digest name or constructor.
:arg secret:
passphrase to use to generate key.
may be :class:`!bytes` or :class:`unicode` (encoded using UTF-8).
:arg salt:
salt string to use when generating key.
may be :class:`!bytes` or :class:`unicode` (encoded using UTF-8).
:param rounds:
number of rounds to use to generate key.
:arg keylen:
number of bytes to generate.
if omitted / ``None``, will use digest's native output size.
:returns:
raw bytes of generated key
.. versionchanged:: 1.7
This function will use the first available of the following backends:
* `fastpbkdf2 <https://pypi.python.org/pypi/fastpbkdf2>`_
* :func:`hashlib.pbkdf2_hmac` (only available in py2 >= 2.7.8, and py3 >= 3.4)
* builtin pure-python backend
See :data:`zdppy_password_hash.crypto.digest.PBKDF2_BACKENDS` to determine
which backend(s) are in use.
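Usage sketch (PBKDF2-HMAC-SHA256; :func:`hashlib.pbkdf2_hmac` is used purely
as a cross-check, since both implement the same RFC 2898 KDF)::

    import hashlib
    out = pbkdf2_hmac("sha256", "password", "salt", rounds=1000, keylen=32)
    assert out == hashlib.pbkdf2_hmac("sha256", b"password", b"salt", 1000, 32)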
"""
# validate secret & salt
secret = to_bytes(secret, param="secret")
salt = to_bytes(salt, param="salt")
# resolve digest
digest_info = lookup_hash(digest)
digest_size = digest_info.digest_size
# validate rounds
if not isinstance(rounds, int_types):
raise exc.ExpectedTypeError(rounds, "int", "rounds")
if rounds < 1:
raise ValueError("rounds must be at least 1")
# validate keylen
if keylen is None:
keylen = digest_size
elif not isinstance(keylen, int_types):
raise exc.ExpectedTypeError(keylen, "int or None", "keylen")
elif keylen < 1:
# XXX: could allow keylen=0, but want to be compat w/ stdlib
raise ValueError("keylen must be at least 1")
# find smallest block count s.t. keylen <= block_count * digest_size;
# make sure block count won't overflow (per pbkdf2 spec)
# this corresponds to throwing error if keylen > digest_size * MAX_UINT32
# NOTE: stdlib will throw error at lower bound (keylen > MAX_SINT32)
# NOTE: have to do this before the other backends are checked, since fastpbkdf2 raises the wrong error
# (InvocationError, not OverflowError)
block_count = (keylen + digest_size - 1) // digest_size
if block_count > MAX_UINT32:
raise OverflowError("keylen too long for digest")
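# e.g. keylen=48 with a 32-byte digest -> block_count=2: two 32-byte blocks
# are generated (64 bytes) and truncated to 48 bytes at the end.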
#
# check for various high-speed backends
#
# ~3x faster than pure-python backend
# NOTE: have to do this after above guards since fastpbkdf2 lacks bounds checks.
if digest_info.supported_by_fastpbkdf2:
return _fast_pbkdf2_hmac(digest_info.name, secret, salt, rounds, keylen)
# ~1.4x faster than pure-python backend
# NOTE: have to do this after fastpbkdf2, since hashlib-ssl is slower,
#       but supports a larger number of hashes.
if digest_info.supported_by_hashlib_pbkdf2:
return _stdlib_pbkdf2_hmac(digest_info.name, secret, salt, rounds, keylen)
#
# otherwise use our own implementation
#
# generate keyed hmac
keyed_hmac = compile_hmac(digest, secret)
# get helper to calculate pbkdf2 inner loop efficiently
calc_block = _get_pbkdf2_looper(digest_size)
# assemble & return result
return join_bytes(
calc_block(keyed_hmac, keyed_hmac(salt + _pack_uint32(i)), rounds)
for i in irange(1, block_count + 1)
)[:keylen]
#-------------------------------------------------------------------------------------
# pick best choice for pure-python helper
# TODO: consider some alternatives, such as C-accelerated xor_bytes helper if available
#-------------------------------------------------------------------------------------
# NOTE: this env var is only present to support the admin/benchmark_pbkdf2 script
_force_backend = os.environ.get("PASSLIB_PBKDF2_BACKEND") or "any"
if PY3 and _force_backend in ["any", "from-bytes"]:
from functools import partial
def _get_pbkdf2_looper(digest_size):
return partial(_pbkdf2_looper, digest_size)
def _pbkdf2_looper(digest_size, keyed_hmac, digest, rounds):
"""
py3-only implementation of pbkdf2 inner loop;
uses 'int.from_bytes' + integer XOR
"""
from_bytes = int.from_bytes
BIG = "big" # endianness doesn't matter, just has to be consistent
accum = from_bytes(digest, BIG)
for _ in irange(rounds - 1):
digest = keyed_hmac(digest)
accum ^= from_bytes(digest, BIG)
return accum.to_bytes(digest_size, BIG)
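# NOTE: XORing the digests as one big integer is equivalent to a bytewise XOR,
# since int.from_bytes / int.to_bytes round-trip losslessly at a fixed width.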
_builtin_backend = "from-bytes"
elif _force_backend in ["any", "unpack", "from-bytes"]:
from struct import Struct
from zdppy_password_hash.utils import sys_bits
_have_64_bit = (sys_bits >= 64)
#: cache used by _get_pbkdf2_looper
_looper_cache = {}
def _get_pbkdf2_looper(digest_size):
"""
We want a helper function which performs the equivalent of the following::

    def helper(keyed_hmac, digest, rounds):
        accum = digest
        for _ in irange(rounds - 1):
            digest = keyed_hmac(digest)
            accum ^= digest
        return accum
However, there is no efficient way to implement ``bytes ^ bytes`` in python.
Instead, we dynamically compile a helper function based on the digest size.
Instead of a single ``accum`` var, this helper breaks the digest into a series
of integers. It stores these in a series of ``accum_<i>`` vars, and performs
``accum ^= digest`` by unpacking the digest and XORing each
``accum_<i> ^= digest_<i>``. This keeps everything in locals, avoiding
excessive list creation, encoding or decoding, etc.
:param digest_size:
digest size to compile for, in bytes. (must be multiple of 4).
:return:
helper function with call signature outlined above.
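For example, on a 64-bit host with ``digest_size=32`` the generated helper
(also exposed as ``helper.__source__`` when ``__debug__`` is set) looks like::

    def helper(keyed_hmac, digest, rounds):
        '''pbkdf2 loop helper for digest_size=32'''
        unpack_digest = struct.unpack
        acc_0, acc_1, acc_2, acc_3 = unpack_digest(digest)
        for _ in irange(1, rounds):
            digest = keyed_hmac(digest)
            dig_0, dig_1, dig_2, dig_3 = unpack_digest(digest)
            acc_0 ^= dig_0
            acc_1 ^= dig_1
            acc_2 ^= dig_2
            acc_3 ^= dig_3
        return struct.pack(acc_0, acc_1, acc_2, acc_3)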
"""
#
# cache helpers
#
try:
return _looper_cache[digest_size]
except KeyError:
pass
#
# figure out most efficient struct format to unpack digest into list of native ints
#
if _have_64_bit and not digest_size & 0x7:
# digest size multiple of 8, on a 64 bit system -- use array of UINT64
count = (digest_size >> 3)
fmt = "=%dQ" % count
elif not digest_size & 0x3:
if _have_64_bit:
# digest size multiple of 4, on a 64 bit system -- use array of UINT64 + 1 UINT32
count = (digest_size >> 3)
fmt = "=%dQI" % count
count += 1
else:
# digest size multiple of 4, on a 32 bit system -- use array of UINT32
count = (digest_size >> 2)
fmt = "=%dI" % count
else:
# stopping here, since no known hashes have a digest size that isn't a multiple of 4 bytes.
# if needed, this could be extended using "H" & "B" formats.
raise NotImplementedError("unsupported digest size: %d" % digest_size)
struct = Struct(fmt)
#
# build helper source
#
tdict = dict(
digest_size=digest_size,
accum_vars=", ".join("acc_%d" % i for i in irange(count)),
digest_vars=", ".join("dig_%d" % i for i in irange(count)),
)
# head of function
source = (
"def helper(keyed_hmac, digest, rounds):\n"
"    '''pbkdf2 loop helper for digest_size={digest_size}'''\n"
"    unpack_digest = struct.unpack\n"
"    {accum_vars} = unpack_digest(digest)\n"
"    for _ in irange(1, rounds):\n"
"        digest = keyed_hmac(digest)\n"
"        {digest_vars} = unpack_digest(digest)\n"
).format(**tdict)
# xor digest
for i in irange(count):
source += "        acc_%d ^= dig_%d\n" % (i, i)
# return result
source += "    return struct.pack({accum_vars})\n".format(**tdict)
#
# compile helper
#
code = compile(source, "<generated by zdppy_password_hash.crypto.digest._get_pbkdf2_looper()>", "exec")
gdict = dict(irange=irange, struct=struct)
ldict = dict()
eval(code, gdict, ldict)
helper = ldict['helper']
if __debug__:
helper.__source__ = source
#
# store in cache
#
_looper_cache[digest_size] = helper
return helper
_builtin_backend = "unpack"
else:
assert _force_backend in ["any", "hexlify"]
# XXX: older & slower approach that used int(hexlify()),
# keeping it around for a little while just for benchmarking.
from binascii import hexlify as _hexlify
from zdppy_password_hash.utils import int_to_bytes
def _get_pbkdf2_looper(digest_size):
return _pbkdf2_looper
def _pbkdf2_looper(keyed_hmac, digest, rounds):
hexlify = _hexlify
accum = int(hexlify(digest), 16)
for _ in irange(rounds - 1):
digest = keyed_hmac(digest)
accum ^= int(hexlify(digest), 16)
return int_to_bytes(accum, len(digest))
_builtin_backend = "hexlify"
# helper for benchmark script -- disable hashlib, fastpbkdf2 support if builtin requested
if _force_backend == _builtin_backend:
_fast_pbkdf2_hmac = _stdlib_pbkdf2_hmac = None
# expose info about what backends are active
PBKDF2_BACKENDS = [b for b in [
"fastpbkdf2" if _fast_pbkdf2_hmac else None,
"hashlib-ssl" if _stdlib_pbkdf2_hmac else None,
"builtin-" + _builtin_backend
] if b]
# *very* rough estimate of relative speed (compared to sha256 using 'unpack' backend on 64bit arch)
if "fastpbkdf2" in PBKDF2_BACKENDS:
PBKDF2_SPEED_FACTOR = 3
elif "hashlib-ssl" in PBKDF2_BACKENDS:
PBKDF2_SPEED_FACTOR = 1.4
else:
# remaining backends have *some* difference in performance, but not enough to matter
PBKDF2_SPEED_FACTOR = 1
#=============================================================================
# eof
#=============================================================================
from binascii import hexlify
import struct
# site
from zdppy_password_hash.utils.compat import bascii_to_str, irange, PY3
# local
__all__ = ["md4"]
#=============================================================================
# utils
#=============================================================================
def F(x,y,z):
return (x&y) | ((~x) & z)
def G(x,y,z):
return (x&y) | (x&z) | (y&z)
##def H(x,y,z):
## return x ^ y ^ z
MASK_32 = 2**32-1
#=============================================================================
# main class
#=============================================================================
class md4(object):
"""pep-247 compatible implementation of MD4 hash algorithm
.. attribute:: digest_size
size of md4 digest in bytes (16 bytes)
.. method:: update
update digest by appending additional content
.. method:: copy
create clone of digest object, including current state
.. method:: digest
return bytes representing md4 digest of current content
.. method:: hexdigest
return hexadecimal version of digest
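Example (RFC 1320 test vector)::

    >>> md4(b"abc").hexdigest()
    'a448017aaf21d8525fc10ae87aa6729d'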
"""
# FIXME: make this follow hash object PEP better.
# FIXME: this isn't threadsafe
name = "md4"
digest_size = digestsize = 16
block_size = 64
_count = 0 # number of 64-byte blocks processed so far (not including _buf)
_state = None # list of [a,b,c,d] 32 bit ints used as internal register
_buf = None # data processed in 64 byte blocks, this holds leftover from last update
def __init__(self, content=None):
self._count = 0
self._state = [0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476]
self._buf = b''
if content:
self.update(content)
# round 1 table - [abcd k s]
_round1 = [
[0,1,2,3, 0,3],
[3,0,1,2, 1,7],
[2,3,0,1, 2,11],
[1,2,3,0, 3,19],
[0,1,2,3, 4,3],
[3,0,1,2, 5,7],
[2,3,0,1, 6,11],
[1,2,3,0, 7,19],
[0,1,2,3, 8,3],
[3,0,1,2, 9,7],
[2,3,0,1, 10,11],
[1,2,3,0, 11,19],
[0,1,2,3, 12,3],
[3,0,1,2, 13,7],
[2,3,0,1, 14,11],
[1,2,3,0, 15,19],
]
# round 2 table - [abcd k s]
_round2 = [
[0,1,2,3, 0,3],
[3,0,1,2, 4,5],
[2,3,0,1, 8,9],
[1,2,3,0, 12,13],
[0,1,2,3, 1,3],
[3,0,1,2, 5,5],
[2,3,0,1, 9,9],
[1,2,3,0, 13,13],
[0,1,2,3, 2,3],
[3,0,1,2, 6,5],
[2,3,0,1, 10,9],
[1,2,3,0, 14,13],
[0,1,2,3, 3,3],
[3,0,1,2, 7,5],
[2,3,0,1, 11,9],
[1,2,3,0, 15,13],
]
# round 3 table - [abcd k s]
_round3 = [
[0,1,2,3, 0,3],
[3,0,1,2, 8,9],
[2,3,0,1, 4,11],
[1,2,3,0, 12,15],
[0,1,2,3, 2,3],
[3,0,1,2, 10,9],
[2,3,0,1, 6,11],
[1,2,3,0, 14,15],
[0,1,2,3, 1,3],
[3,0,1,2, 9,9],
[2,3,0,1, 5,11],
[1,2,3,0, 13,15],
[0,1,2,3, 3,3],
[3,0,1,2, 11,9],
[2,3,0,1, 7,11],
[1,2,3,0, 15,15],
]
def _process(self, block):
"""process 64 byte block"""
# unpack block into 16 32-bit ints
X = struct.unpack("<16I", block)
# clone state
orig = self._state
state = list(orig)
# round 1 - F function - (x&y)|(~x & z)
for a,b,c,d,k,s in self._round1:
t = (state[a] + F(state[b],state[c],state[d]) + X[k]) & MASK_32
state[a] = ((t<<s) & MASK_32) + (t>>(32-s))
# round 2 - G function
for a,b,c,d,k,s in self._round2:
t = (state[a] + G(state[b],state[c],state[d]) + X[k] + 0x5a827999) & MASK_32
state[a] = ((t<<s) & MASK_32) + (t>>(32-s))
# round 3 - H function - x ^ y ^ z
for a,b,c,d,k,s in self._round3:
t = (state[a] + (state[b] ^ state[c] ^ state[d]) + X[k] + 0x6ed9eba1) & MASK_32
state[a] = ((t<<s) & MASK_32) + (t>>(32-s))
# add back into original state
for i in irange(4):
orig[i] = (orig[i]+state[i]) & MASK_32
def update(self, content):
if not isinstance(content, bytes):
if PY3:
raise TypeError("expected bytes")
else:
# replicate behavior of hashlib under py2
content = content.encode("ascii")
buf = self._buf
if buf:
content = buf + content
idx = 0
end = len(content)
while True:
stop = idx + 64
if stop <= end:
self._process(content[idx:stop])
self._count += 1
idx = stop
else:
self._buf = content[idx:]
return
def copy(self):
other = md4()
other._count = self._count
other._state = list(self._state)
other._buf = self._buf
return other
def digest(self):
# NOTE: backing up state so we can restore it after _process is called,
# in case object is updated again (this is the only attr altered by this method)
orig = list(self._state)
# final block: buf + 0x80,
# then 0x00 padding until congruent w/ 56 mod 64 bytes
# then last 8 bytes = msg length in bits
buf = self._buf
msglen = self._count*512 + len(buf)*8
block = buf + b'\x80' + b'\x00' * ((119-len(buf)) % 64) + \
struct.pack("<2I", msglen & MASK_32, (msglen>>32) & MASK_32)
if len(block) == 128:
self._process(block[:64])
self._process(block[64:])
else:
assert len(block) == 64
self._process(block)
# render digest & restore un-finalized state
out = struct.pack("<4I", *self._state)
self._state = orig
return out
def hexdigest(self):
return bascii_to_str(hexlify(self.digest()))
#===================================================================
# eoc
#===================================================================
#=============================================================================
# eof
#=============================================================================
#=============================================================================
# imports
#=============================================================================
# core
import struct
# pkg
from zdppy_password_hash import exc
from zdppy_password_hash.utils.compat import join_byte_values, byte_elem_value, \
irange, int_types
# local
__all__ = [
"expand_des_key",
"des_encrypt_block",
]
#=============================================================================
# constants
#=============================================================================
# masks/upper limits for various integer sizes
INT_24_MASK = 0xffffff
INT_56_MASK = 0xffffffffffffff
INT_64_MASK = 0xffffffffffffffff
# mask to clear parity bits from 64-bit key
_KDATA_MASK = 0xfefefefefefefefe
_KPARITY_MASK = 0x0101010101010101
# mask used to setup key schedule
_KS_MASK = 0xfcfcfcfcffffffff
#=============================================================================
# static DES tables
#=============================================================================
# placeholders filled in by _load_tables()
PCXROT = IE3264 = SPE = CF6464 = None
def _load_tables():
"""delay loading tables until they are actually needed"""
global PCXROT, IE3264, SPE, CF6464
#---------------------------------------------------------------
# Initial key schedule permutation
# PC1ROT - bit reverse, then PC1, then Rotate, then PC2
#---------------------------------------------------------------
# NOTE: this was reordered from original table to make perm3264 logic simpler
PC1ROT=(
( 0x0000000000000000, 0x0000000000000000, 0x0000000000002000, 0x0000000000002000,
0x0000000000000020, 0x0000000000000020, 0x0000000000002020, 0x0000000000002020,
0x0000000000000400, 0x0000000000000400, 0x0000000000002400, 0x0000000000002400,
0x0000000000000420, 0x0000000000000420, 0x0000000000002420, 0x0000000000002420, ),
( 0x0000000000000000, 0x2000000000000000, 0x0000000400000000, 0x2000000400000000,
0x0000800000000000, 0x2000800000000000, 0x0000800400000000, 0x2000800400000000,
0x0008000000000000, 0x2008000000000000, 0x0008000400000000, 0x2008000400000000,
0x0008800000000000, 0x2008800000000000, 0x0008800400000000, 0x2008800400000000, ),
( 0x0000000000000000, 0x0000000000000000, 0x0000000000000040, 0x0000000000000040,
0x0000000020000000, 0x0000000020000000, 0x0000000020000040, 0x0000000020000040,
0x0000000000200000, 0x0000000000200000, 0x0000000000200040, 0x0000000000200040,
0x0000000020200000, 0x0000000020200000, 0x0000000020200040, 0x0000000020200040, ),
( 0x0000000000000000, 0x0002000000000000, 0x0800000000000000, 0x0802000000000000,
0x0100000000000000, 0x0102000000000000, 0x0900000000000000, 0x0902000000000000,
0x4000000000000000, 0x4002000000000000, 0x4800000000000000, 0x4802000000000000,
0x4100000000000000, 0x4102000000000000, 0x4900000000000000, 0x4902000000000000, ),
( 0x0000000000000000, 0x0000000000000000, 0x0000000000040000, 0x0000000000040000,
0x0000020000000000, 0x0000020000000000, 0x0000020000040000, 0x0000020000040000,
0x0000000000000004, 0x0000000000000004, 0x0000000000040004, 0x0000000000040004,
0x0000020000000004, 0x0000020000000004, 0x0000020000040004, 0x0000020000040004, ),
( 0x0000000000000000, 0x0000400000000000, 0x0200000000000000, 0x0200400000000000,
0x0080000000000000, 0x0080400000000000, 0x0280000000000000, 0x0280400000000000,
0x0000008000000000, 0x0000408000000000, 0x0200008000000000, 0x0200408000000000,
0x0080008000000000, 0x0080408000000000, 0x0280008000000000, 0x0280408000000000, ),
( 0x0000000000000000, 0x0000000000000000, 0x0000000010000000, 0x0000000010000000,
0x0000000000001000, 0x0000000000001000, 0x0000000010001000, 0x0000000010001000,
0x0000000040000000, 0x0000000040000000, 0x0000000050000000, 0x0000000050000000,
0x0000000040001000, 0x0000000040001000, 0x0000000050001000, 0x0000000050001000, ),
( 0x0000000000000000, 0x0000001000000000, 0x0000080000000000, 0x0000081000000000,
0x1000000000000000, 0x1000001000000000, 0x1000080000000000, 0x1000081000000000,
0x0004000000000000, 0x0004001000000000, 0x0004080000000000, 0x0004081000000000,
0x1004000000000000, 0x1004001000000000, 0x1004080000000000, 0x1004081000000000, ),
( 0x0000000000000000, 0x0000000000000000, 0x0000000000000080, 0x0000000000000080,
0x0000000000080000, 0x0000000000080000, 0x0000000000080080, 0x0000000000080080,
0x0000000000800000, 0x0000000000800000, 0x0000000000800080, 0x0000000000800080,
0x0000000000880000, 0x0000000000880000, 0x0000000000880080, 0x0000000000880080, ),
( 0x0000000000000000, 0x0000000008000000, 0x0000002000000000, 0x0000002008000000,
0x0000100000000000, 0x0000100008000000, 0x0000102000000000, 0x0000102008000000,
0x0000200000000000, 0x0000200008000000, 0x0000202000000000, 0x0000202008000000,
0x0000300000000000, 0x0000300008000000, 0x0000302000000000, 0x0000302008000000, ),
( 0x0000000000000000, 0x0000000000000000, 0x0000000000400000, 0x0000000000400000,
0x0000000004000000, 0x0000000004000000, 0x0000000004400000, 0x0000000004400000,
0x0000000000000800, 0x0000000000000800, 0x0000000000400800, 0x0000000000400800,
0x0000000004000800, 0x0000000004000800, 0x0000000004400800, 0x0000000004400800, ),
( 0x0000000000000000, 0x0000000000008000, 0x0040000000000000, 0x0040000000008000,
0x0000004000000000, 0x0000004000008000, 0x0040004000000000, 0x0040004000008000,
0x8000000000000000, 0x8000000000008000, 0x8040000000000000, 0x8040000000008000,
0x8000004000000000, 0x8000004000008000, 0x8040004000000000, 0x8040004000008000, ),
( 0x0000000000000000, 0x0000000000000000, 0x0000000000004000, 0x0000000000004000,
0x0000000000000008, 0x0000000000000008, 0x0000000000004008, 0x0000000000004008,
0x0000000000000010, 0x0000000000000010, 0x0000000000004010, 0x0000000000004010,
0x0000000000000018, 0x0000000000000018, 0x0000000000004018, 0x0000000000004018, ),
( 0x0000000000000000, 0x0000000200000000, 0x0001000000000000, 0x0001000200000000,
0x0400000000000000, 0x0400000200000000, 0x0401000000000000, 0x0401000200000000,
0x0020000000000000, 0x0020000200000000, 0x0021000000000000, 0x0021000200000000,
0x0420000000000000, 0x0420000200000000, 0x0421000000000000, 0x0421000200000000, ),
( 0x0000000000000000, 0x0000000000000000, 0x0000010000000000, 0x0000010000000000,
0x0000000100000000, 0x0000000100000000, 0x0000010100000000, 0x0000010100000000,
0x0000000000100000, 0x0000000000100000, 0x0000010000100000, 0x0000010000100000,
0x0000000100100000, 0x0000000100100000, 0x0000010100100000, 0x0000010100100000, ),
( 0x0000000000000000, 0x0000000080000000, 0x0000040000000000, 0x0000040080000000,
0x0010000000000000, 0x0010000080000000, 0x0010040000000000, 0x0010040080000000,
0x0000000800000000, 0x0000000880000000, 0x0000040800000000, 0x0000040880000000,
0x0010000800000000, 0x0010000880000000, 0x0010040800000000, 0x0010040880000000, ),
)
#---------------------------------------------------------------
# Subsequent key schedule rotation permutations
# PC2ROT - PC2 inverse, then Rotate, then PC2
#---------------------------------------------------------------
# NOTE: this was reordered from original table to make perm3264 logic simpler
PC2ROTA=(
( 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000,
0x0000000000200000, 0x0000000000200000, 0x0000000000200000, 0x0000000000200000,
0x0000000004000000, 0x0000000004000000, 0x0000000004000000, 0x0000000004000000,
0x0000000004200000, 0x0000000004200000, 0x0000000004200000, 0x0000000004200000, ),
( 0x0000000000000000, 0x0000000000000800, 0x0000010000000000, 0x0000010000000800,
0x0000000000002000, 0x0000000000002800, 0x0000010000002000, 0x0000010000002800,
0x0000000010000000, 0x0000000010000800, 0x0000010010000000, 0x0000010010000800,
0x0000000010002000, 0x0000000010002800, 0x0000010010002000, 0x0000010010002800, ),
( 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000,
0x0000000100000000, 0x0000000100000000, 0x0000000100000000, 0x0000000100000000,
0x0000000000800000, 0x0000000000800000, 0x0000000000800000, 0x0000000000800000,
0x0000000100800000, 0x0000000100800000, 0x0000000100800000, 0x0000000100800000, ),
( 0x0000000000000000, 0x0000020000000000, 0x0000000080000000, 0x0000020080000000,
0x0000000000400000, 0x0000020000400000, 0x0000000080400000, 0x0000020080400000,
0x0000000008000000, 0x0000020008000000, 0x0000000088000000, 0x0000020088000000,
0x0000000008400000, 0x0000020008400000, 0x0000000088400000, 0x0000020088400000, ),
( 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000,
0x0000000000000040, 0x0000000000000040, 0x0000000000000040, 0x0000000000000040,
0x0000000000001000, 0x0000000000001000, 0x0000000000001000, 0x0000000000001000,
0x0000000000001040, 0x0000000000001040, 0x0000000000001040, 0x0000000000001040, ),
( 0x0000000000000000, 0x0000000000000010, 0x0000000000000400, 0x0000000000000410,
0x0000000000000080, 0x0000000000000090, 0x0000000000000480, 0x0000000000000490,
0x0000000040000000, 0x0000000040000010, 0x0000000040000400, 0x0000000040000410,
0x0000000040000080, 0x0000000040000090, 0x0000000040000480, 0x0000000040000490, ),
( 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000,
0x0000000000080000, 0x0000000000080000, 0x0000000000080000, 0x0000000000080000,
0x0000000000100000, 0x0000000000100000, 0x0000000000100000, 0x0000000000100000,
0x0000000000180000, 0x0000000000180000, 0x0000000000180000, 0x0000000000180000, ),
( 0x0000000000000000, 0x0000000000040000, 0x0000000000000020, 0x0000000000040020,
0x0000000000000004, 0x0000000000040004, 0x0000000000000024, 0x0000000000040024,
0x0000000200000000, 0x0000000200040000, 0x0000000200000020, 0x0000000200040020,
0x0000000200000004, 0x0000000200040004, 0x0000000200000024, 0x0000000200040024, ),
( 0x0000000000000000, 0x0000000000000008, 0x0000000000008000, 0x0000000000008008,
0x0010000000000000, 0x0010000000000008, 0x0010000000008000, 0x0010000000008008,
0x0020000000000000, 0x0020000000000008, 0x0020000000008000, 0x0020000000008008,
0x0030000000000000, 0x0030000000000008, 0x0030000000008000, 0x0030000000008008, ),
( 0x0000000000000000, 0x0000400000000000, 0x0000080000000000, 0x0000480000000000,
0x0000100000000000, 0x0000500000000000, 0x0000180000000000, 0x0000580000000000,
0x4000000000000000, 0x4000400000000000, 0x4000080000000000, 0x4000480000000000,
0x4000100000000000, 0x4000500000000000, 0x4000180000000000, 0x4000580000000000, ),
( 0x0000000000000000, 0x0000000000004000, 0x0000000020000000, 0x0000000020004000,
0x0001000000000000, 0x0001000000004000, 0x0001000020000000, 0x0001000020004000,
0x0200000000000000, 0x0200000000004000, 0x0200000020000000, 0x0200000020004000,
0x0201000000000000, 0x0201000000004000, 0x0201000020000000, 0x0201000020004000, ),
( 0x0000000000000000, 0x1000000000000000, 0x0004000000000000, 0x1004000000000000,
0x0002000000000000, 0x1002000000000000, 0x0006000000000000, 0x1006000000000000,
0x0000000800000000, 0x1000000800000000, 0x0004000800000000, 0x1004000800000000,
0x0002000800000000, 0x1002000800000000, 0x0006000800000000, 0x1006000800000000, ),
( 0x0000000000000000, 0x0040000000000000, 0x2000000000000000, 0x2040000000000000,
0x0000008000000000, 0x0040008000000000, 0x2000008000000000, 0x2040008000000000,
0x0000001000000000, 0x0040001000000000, 0x2000001000000000, 0x2040001000000000,
0x0000009000000000, 0x0040009000000000, 0x2000009000000000, 0x2040009000000000, ),
( 0x0000000000000000, 0x0400000000000000, 0x8000000000000000, 0x8400000000000000,
0x0000002000000000, 0x0400002000000000, 0x8000002000000000, 0x8400002000000000,
0x0100000000000000, 0x0500000000000000, 0x8100000000000000, 0x8500000000000000,
0x0100002000000000, 0x0500002000000000, 0x8100002000000000, 0x8500002000000000, ),
( 0x0000000000000000, 0x0000800000000000, 0x0800000000000000, 0x0800800000000000,
0x0000004000000000, 0x0000804000000000, 0x0800004000000000, 0x0800804000000000,
0x0000000400000000, 0x0000800400000000, 0x0800000400000000, 0x0800800400000000,
0x0000004400000000, 0x0000804400000000, 0x0800004400000000, 0x0800804400000000, ),
( 0x0000000000000000, 0x0080000000000000, 0x0000040000000000, 0x0080040000000000,
0x0008000000000000, 0x0088000000000000, 0x0008040000000000, 0x0088040000000000,
0x0000200000000000, 0x0080200000000000, 0x0000240000000000, 0x0080240000000000,
0x0008200000000000, 0x0088200000000000, 0x0008240000000000, 0x0088240000000000, ),
)
# NOTE: this was reordered from original table to make perm3264 logic simpler
PC2ROTB=(
( 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000,
0x0000000000000400, 0x0000000000000400, 0x0000000000000400, 0x0000000000000400,
0x0000000000080000, 0x0000000000080000, 0x0000000000080000, 0x0000000000080000,
0x0000000000080400, 0x0000000000080400, 0x0000000000080400, 0x0000000000080400, ),
( 0x0000000000000000, 0x0000000000800000, 0x0000000000004000, 0x0000000000804000,
0x0000000080000000, 0x0000000080800000, 0x0000000080004000, 0x0000000080804000,
0x0000000000040000, 0x0000000000840000, 0x0000000000044000, 0x0000000000844000,
0x0000000080040000, 0x0000000080840000, 0x0000000080044000, 0x0000000080844000, ),
( 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000,
0x0000000000000008, 0x0000000000000008, 0x0000000000000008, 0x0000000000000008,
0x0000000040000000, 0x0000000040000000, 0x0000000040000000, 0x0000000040000000,
0x0000000040000008, 0x0000000040000008, 0x0000000040000008, 0x0000000040000008, ),
( 0x0000000000000000, 0x0000000020000000, 0x0000000200000000, 0x0000000220000000,
0x0000000000000080, 0x0000000020000080, 0x0000000200000080, 0x0000000220000080,
0x0000000000100000, 0x0000000020100000, 0x0000000200100000, 0x0000000220100000,
0x0000000000100080, 0x0000000020100080, 0x0000000200100080, 0x0000000220100080, ),
( 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000,
0x0000000000002000, 0x0000000000002000, 0x0000000000002000, 0x0000000000002000,
0x0000020000000000, 0x0000020000000000, 0x0000020000000000, 0x0000020000000000,
0x0000020000002000, 0x0000020000002000, 0x0000020000002000, 0x0000020000002000, ),
( 0x0000000000000000, 0x0000000000000800, 0x0000000100000000, 0x0000000100000800,
0x0000000010000000, 0x0000000010000800, 0x0000000110000000, 0x0000000110000800,
0x0000000000000004, 0x0000000000000804, 0x0000000100000004, 0x0000000100000804,
0x0000000010000004, 0x0000000010000804, 0x0000000110000004, 0x0000000110000804, ),
( 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000,
0x0000000000001000, 0x0000000000001000, 0x0000000000001000, 0x0000000000001000,
0x0000000000000010, 0x0000000000000010, 0x0000000000000010, 0x0000000000000010,
0x0000000000001010, 0x0000000000001010, 0x0000000000001010, 0x0000000000001010, ),
( 0x0000000000000000, 0x0000000000000040, 0x0000010000000000, 0x0000010000000040,
0x0000000000200000, 0x0000000000200040, 0x0000010000200000, 0x0000010000200040,
0x0000000000008000, 0x0000000000008040, 0x0000010000008000, 0x0000010000008040,
0x0000000000208000, 0x0000000000208040, 0x0000010000208000, 0x0000010000208040, ),
( 0x0000000000000000, 0x0000000004000000, 0x0000000008000000, 0x000000000c000000,
0x0400000000000000, 0x0400000004000000, 0x0400000008000000, 0x040000000c000000,
0x8000000000000000, 0x8000000004000000, 0x8000000008000000, 0x800000000c000000,
0x8400000000000000, 0x8400000004000000, 0x8400000008000000, 0x840000000c000000, ),
( 0x0000000000000000, 0x0002000000000000, 0x0200000000000000, 0x0202000000000000,
0x1000000000000000, 0x1002000000000000, 0x1200000000000000, 0x1202000000000000,
0x0008000000000000, 0x000a000000000000, 0x0208000000000000, 0x020a000000000000,
0x1008000000000000, 0x100a000000000000, 0x1208000000000000, 0x120a000000000000, ),
( 0x0000000000000000, 0x0000000000400000, 0x0000000000000020, 0x0000000000400020,
0x0040000000000000, 0x0040000000400000, 0x0040000000000020, 0x0040000000400020,
0x0800000000000000, 0x0800000000400000, 0x0800000000000020, 0x0800000000400020,
0x0840000000000000, 0x0840000000400000, 0x0840000000000020, 0x0840000000400020, ),
( 0x0000000000000000, 0x0080000000000000, 0x0000008000000000, 0x0080008000000000,
0x2000000000000000, 0x2080000000000000, 0x2000008000000000, 0x2080008000000000,
0x0020000000000000, 0x00a0000000000000, 0x0020008000000000, 0x00a0008000000000,
0x2020000000000000, 0x20a0000000000000, 0x2020008000000000, 0x20a0008000000000, ),
( 0x0000000000000000, 0x0000002000000000, 0x0000040000000000, 0x0000042000000000,
0x4000000000000000, 0x4000002000000000, 0x4000040000000000, 0x4000042000000000,
0x0000400000000000, 0x0000402000000000, 0x0000440000000000, 0x0000442000000000,
0x4000400000000000, 0x4000402000000000, 0x4000440000000000, 0x4000442000000000, ),
( 0x0000000000000000, 0x0000004000000000, 0x0000200000000000, 0x0000204000000000,
0x0000080000000000, 0x0000084000000000, 0x0000280000000000, 0x0000284000000000,
0x0000800000000000, 0x0000804000000000, 0x0000a00000000000, 0x0000a04000000000,
0x0000880000000000, 0x0000884000000000, 0x0000a80000000000, 0x0000a84000000000, ),
( 0x0000000000000000, 0x0000000800000000, 0x0000000400000000, 0x0000000c00000000,
0x0000100000000000, 0x0000100800000000, 0x0000100400000000, 0x0000100c00000000,
0x0010000000000000, 0x0010000800000000, 0x0010000400000000, 0x0010000c00000000,
0x0010100000000000, 0x0010100800000000, 0x0010100400000000, 0x0010100c00000000, ),
( 0x0000000000000000, 0x0100000000000000, 0x0001000000000000, 0x0101000000000000,
0x0000001000000000, 0x0100001000000000, 0x0001001000000000, 0x0101001000000000,
0x0004000000000000, 0x0104000000000000, 0x0005000000000000, 0x0105000000000000,
0x0004001000000000, 0x0104001000000000, 0x0005001000000000, 0x0105001000000000, ),
)
#---------------------------------------------------------------
# PCXROT - PC1ROT, PC2ROTA, PC2ROTB listed in order
# of the PC1 rotation schedule, as used by des_setkey
#---------------------------------------------------------------
##ROTATES = (1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1)
##PCXROT = (
## PC1ROT, PC2ROTA, PC2ROTB, PC2ROTB,
## PC2ROTB, PC2ROTB, PC2ROTB, PC2ROTB,
## PC2ROTA, PC2ROTB, PC2ROTB, PC2ROTB,
## PC2ROTB, PC2ROTB, PC2ROTB, PC2ROTA,
## )
# NOTE: modified PCXROT to contain entries broken into pairs,
# to help generate them in the format best used by the encoder.
PCXROT = (
(PC1ROT, PC2ROTA), (PC2ROTB, PC2ROTB),
(PC2ROTB, PC2ROTB), (PC2ROTB, PC2ROTB),
(PC2ROTA, PC2ROTB), (PC2ROTB, PC2ROTB),
(PC2ROTB, PC2ROTB), (PC2ROTB, PC2ROTA),
)
#---------------------------------------------------------------
# Bit reverse, initial permutation, expansion
# Initial permutation/expansion table
#---------------------------------------------------------------
# NOTE: this was reordered from original table to make perm3264 logic simpler
IE3264=(
( 0x0000000000000000, 0x0000000000800800, 0x0000000000008008, 0x0000000000808808,
0x0000008008000000, 0x0000008008800800, 0x0000008008008008, 0x0000008008808808,
0x0000000080080000, 0x0000000080880800, 0x0000000080088008, 0x0000000080888808,
0x0000008088080000, 0x0000008088880800, 0x0000008088088008, 0x0000008088888808, ),
( 0x0000000000000000, 0x0080080000000000, 0x0000800800000000, 0x0080880800000000,
0x0800000000000080, 0x0880080000000080, 0x0800800800000080, 0x0880880800000080,
0x8008000000000000, 0x8088080000000000, 0x8008800800000000, 0x8088880800000000,
0x8808000000000080, 0x8888080000000080, 0x8808800800000080, 0x8888880800000080, ),
( 0x0000000000000000, 0x0000000000001000, 0x0000000000000010, 0x0000000000001010,
0x0000000010000000, 0x0000000010001000, 0x0000000010000010, 0x0000000010001010,
0x0000000000100000, 0x0000000000101000, 0x0000000000100010, 0x0000000000101010,
0x0000000010100000, 0x0000000010101000, 0x0000000010100010, 0x0000000010101010, ),
( 0x0000000000000000, 0x0000100000000000, 0x0000001000000000, 0x0000101000000000,
0x1000000000000000, 0x1000100000000000, 0x1000001000000000, 0x1000101000000000,
0x0010000000000000, 0x0010100000000000, 0x0010001000000000, 0x0010101000000000,
0x1010000000000000, 0x1010100000000000, 0x1010001000000000, 0x1010101000000000, ),
( 0x0000000000000000, 0x0000000000002000, 0x0000000000000020, 0x0000000000002020,
0x0000000020000000, 0x0000000020002000, 0x0000000020000020, 0x0000000020002020,
0x0000000000200000, 0x0000000000202000, 0x0000000000200020, 0x0000000000202020,
0x0000000020200000, 0x0000000020202000, 0x0000000020200020, 0x0000000020202020, ),
( 0x0000000000000000, 0x0000200000000000, 0x0000002000000000, 0x0000202000000000,
0x2000000000000000, 0x2000200000000000, 0x2000002000000000, 0x2000202000000000,
0x0020000000000000, 0x0020200000000000, 0x0020002000000000, 0x0020202000000000,
0x2020000000000000, 0x2020200000000000, 0x2020002000000000, 0x2020202000000000, ),
( 0x0000000000000000, 0x0000000000004004, 0x0400000000000040, 0x0400000000004044,
0x0000000040040000, 0x0000000040044004, 0x0400000040040040, 0x0400000040044044,
0x0000000000400400, 0x0000000000404404, 0x0400000000400440, 0x0400000000404444,
0x0000000040440400, 0x0000000040444404, 0x0400000040440440, 0x0400000040444444, ),
( 0x0000000000000000, 0x0000400400000000, 0x0000004004000000, 0x0000404404000000,
0x4004000000000000, 0x4004400400000000, 0x4004004004000000, 0x4004404404000000,
0x0040040000000000, 0x0040440400000000, 0x0040044004000000, 0x0040444404000000,
0x4044040000000000, 0x4044440400000000, 0x4044044004000000, 0x4044444404000000, ),
)
#---------------------------------------------------------------
# Table that combines the S, P, and E operations.
#---------------------------------------------------------------
SPE=(
( 0x0080088008200000, 0x0000008008000000, 0x0000000000200020, 0x0080088008200020,
0x0000000000200000, 0x0080088008000020, 0x0000008008000020, 0x0000000000200020,
0x0080088008000020, 0x0080088008200000, 0x0000008008200000, 0x0080080000000020,
0x0080080000200020, 0x0000000000200000, 0x0000000000000000, 0x0000008008000020,
0x0000008008000000, 0x0000000000000020, 0x0080080000200000, 0x0080088008000000,
0x0080088008200020, 0x0000008008200000, 0x0080080000000020, 0x0080080000200000,
0x0000000000000020, 0x0080080000000000, 0x0080088008000000, 0x0000008008200020,
0x0080080000000000, 0x0080080000200020, 0x0000008008200020, 0x0000000000000000,
0x0000000000000000, 0x0080088008200020, 0x0080080000200000, 0x0000008008000020,
0x0080088008200000, 0x0000008008000000, 0x0080080000000020, 0x0080080000200000,
0x0000008008200020, 0x0080080000000000, 0x0080088008000000, 0x0000000000200020,
0x0080088008000020, 0x0000000000000020, 0x0000000000200020, 0x0000008008200000,
0x0080088008200020, 0x0080088008000000, 0x0000008008200000, 0x0080080000200020,
0x0000000000200000, 0x0080080000000020, 0x0000008008000020, 0x0000000000000000,
0x0000008008000000, 0x0000000000200000, 0x0080080000200020, 0x0080088008200000,
0x0000000000000020, 0x0000008008200020, 0x0080080000000000, 0x0080088008000020, ),
( 0x1000800810004004, 0x0000000000000000, 0x0000800810000000, 0x0000000010004004,
0x1000000000004004, 0x1000800800000000, 0x0000800800004004, 0x0000800810000000,
0x0000800800000000, 0x1000000010004004, 0x1000000000000000, 0x0000800800004004,
0x1000000010000000, 0x0000800810004004, 0x0000000010004004, 0x1000000000000000,
0x0000000010000000, 0x1000800800004004, 0x1000000010004004, 0x0000800800000000,
0x1000800810000000, 0x0000000000004004, 0x0000000000000000, 0x1000000010000000,
0x1000800800004004, 0x1000800810000000, 0x0000800810004004, 0x1000000000004004,
0x0000000000004004, 0x0000000010000000, 0x1000800800000000, 0x1000800810004004,
0x1000000010000000, 0x0000800810004004, 0x0000800800004004, 0x1000800810000000,
0x1000800810004004, 0x1000000010000000, 0x1000000000004004, 0x0000000000000000,
0x0000000000004004, 0x1000800800000000, 0x0000000010000000, 0x1000000010004004,
0x0000800800000000, 0x0000000000004004, 0x1000800810000000, 0x1000800800004004,
0x0000800810004004, 0x0000800800000000, 0x0000000000000000, 0x1000000000004004,
0x1000000000000000, 0x1000800810004004, 0x0000800810000000, 0x0000000010004004,
0x1000000010004004, 0x0000000010000000, 0x1000800800000000, 0x0000800800004004,
0x1000800800004004, 0x1000000000000000, 0x0000000010004004, 0x0000800810000000, ),
( 0x0000000000400410, 0x0010004004400400, 0x0010000000000000, 0x0010000000400410,
0x0000004004000010, 0x0000000000400400, 0x0010000000400410, 0x0010004004000000,
0x0010000000400400, 0x0000004004000000, 0x0000004004400400, 0x0000000000000010,
0x0010004004400410, 0x0010000000000010, 0x0000000000000010, 0x0000004004400410,
0x0000000000000000, 0x0000004004000010, 0x0010004004400400, 0x0010000000000000,
0x0010000000000010, 0x0010004004400410, 0x0000004004000000, 0x0000000000400410,
0x0000004004400410, 0x0010000000400400, 0x0010004004000010, 0x0000004004400400,
0x0010004004000000, 0x0000000000000000, 0x0000000000400400, 0x0010004004000010,
0x0010004004400400, 0x0010000000000000, 0x0000000000000010, 0x0000004004000000,
0x0010000000000010, 0x0000004004000010, 0x0000004004400400, 0x0010000000400410,
0x0000000000000000, 0x0010004004400400, 0x0010004004000000, 0x0000004004400410,
0x0000004004000010, 0x0000000000400400, 0x0010004004400410, 0x0000000000000010,
0x0010004004000010, 0x0000000000400410, 0x0000000000400400, 0x0010004004400410,
0x0000004004000000, 0x0010000000400400, 0x0010000000400410, 0x0010004004000000,
0x0010000000400400, 0x0000000000000000, 0x0000004004400410, 0x0010000000000010,
0x0000000000400410, 0x0010004004000010, 0x0010000000000000, 0x0000004004400400, ),
( 0x0800100040040080, 0x0000100000001000, 0x0800000000000080, 0x0800100040041080,
0x0000000000000000, 0x0000000040041000, 0x0800100000001080, 0x0800000040040080,
0x0000100040041000, 0x0800000000001080, 0x0000000000001000, 0x0800100000000080,
0x0800000000001080, 0x0800100040040080, 0x0000000040040000, 0x0000000000001000,
0x0800000040041080, 0x0000100040040000, 0x0000100000000000, 0x0800000000000080,
0x0000100040040000, 0x0800100000001080, 0x0000000040041000, 0x0000100000000000,
0x0800100000000080, 0x0000000000000000, 0x0800000040040080, 0x0000100040041000,
0x0000100000001000, 0x0800000040041080, 0x0800100040041080, 0x0000000040040000,
0x0800000040041080, 0x0800100000000080, 0x0000000040040000, 0x0800000000001080,
0x0000100040040000, 0x0000100000001000, 0x0800000000000080, 0x0000000040041000,
0x0800100000001080, 0x0000000000000000, 0x0000100000000000, 0x0800000040040080,
0x0000000000000000, 0x0800000040041080, 0x0000100040041000, 0x0000100000000000,
0x0000000000001000, 0x0800100040041080, 0x0800100040040080, 0x0000000040040000,
0x0800100040041080, 0x0800000000000080, 0x0000100000001000, 0x0800100040040080,
0x0800000040040080, 0x0000100040040000, 0x0000000040041000, 0x0800100000001080,
0x0800100000000080, 0x0000000000001000, 0x0800000000001080, 0x0000100040041000, ),
( 0x0000000000800800, 0x0000001000000000, 0x0040040000000000, 0x2040041000800800,
0x2000001000800800, 0x0040040000800800, 0x2040041000000000, 0x0000001000800800,
0x0000001000000000, 0x2000000000000000, 0x2000000000800800, 0x0040041000000000,
0x2040040000800800, 0x2000001000800800, 0x0040041000800800, 0x0000000000000000,
0x0040041000000000, 0x0000000000800800, 0x2000001000000000, 0x2040040000000000,
0x0040040000800800, 0x2040041000000000, 0x0000000000000000, 0x2000000000800800,
0x2000000000000000, 0x2040040000800800, 0x2040041000800800, 0x2000001000000000,
0x0000001000800800, 0x0040040000000000, 0x2040040000000000, 0x0040041000800800,
0x0040041000800800, 0x2040040000800800, 0x2000001000000000, 0x0000001000800800,
0x0000001000000000, 0x2000000000000000, 0x2000000000800800, 0x0040040000800800,
0x0000000000800800, 0x0040041000000000, 0x2040041000800800, 0x0000000000000000,
0x2040041000000000, 0x0000000000800800, 0x0040040000000000, 0x2000001000000000,
0x2040040000800800, 0x0040040000000000, 0x0000000000000000, 0x2040041000800800,
0x2000001000800800, 0x0040041000800800, 0x2040040000000000, 0x0000001000000000,
0x0040041000000000, 0x2000001000800800, 0x0040040000800800, 0x2040040000000000,
0x2000000000000000, 0x2040041000000000, 0x0000001000800800, 0x2000000000800800, ),
( 0x4004000000008008, 0x4004000020000000, 0x0000000000000000, 0x0000200020008008,
0x4004000020000000, 0x0000200000000000, 0x4004200000008008, 0x0000000020000000,
0x4004200000000000, 0x4004200020008008, 0x0000200020000000, 0x0000000000008008,
0x0000200000008008, 0x4004000000008008, 0x0000000020008008, 0x4004200020000000,
0x0000000020000000, 0x4004200000008008, 0x4004000020008008, 0x0000000000000000,
0x0000200000000000, 0x4004000000000000, 0x0000200020008008, 0x4004000020008008,
0x4004200020008008, 0x0000000020008008, 0x0000000000008008, 0x4004200000000000,
0x4004000000000000, 0x0000200020000000, 0x4004200020000000, 0x0000200000008008,
0x4004200000000000, 0x0000000000008008, 0x0000200000008008, 0x4004200020000000,
0x0000200020008008, 0x4004000020000000, 0x0000000000000000, 0x0000200000008008,
0x0000000000008008, 0x0000200000000000, 0x4004000020008008, 0x0000000020000000,
0x4004000020000000, 0x4004200020008008, 0x0000200020000000, 0x4004000000000000,
0x4004200020008008, 0x0000200020000000, 0x0000000020000000, 0x4004200000008008,
0x4004000000008008, 0x0000000020008008, 0x4004200020000000, 0x0000000000000000,
0x0000200000000000, 0x4004000000008008, 0x4004200000008008, 0x0000200020008008,
0x0000000020008008, 0x4004200000000000, 0x4004000000000000, 0x4004000020008008, ),
( 0x0000400400000000, 0x0020000000000000, 0x0020000000100000, 0x0400000000100040,
0x0420400400100040, 0x0400400400000040, 0x0020400400000000, 0x0000000000000000,
0x0000000000100000, 0x0420000000100040, 0x0420000000000040, 0x0000400400100000,
0x0400000000000040, 0x0020400400100000, 0x0000400400100000, 0x0420000000000040,
0x0420000000100040, 0x0000400400000000, 0x0400400400000040, 0x0420400400100040,
0x0000000000000000, 0x0020000000100000, 0x0400000000100040, 0x0020400400000000,
0x0400400400100040, 0x0420400400000040, 0x0020400400100000, 0x0400000000000040,
0x0420400400000040, 0x0400400400100040, 0x0020000000000000, 0x0000000000100000,
0x0420400400000040, 0x0000400400100000, 0x0400400400100040, 0x0420000000000040,
0x0000400400000000, 0x0020000000000000, 0x0000000000100000, 0x0400400400100040,
0x0420000000100040, 0x0420400400000040, 0x0020400400000000, 0x0000000000000000,
0x0020000000000000, 0x0400000000100040, 0x0400000000000040, 0x0020000000100000,
0x0000000000000000, 0x0420000000100040, 0x0020000000100000, 0x0020400400000000,
0x0420000000000040, 0x0000400400000000, 0x0420400400100040, 0x0000000000100000,
0x0020400400100000, 0x0400000000000040, 0x0400400400000040, 0x0420400400100040,
0x0400000000100040, 0x0020400400100000, 0x0000400400100000, 0x0400400400000040, ),
( 0x8008000080082000, 0x0000002080082000, 0x8008002000000000, 0x0000000000000000,
0x0000002000002000, 0x8008000080080000, 0x0000000080082000, 0x8008002080082000,
0x8008000000000000, 0x0000000000002000, 0x0000002080080000, 0x8008002000000000,
0x8008002080080000, 0x8008002000002000, 0x8008000000002000, 0x0000000080082000,
0x0000002000000000, 0x8008002080080000, 0x8008000080080000, 0x0000002000002000,
0x8008002080082000, 0x8008000000002000, 0x0000000000000000, 0x0000002080080000,
0x0000000000002000, 0x0000000080080000, 0x8008002000002000, 0x8008000080082000,
0x0000000080080000, 0x0000002000000000, 0x0000002080082000, 0x8008000000000000,
0x0000000080080000, 0x0000002000000000, 0x8008000000002000, 0x8008002080082000,
0x8008002000000000, 0x0000000000002000, 0x0000000000000000, 0x0000002080080000,
0x8008000080082000, 0x8008002000002000, 0x0000002000002000, 0x8008000080080000,
0x0000002080082000, 0x8008000000000000, 0x8008000080080000, 0x0000002000002000,
0x8008002080082000, 0x0000000080080000, 0x0000000080082000, 0x8008000000002000,
0x0000002080080000, 0x8008002000000000, 0x8008002000002000, 0x0000000080082000,
0x8008000000000000, 0x0000002080082000, 0x8008002080080000, 0x0000000000000000,
0x0000000000002000, 0x8008000080082000, 0x0000002000000000, 0x8008002080080000, ),
)
#---------------------------------------------------------------
# compressed/interleaved => final permutation table
# Compression, final permutation, bit reverse
#---------------------------------------------------------------
# NOTE: this was reordered from original table to make perm6464 logic simpler
CF6464=(
( 0x0000000000000000, 0x0000002000000000, 0x0000200000000000, 0x0000202000000000,
0x0020000000000000, 0x0020002000000000, 0x0020200000000000, 0x0020202000000000,
0x2000000000000000, 0x2000002000000000, 0x2000200000000000, 0x2000202000000000,
0x2020000000000000, 0x2020002000000000, 0x2020200000000000, 0x2020202000000000, ),
( 0x0000000000000000, 0x0000000200000000, 0x0000020000000000, 0x0000020200000000,
0x0002000000000000, 0x0002000200000000, 0x0002020000000000, 0x0002020200000000,
0x0200000000000000, 0x0200000200000000, 0x0200020000000000, 0x0200020200000000,
0x0202000000000000, 0x0202000200000000, 0x0202020000000000, 0x0202020200000000, ),
( 0x0000000000000000, 0x0000000000000020, 0x0000000000002000, 0x0000000000002020,
0x0000000000200000, 0x0000000000200020, 0x0000000000202000, 0x0000000000202020,
0x0000000020000000, 0x0000000020000020, 0x0000000020002000, 0x0000000020002020,
0x0000000020200000, 0x0000000020200020, 0x0000000020202000, 0x0000000020202020, ),
( 0x0000000000000000, 0x0000000000000002, 0x0000000000000200, 0x0000000000000202,
0x0000000000020000, 0x0000000000020002, 0x0000000000020200, 0x0000000000020202,
0x0000000002000000, 0x0000000002000002, 0x0000000002000200, 0x0000000002000202,
0x0000000002020000, 0x0000000002020002, 0x0000000002020200, 0x0000000002020202, ),
( 0x0000000000000000, 0x0000008000000000, 0x0000800000000000, 0x0000808000000000,
0x0080000000000000, 0x0080008000000000, 0x0080800000000000, 0x0080808000000000,
0x8000000000000000, 0x8000008000000000, 0x8000800000000000, 0x8000808000000000,
0x8080000000000000, 0x8080008000000000, 0x8080800000000000, 0x8080808000000000, ),
( 0x0000000000000000, 0x0000000800000000, 0x0000080000000000, 0x0000080800000000,
0x0008000000000000, 0x0008000800000000, 0x0008080000000000, 0x0008080800000000,
0x0800000000000000, 0x0800000800000000, 0x0800080000000000, 0x0800080800000000,
0x0808000000000000, 0x0808000800000000, 0x0808080000000000, 0x0808080800000000, ),
( 0x0000000000000000, 0x0000000000000080, 0x0000000000008000, 0x0000000000008080,
0x0000000000800000, 0x0000000000800080, 0x0000000000808000, 0x0000000000808080,
0x0000000080000000, 0x0000000080000080, 0x0000000080008000, 0x0000000080008080,
0x0000000080800000, 0x0000000080800080, 0x0000000080808000, 0x0000000080808080, ),
( 0x0000000000000000, 0x0000000000000008, 0x0000000000000800, 0x0000000000000808,
0x0000000000080000, 0x0000000000080008, 0x0000000000080800, 0x0000000000080808,
0x0000000008000000, 0x0000000008000008, 0x0000000008000800, 0x0000000008000808,
0x0000000008080000, 0x0000000008080008, 0x0000000008080800, 0x0000000008080808, ),
( 0x0000000000000000, 0x0000001000000000, 0x0000100000000000, 0x0000101000000000,
0x0010000000000000, 0x0010001000000000, 0x0010100000000000, 0x0010101000000000,
0x1000000000000000, 0x1000001000000000, 0x1000100000000000, 0x1000101000000000,
0x1010000000000000, 0x1010001000000000, 0x1010100000000000, 0x1010101000000000, ),
( 0x0000000000000000, 0x0000000100000000, 0x0000010000000000, 0x0000010100000000,
0x0001000000000000, 0x0001000100000000, 0x0001010000000000, 0x0001010100000000,
0x0100000000000000, 0x0100000100000000, 0x0100010000000000, 0x0100010100000000,
0x0101000000000000, 0x0101000100000000, 0x0101010000000000, 0x0101010100000000, ),
( 0x0000000000000000, 0x0000000000000010, 0x0000000000001000, 0x0000000000001010,
0x0000000000100000, 0x0000000000100010, 0x0000000000101000, 0x0000000000101010,
0x0000000010000000, 0x0000000010000010, 0x0000000010001000, 0x0000000010001010,
0x0000000010100000, 0x0000000010100010, 0x0000000010101000, 0x0000000010101010, ),
( 0x0000000000000000, 0x0000000000000001, 0x0000000000000100, 0x0000000000000101,
0x0000000000010000, 0x0000000000010001, 0x0000000000010100, 0x0000000000010101,
0x0000000001000000, 0x0000000001000001, 0x0000000001000100, 0x0000000001000101,
0x0000000001010000, 0x0000000001010001, 0x0000000001010100, 0x0000000001010101, ),
( 0x0000000000000000, 0x0000004000000000, 0x0000400000000000, 0x0000404000000000,
0x0040000000000000, 0x0040004000000000, 0x0040400000000000, 0x0040404000000000,
0x4000000000000000, 0x4000004000000000, 0x4000400000000000, 0x4000404000000000,
0x4040000000000000, 0x4040004000000000, 0x4040400000000000, 0x4040404000000000, ),
( 0x0000000000000000, 0x0000000400000000, 0x0000040000000000, 0x0000040400000000,
0x0004000000000000, 0x0004000400000000, 0x0004040000000000, 0x0004040400000000,
0x0400000000000000, 0x0400000400000000, 0x0400040000000000, 0x0400040400000000,
0x0404000000000000, 0x0404000400000000, 0x0404040000000000, 0x0404040400000000, ),
( 0x0000000000000000, 0x0000000000000040, 0x0000000000004000, 0x0000000000004040,
0x0000000000400000, 0x0000000000400040, 0x0000000000404000, 0x0000000000404040,
0x0000000040000000, 0x0000000040000040, 0x0000000040004000, 0x0000000040004040,
0x0000000040400000, 0x0000000040400040, 0x0000000040404000, 0x0000000040404040, ),
( 0x0000000000000000, 0x0000000000000004, 0x0000000000000400, 0x0000000000000404,
0x0000000000040000, 0x0000000000040004, 0x0000000000040400, 0x0000000000040404,
0x0000000004000000, 0x0000000004000004, 0x0000000004000400, 0x0000000004000404,
0x0000000004040000, 0x0000000004040004, 0x0000000004040400, 0x0000000004040404, ),
)
#===================================================================
# eof _load_tables()
#===================================================================
#=============================================================================
# support
#=============================================================================
def _permute(c, p):
"""Returns the permutation of the given 32-bit or 64-bit code with
the specified permutation table."""
# NOTE: only difference between 32 & 64 bit permutations
# is that len(p)==8 for 32 bit, and len(p)==16 for 64 bit.
out = 0
for r in p:
out |= r[c&0xf]
c >>= 4
return out
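# NOTE: p[0] maps the low nibble (bits 0-3) of c, p[1] the next nibble, etc;
# each table row is a 16-entry lookup for one 4-bit nibble, and the per-nibble
# results are OR'd together to form the permuted output.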
#=============================================================================
# packing & unpacking
#=============================================================================
# FIXME: more properly named _uint8_struct...
_uint64_struct = struct.Struct(">Q")
def _pack64(value):
return _uint64_struct.pack(value)
def _unpack64(value):
return _uint64_struct.unpack(value)[0]
def _pack56(value):
return _uint64_struct.pack(value)[1:]
def _unpack56(value):
return _uint64_struct.unpack(b'\x00' + value)[0]
#=============================================================================
# 56->64 key manipulation
#=============================================================================
##def expand_7bit(value):
## "expand 7-bit integer => 7-bits + 1 odd-parity bit"
## # parity calc adapted from 32-bit even parity alg found at
## # http://graphics.stanford.edu/~seander/bithacks.html#ParityParallel
## assert 0 <= value < 0x80, "value out of range"
## return (value<<1) | (0x9669 >> ((value ^ (value >> 4)) & 0xf)) & 1
_EXPAND_ITER = irange(49,-7,-7)
def expand_des_key(key):
"""convert DES from 7 bytes to 8 bytes (by inserting empty parity bits)"""
if isinstance(key, bytes):
if len(key) != 7:
raise ValueError("key must be 7 bytes in size")
elif isinstance(key, int_types):
if key < 0 or key > INT_56_MASK:
raise ValueError("key must be 56-bit non-negative integer")
return _unpack64(expand_des_key(_pack56(key)))
else:
raise exc.ExpectedTypeError(key, "bytes or int", "key")
key = _unpack56(key)
# NOTE: the following would insert correctly-valued parity bits in each key,
# but the parity bit would just be ignored in des_encrypt_block(),
# so not bothering to use it.
# XXX: could make parity-restoring optionally available via flag
##return join_byte_values(expand_7bit((key >> shift) & 0x7f)
## for shift in _EXPAND_ITER)
return join_byte_values(((key>>shift) & 0x7f)<<1 for shift in _EXPAND_ITER)
def shrink_des_key(key):
"""convert DES key from 8 bytes to 7 bytes (by discarding the parity bits)"""
if isinstance(key, bytes):
if len(key) != 8:
raise ValueError("key must be 8 bytes in size")
return _pack56(shrink_des_key(_unpack64(key)))
elif isinstance(key, int_types):
if key < 0 or key > INT_64_MASK:
raise ValueError("key must be 64-bit non-negative integer")
else:
raise exc.ExpectedTypeError(key, "bytes or int", "key")
key >>= 1
result = 0
offset = 0
while offset < 56:
result |= (key & 0x7f)<<offset
key >>= 8
offset += 7
assert not (result & ~INT_56_MASK), "result should fit in 56 bits"
return result
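# NOTE: shrink_des_key() inverts expand_des_key() (the parity bits are simply
# discarded), e.g.:
#
#   key7 = b"\x01\x02\x03\x04\x05\x06\x07"
#   assert shrink_des_key(expand_des_key(key7)) == key7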
#=============================================================================
# des encryption
#=============================================================================
def des_encrypt_block(key, input, salt=0, rounds=1):
"""encrypt single block of data using DES, operates on 8-byte strings.
:arg key:
DES key as 7 byte string, or 8 byte string with parity bits
(parity bit values are ignored).
:arg input:
plaintext block to encrypt, as 8 byte string.
:arg salt:
Optional 24-bit integer used to mutate the base DES algorithm in a
manner specific to :class:`~zdppy_password_hash.hash.des_crypt` and its variants.
The default value ``0`` provides the normal (unsalted) DES behavior.
The salt functions as follows:
if the ``i``'th bit of ``salt`` is set,
bits ``i`` and ``i+24`` are swapped in the DES E-box output.
:arg rounds:
Optional number of rounds of the DES key schedule to apply.
the default (``rounds=1``) provides the normal DES behavior,
but :class:`~zdppy_password_hash.hash.des_crypt` and its variants use
alternate rounds values.
:raises TypeError: if any of the provided args are of the wrong type.
:raises ValueError:
if any of the input blocks are the wrong size,
or the salt/rounds values are out of range.
:returns:
resulting 8-byte ciphertext block.
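Usage sketch (the classic "Now is t" single-block DES test vector; the
default ``salt=0`` / ``rounds=1`` gives plain DES)::

    ct = des_encrypt_block(b"\x01\x23\x45\x67\x89\xab\xcd\xef", b"Now is t")
    assert ct == b"\x3f\xa4\x0e\x8a\x98\x4d\x48\x15"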
"""
# validate & unpack key
if isinstance(key, bytes):
if len(key) == 7:
key = expand_des_key(key)
elif len(key) != 8:
raise ValueError("key must be 7 or 8 bytes")
key = _unpack64(key)
else:
raise exc.ExpectedTypeError(key, "bytes", "key")
# validate & unpack input
if isinstance(input, bytes):
if len(input) != 8:
raise ValueError("input block must be 8 bytes")
input = _unpack64(input)
else:
raise exc.ExpectedTypeError(input, "bytes", "input")
# hand things off to other func
result = des_encrypt_int_block(key, input, salt, rounds)
# repack result
return _pack64(result)
def des_encrypt_int_block(key, input, salt=0, rounds=1):
"""encrypt single block of data using DES, operates on 64-bit integers.
this function is essentially the same as :func:`des_encrypt_block`,
except that it operates on integers, and will NOT automatically
expand 56-bit keys if provided (since there's no way to detect them).
:arg key:
DES key as 64-bit integer (the parity bits are ignored).
:arg input:
input block as 64-bit integer
:arg salt:
optional 24-bit integer used to mutate the base DES algorithm.
defaults to ``0`` (no mutation applied).
:arg rounds:
        optional number of rounds to apply the DES key schedule.
defaults to ``1``.
:raises TypeError: if any of the provided args are of the wrong type.
:raises ValueError:
if any of the input blocks are the wrong size,
or the salt/rounds values are out of range.
:returns:
resulting ciphertext as 64-bit integer.
"""
#---------------------------------------------------------------
# input validation
#---------------------------------------------------------------
# validate salt, rounds
if rounds < 1:
raise ValueError("rounds must be positive integer")
if salt < 0 or salt > INT_24_MASK:
raise ValueError("salt must be 24-bit non-negative integer")
# validate & unpack key
if not isinstance(key, int_types):
raise exc.ExpectedTypeError(key, "int", "key")
elif key < 0 or key > INT_64_MASK:
raise ValueError("key must be 64-bit non-negative integer")
# validate & unpack input
if not isinstance(input, int_types):
raise exc.ExpectedTypeError(input, "int", "input")
elif input < 0 or input > INT_64_MASK:
raise ValueError("input must be 64-bit non-negative integer")
#---------------------------------------------------------------
# DES setup
#---------------------------------------------------------------
# load tables if not already done
global SPE, PCXROT, IE3264, CF6464
if PCXROT is None:
_load_tables()
# load SPE into local vars to speed things up and remove an array access call
SPE0, SPE1, SPE2, SPE3, SPE4, SPE5, SPE6, SPE7 = SPE
# NOTE: parity bits are ignored completely
# (UTs do fuzz testing to ensure this)
# generate key schedule
# NOTE: generation was modified to output two elements at a time,
# so that per-round loop could do two passes at once.
def _iter_key_schedule(ks_odd):
"""given 64-bit key, iterates over the 8 (even,odd) key schedule pairs"""
for p_even, p_odd in PCXROT:
ks_even = _permute(ks_odd, p_even)
ks_odd = _permute(ks_even, p_odd)
yield ks_even & _KS_MASK, ks_odd & _KS_MASK
ks_list = list(_iter_key_schedule(key))
# expand 24 bit salt -> 32 bit per des_crypt & bsdi_crypt
salt = (
((salt & 0x00003f) << 26) |
((salt & 0x000fc0) << 12) |
((salt & 0x03f000) >> 2) |
((salt & 0xfc0000) >> 16)
)
# init L & R
if input == 0:
L = R = 0
else:
L = ((input >> 31) & 0xaaaaaaaa) | (input & 0x55555555)
L = _permute(L, IE3264)
R = ((input >> 32) & 0xaaaaaaaa) | ((input >> 1) & 0x55555555)
R = _permute(R, IE3264)
#---------------------------------------------------------------
# main DES loop - run for specified number of rounds
#---------------------------------------------------------------
while rounds:
rounds -= 1
# run over each part of the schedule, 2 parts at a time
for ks_even, ks_odd in ks_list:
k = ((R>>32) ^ R) & salt # use the salt to flip specific bits
B = (k<<32) ^ k ^ R ^ ks_even
L ^= (SPE0[(B>>58)&0x3f] ^ SPE1[(B>>50)&0x3f] ^
SPE2[(B>>42)&0x3f] ^ SPE3[(B>>34)&0x3f] ^
SPE4[(B>>26)&0x3f] ^ SPE5[(B>>18)&0x3f] ^
SPE6[(B>>10)&0x3f] ^ SPE7[(B>>2)&0x3f])
k = ((L>>32) ^ L) & salt # use the salt to flip specific bits
B = (k<<32) ^ k ^ L ^ ks_odd
R ^= (SPE0[(B>>58)&0x3f] ^ SPE1[(B>>50)&0x3f] ^
SPE2[(B>>42)&0x3f] ^ SPE3[(B>>34)&0x3f] ^
SPE4[(B>>26)&0x3f] ^ SPE5[(B>>18)&0x3f] ^
SPE6[(B>>10)&0x3f] ^ SPE7[(B>>2)&0x3f])
# swap L and R
L, R = R, L
#---------------------------------------------------------------
# return final result
#---------------------------------------------------------------
C = (
((L>>3) & 0x0f0f0f0f00000000)
|
((L<<33) & 0xf0f0f0f000000000)
|
((R>>35) & 0x000000000f0f0f0f)
|
((R<<1) & 0x00000000f0f0f0f0)
)
return _permute(C, CF6464)
#=============================================================================
# eof
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/crypto/des.py | des.py |
#==========================================================================
# imports
#==========================================================================
# core
import operator
import struct
# pkg
from zdppy_password_hash.utils.compat import izip
from zdppy_password_hash.crypto.digest import pbkdf2_hmac
from zdppy_password_hash.crypto.scrypt._salsa import salsa20
# local
__all__ =[
"ScryptEngine",
]
#==========================================================================
# scrypt engine
#==========================================================================
class ScryptEngine(object):
"""
helper class used to run scrypt kdf, see scrypt() for frontend
.. warning::
this class does NO validation of the input ranges or types.
it's not intended to be used directly,
    but only as a backend for :func:`zdppy_password_hash.crypto.scrypt.scrypt`.
"""
#=================================================================
# instance attrs
#=================================================================
# primary scrypt config parameters
n = 0
r = 0
p = 0
# derived values & objects
smix_bytes = 0
iv_bytes = 0
bmix_len = 0
bmix_half_len = 0
bmix_struct = None
integerify = None
#=================================================================
# frontend
#=================================================================
@classmethod
def execute(cls, secret, salt, n, r, p, keylen):
"""create engine & run scrypt() hash calculation"""
return cls(n, r, p).run(secret, salt, keylen)
#=================================================================
# init
#=================================================================
def __init__(self, n, r, p):
# store config
self.n = n
self.r = r
self.p = p
self.smix_bytes = r << 7 # num bytes in smix input - 2*r*16*4
self.iv_bytes = self.smix_bytes * p
self.bmix_len = bmix_len = r << 5 # length of bmix block list - 32*r integers
self.bmix_half_len = r << 4
assert struct.calcsize("I") == 4
self.bmix_struct = struct.Struct("<" + str(bmix_len) + "I")
# use optimized bmix for certain cases
if r == 1:
self.bmix = self._bmix_1
# pick best integerify function - integerify(bmix_block) should
# take last 64 bytes of block and return a little-endian integer.
        # since it's immediately reduced % n, we only have to extract
        # the low 32 bits if n < 2**32 - which, due to the current
        # internal representation, are already unpacked as a 32-bit int.
if n <= 0xFFFFffff:
integerify = operator.itemgetter(-16)
else:
assert n <= 0xFFFFffffFFFFffff
ig1 = operator.itemgetter(-16)
ig2 = operator.itemgetter(-17)
def integerify(X):
return ig1(X) | (ig2(X)<<32)
self.integerify = integerify
#=================================================================
# frontend
#=================================================================
def run(self, secret, salt, keylen):
"""
run scrypt kdf for specified secret, salt, and keylen
.. note::
* time cost is ``O(n * r * p)``
* mem cost is ``O(n * r)``
"""
# stretch salt into initial byte array via pbkdf2
iv_bytes = self.iv_bytes
input = pbkdf2_hmac("sha256", secret, salt, rounds=1, keylen=iv_bytes)
# split initial byte array into 'p' mflen-sized chunks,
# and run each chunk through smix() to generate output chunk.
smix = self.smix
if self.p == 1:
output = smix(input)
else:
# XXX: *could* use threading here, if really high p values encountered,
# but would tradeoff for more memory usage.
smix_bytes = self.smix_bytes
output = b''.join(
smix(input[offset:offset+smix_bytes])
for offset in range(0, iv_bytes, smix_bytes)
)
# stretch final byte array into output via pbkdf2
return pbkdf2_hmac("sha256", secret, output, rounds=1, keylen=keylen)
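    # NOTE: illustrative sketch (not part of the original class) -- with tiny,
    # deliberately insecure parameters the engine can be exercised directly:
    #
    #   out = ScryptEngine.execute(b"secret", b"salt", n=4, r=1, p=1, keylen=16)
    #   assert len(out) == 16
    #
    # real callers should go through the validating frontend in
    # zdppy_password_hash.crypto.scrypt instead.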
#=================================================================
# smix() helper
#=================================================================
def smix(self, input):
"""run SCrypt smix function on a single input block
:arg input:
byte string containing input data.
interpreted as 32*r little endian 4 byte integers.
:returns:
byte string containing output data
derived by mixing input using n & r parameters.
.. note:: time & mem cost are both ``O(n * r)``
"""
# gather locals
bmix = self.bmix
bmix_struct = self.bmix_struct
integerify = self.integerify
n = self.n
# parse input into 32*r integers ('X' in scrypt source)
# mem cost -- O(r)
buffer = list(bmix_struct.unpack(input))
# starting with initial buffer contents, derive V s.t.
        # V[0]=initial_buffer ... V[i] = bmix(V[i-1]) ... V[n-1] = bmix(V[n-2])
        # final buffer contents should equal bmix(V[n-1])
#
# time cost -- O(n * r) -- n loops, bmix is O(r)
# mem cost -- O(n * r) -- V is n-element array of r-element tuples
# NOTE: could do time / memory tradeoff to shrink size of V
def vgen():
i = 0
while i < n:
last = tuple(buffer)
yield last
bmix(last, buffer)
i += 1
V = list(vgen())
# generate result from X & V.
#
# time cost -- O(n * r) -- loops n times, calls bmix() which has O(r) time cost
# mem cost -- O(1) -- allocates nothing, calls bmix() which has O(1) mem cost
get_v_elem = V.__getitem__
n_mask = n - 1
i = 0
while i < n:
j = integerify(buffer) & n_mask
result = tuple(a ^ b for a, b in izip(buffer, get_v_elem(j)))
bmix(result, buffer)
i += 1
# # NOTE: we could easily support arbitrary values of ``n``, not just powers of 2,
# # but very few implementations have that ability, so not enabling it for now...
# if not n_is_log_2:
# while i < n:
# j = integerify(buffer) % n
# tmp = tuple(a^b for a,b in izip(buffer, get_v_elem(j)))
# bmix(tmp,buffer)
# i += 1
        # repack the mixed buffer into bytes
return bmix_struct.pack(*buffer)
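    # NOTE: illustrative sizing sketch (assumption-based, not from the original
    # source): for n=2**14, r=8 the V table built above holds 2**14 tuples of
    # 32*8 = 256 ints, i.e. ~16 MiB of raw 32-bit words before CPython object
    # overhead -- which is why this pure-python backend is so memory-hungry.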
#=================================================================
# bmix() helper
#=================================================================
def bmix(self, source, target):
"""
block mixing function used by smix()
uses salsa20/8 core to mix block contents.
:arg source:
source to read from.
should be list of 32*r 4-byte integers
(2*r salsa20 blocks).
:arg target:
target to write to.
should be list with same size as source.
the existing value of this buffer is ignored.
.. warning::
this operates *in place* on target,
so source & target should NOT be same list.
.. note::
* time cost is ``O(r)`` -- loops 16*r times, salsa20() has ``O(1)`` cost.
* memory cost is ``O(1)`` -- salsa20() uses 16 x uint4,
all other operations done in-place.
"""
## assert source is not target
# Y[-1] = B[2r-1], Y[i] = hash( Y[i-1] xor B[i])
        # B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1})
half = self.bmix_half_len # 16*r out of 32*r - start of Y_1
tmp = source[-16:] # 'X' in scrypt source
siter = iter(source)
j = 0
while j < half:
jn = j+16
target[j:jn] = tmp = salsa20(a ^ b for a, b in izip(tmp, siter))
target[half+j:half+jn] = tmp = salsa20(a ^ b for a, b in izip(tmp, siter))
j = jn
def _bmix_1(self, source, target):
"""special bmix() method optimized for ``r=1`` case"""
B = source[16:]
target[:16] = tmp = salsa20(a ^ b for a, b in izip(B, iter(source)))
target[16:] = salsa20(a ^ b for a, b in izip(tmp, B))
#=================================================================
# eoc
#=================================================================
#==========================================================================
# eof
#========================================================================== | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/crypto/scrypt/_builtin.py | _builtin.py |
#=================================================================
# salsa function
#=================================================================
def salsa20(input):
"""apply the salsa20/8 core to the provided input
:args input: input list containing 16 32-bit integers
:returns: result list containing 16 32-bit integers
"""
b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15 = input
v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15 = \
b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15
i = 0
while i < 4:
# salsa op 0: [4] ^= ([0]+[12])<<<7
t = (v0 + v12) & 0xffffffff
v4 ^= ((t & 0x01ffffff) << 7) | (t >> 25)
# salsa op 1: [8] ^= ([4]+[0])<<<9
t = (v4 + v0) & 0xffffffff
v8 ^= ((t & 0x007fffff) << 9) | (t >> 23)
# salsa op 2: [12] ^= ([8]+[4])<<<13
t = (v8 + v4) & 0xffffffff
v12 ^= ((t & 0x0007ffff) << 13) | (t >> 19)
# salsa op 3: [0] ^= ([12]+[8])<<<18
t = (v12 + v8) & 0xffffffff
v0 ^= ((t & 0x00003fff) << 18) | (t >> 14)
# salsa op 4: [9] ^= ([5]+[1])<<<7
t = (v5 + v1) & 0xffffffff
v9 ^= ((t & 0x01ffffff) << 7) | (t >> 25)
# salsa op 5: [13] ^= ([9]+[5])<<<9
t = (v9 + v5) & 0xffffffff
v13 ^= ((t & 0x007fffff) << 9) | (t >> 23)
# salsa op 6: [1] ^= ([13]+[9])<<<13
t = (v13 + v9) & 0xffffffff
v1 ^= ((t & 0x0007ffff) << 13) | (t >> 19)
# salsa op 7: [5] ^= ([1]+[13])<<<18
t = (v1 + v13) & 0xffffffff
v5 ^= ((t & 0x00003fff) << 18) | (t >> 14)
# salsa op 8: [14] ^= ([10]+[6])<<<7
t = (v10 + v6) & 0xffffffff
v14 ^= ((t & 0x01ffffff) << 7) | (t >> 25)
# salsa op 9: [2] ^= ([14]+[10])<<<9
t = (v14 + v10) & 0xffffffff
v2 ^= ((t & 0x007fffff) << 9) | (t >> 23)
# salsa op 10: [6] ^= ([2]+[14])<<<13
t = (v2 + v14) & 0xffffffff
v6 ^= ((t & 0x0007ffff) << 13) | (t >> 19)
# salsa op 11: [10] ^= ([6]+[2])<<<18
t = (v6 + v2) & 0xffffffff
v10 ^= ((t & 0x00003fff) << 18) | (t >> 14)
# salsa op 12: [3] ^= ([15]+[11])<<<7
t = (v15 + v11) & 0xffffffff
v3 ^= ((t & 0x01ffffff) << 7) | (t >> 25)
# salsa op 13: [7] ^= ([3]+[15])<<<9
t = (v3 + v15) & 0xffffffff
v7 ^= ((t & 0x007fffff) << 9) | (t >> 23)
# salsa op 14: [11] ^= ([7]+[3])<<<13
t = (v7 + v3) & 0xffffffff
v11 ^= ((t & 0x0007ffff) << 13) | (t >> 19)
# salsa op 15: [15] ^= ([11]+[7])<<<18
t = (v11 + v7) & 0xffffffff
v15 ^= ((t & 0x00003fff) << 18) | (t >> 14)
# salsa op 16: [1] ^= ([0]+[3])<<<7
t = (v0 + v3) & 0xffffffff
v1 ^= ((t & 0x01ffffff) << 7) | (t >> 25)
# salsa op 17: [2] ^= ([1]+[0])<<<9
t = (v1 + v0) & 0xffffffff
v2 ^= ((t & 0x007fffff) << 9) | (t >> 23)
# salsa op 18: [3] ^= ([2]+[1])<<<13
t = (v2 + v1) & 0xffffffff
v3 ^= ((t & 0x0007ffff) << 13) | (t >> 19)
# salsa op 19: [0] ^= ([3]+[2])<<<18
t = (v3 + v2) & 0xffffffff
v0 ^= ((t & 0x00003fff) << 18) | (t >> 14)
# salsa op 20: [6] ^= ([5]+[4])<<<7
t = (v5 + v4) & 0xffffffff
v6 ^= ((t & 0x01ffffff) << 7) | (t >> 25)
# salsa op 21: [7] ^= ([6]+[5])<<<9
t = (v6 + v5) & 0xffffffff
v7 ^= ((t & 0x007fffff) << 9) | (t >> 23)
# salsa op 22: [4] ^= ([7]+[6])<<<13
t = (v7 + v6) & 0xffffffff
v4 ^= ((t & 0x0007ffff) << 13) | (t >> 19)
# salsa op 23: [5] ^= ([4]+[7])<<<18
t = (v4 + v7) & 0xffffffff
v5 ^= ((t & 0x00003fff) << 18) | (t >> 14)
# salsa op 24: [11] ^= ([10]+[9])<<<7
t = (v10 + v9) & 0xffffffff
v11 ^= ((t & 0x01ffffff) << 7) | (t >> 25)
# salsa op 25: [8] ^= ([11]+[10])<<<9
t = (v11 + v10) & 0xffffffff
v8 ^= ((t & 0x007fffff) << 9) | (t >> 23)
# salsa op 26: [9] ^= ([8]+[11])<<<13
t = (v8 + v11) & 0xffffffff
v9 ^= ((t & 0x0007ffff) << 13) | (t >> 19)
# salsa op 27: [10] ^= ([9]+[8])<<<18
t = (v9 + v8) & 0xffffffff
v10 ^= ((t & 0x00003fff) << 18) | (t >> 14)
# salsa op 28: [12] ^= ([15]+[14])<<<7
t = (v15 + v14) & 0xffffffff
v12 ^= ((t & 0x01ffffff) << 7) | (t >> 25)
# salsa op 29: [13] ^= ([12]+[15])<<<9
t = (v12 + v15) & 0xffffffff
v13 ^= ((t & 0x007fffff) << 9) | (t >> 23)
# salsa op 30: [14] ^= ([13]+[12])<<<13
t = (v13 + v12) & 0xffffffff
v14 ^= ((t & 0x0007ffff) << 13) | (t >> 19)
# salsa op 31: [15] ^= ([14]+[13])<<<18
t = (v14 + v13) & 0xffffffff
v15 ^= ((t & 0x00003fff) << 18) | (t >> 14)
i += 1
b0 = (b0 + v0) & 0xffffffff
b1 = (b1 + v1) & 0xffffffff
b2 = (b2 + v2) & 0xffffffff
b3 = (b3 + v3) & 0xffffffff
b4 = (b4 + v4) & 0xffffffff
b5 = (b5 + v5) & 0xffffffff
b6 = (b6 + v6) & 0xffffffff
b7 = (b7 + v7) & 0xffffffff
b8 = (b8 + v8) & 0xffffffff
b9 = (b9 + v9) & 0xffffffff
b10 = (b10 + v10) & 0xffffffff
b11 = (b11 + v11) & 0xffffffff
b12 = (b12 + v12) & 0xffffffff
b13 = (b13 + v13) & 0xffffffff
b14 = (b14 + v14) & 0xffffffff
b15 = (b15 + v15) & 0xffffffff
return b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15
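# NOTE: illustrative sketch (not part of the original module) -- the salsa20/8
# core maps the all-zero state to the all-zero state (every add/xor/rotate then
# operates on zeros), which makes a handy smoke test:
def _demo_salsa20_zero_state():  # pragma: no cover
    assert tuple(salsa20([0] * 16)) == (0,) * 16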
#=================================================================
# eof
#================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/crypto/scrypt/_salsa.py | _salsa.py |
#==========================================================================
# imports
#==========================================================================
# core
import os
# pkg
# local
#==========================================================================
# constants
#==========================================================================
_SALSA_OPS = [
# row = (target idx, source idx 1, source idx 2, rotate)
# interpreted as salsa operation over uint32...
    # target ^= (source1+source2) <<< rotate  (32-bit rotate-left)
##/* Operate on columns. */
##define R(a,b) (((a) << (b)) | ((a) >> (32 - (b))))
##x[ 4] ^= R(x[ 0]+x[12], 7); x[ 8] ^= R(x[ 4]+x[ 0], 9);
##x[12] ^= R(x[ 8]+x[ 4],13); x[ 0] ^= R(x[12]+x[ 8],18);
( 4, 0, 12, 7),
( 8, 4, 0, 9),
( 12, 8, 4, 13),
( 0, 12, 8, 18),
##x[ 9] ^= R(x[ 5]+x[ 1], 7); x[13] ^= R(x[ 9]+x[ 5], 9);
##x[ 1] ^= R(x[13]+x[ 9],13); x[ 5] ^= R(x[ 1]+x[13],18);
( 9, 5, 1, 7),
( 13, 9, 5, 9),
( 1, 13, 9, 13),
( 5, 1, 13, 18),
##x[14] ^= R(x[10]+x[ 6], 7); x[ 2] ^= R(x[14]+x[10], 9);
##x[ 6] ^= R(x[ 2]+x[14],13); x[10] ^= R(x[ 6]+x[ 2],18);
( 14, 10, 6, 7),
( 2, 14, 10, 9),
( 6, 2, 14, 13),
( 10, 6, 2, 18),
##x[ 3] ^= R(x[15]+x[11], 7); x[ 7] ^= R(x[ 3]+x[15], 9);
##x[11] ^= R(x[ 7]+x[ 3],13); x[15] ^= R(x[11]+x[ 7],18);
( 3, 15, 11, 7),
( 7, 3, 15, 9),
( 11, 7, 3, 13),
( 15, 11, 7, 18),
##/* Operate on rows. */
##x[ 1] ^= R(x[ 0]+x[ 3], 7); x[ 2] ^= R(x[ 1]+x[ 0], 9);
##x[ 3] ^= R(x[ 2]+x[ 1],13); x[ 0] ^= R(x[ 3]+x[ 2],18);
( 1, 0, 3, 7),
( 2, 1, 0, 9),
( 3, 2, 1, 13),
( 0, 3, 2, 18),
##x[ 6] ^= R(x[ 5]+x[ 4], 7); x[ 7] ^= R(x[ 6]+x[ 5], 9);
##x[ 4] ^= R(x[ 7]+x[ 6],13); x[ 5] ^= R(x[ 4]+x[ 7],18);
( 6, 5, 4, 7),
( 7, 6, 5, 9),
( 4, 7, 6, 13),
( 5, 4, 7, 18),
##x[11] ^= R(x[10]+x[ 9], 7); x[ 8] ^= R(x[11]+x[10], 9);
##x[ 9] ^= R(x[ 8]+x[11],13); x[10] ^= R(x[ 9]+x[ 8],18);
( 11, 10, 9, 7),
( 8, 11, 10, 9),
( 9, 8, 11, 13),
( 10, 9, 8, 18),
##x[12] ^= R(x[15]+x[14], 7); x[13] ^= R(x[12]+x[15], 9);
##x[14] ^= R(x[13]+x[12],13); x[15] ^= R(x[14]+x[13],18);
( 12, 15, 14, 7),
( 13, 12, 15, 9),
( 14, 13, 12, 13),
( 15, 14, 13, 18),
]
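# NOTE: illustrative sketch (not part of the original script) -- the masked
# shift/or expression emitted by main() below is just a 32-bit rotate-left:
def _demo_rotl32_identity():  # pragma: no cover
    def emitted_op(t, rotate):
        rmask = (1 << (32 - rotate)) - 1
        return ((t & rmask) << rotate) | (t >> (32 - rotate))
    # 0x80000001 rotated left by 7 moves the high bit to bit 6: 0x80 | 0x40
    assert emitted_op(0x80000001, 7) == 0xc0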
def main():
target = os.path.join(os.path.dirname(__file__), "_salsa.py")
    fh = open(target, "w")
write = fh.write
VNAMES = ["v%d" % i for i in range(16)]
PAD = " " * 4
PAD2 = " " * 8
PAD3 = " " * 12
TLIST = ", ".join("b%d" % i for i in range(16))
VLIST = ", ".join(VNAMES)
kwds = dict(
VLIST=VLIST,
TLIST=TLIST,
)
write('''\
"""zdppy_password_hash.utils.scrypt._salsa - salsa 20/8 core, autogenerated by _gen_salsa.py"""
#=================================================================
# salsa function
#=================================================================
def salsa20(input):
\"""apply the salsa20/8 core to the provided input
:args input: input list containing 16 32-bit integers
:returns: result list containing 16 32-bit integers
\"""
%(TLIST)s = input
%(VLIST)s = \\
%(TLIST)s
i = 0
while i < 4:
''' % kwds)
for idx, (target, source1, source2, rotate) in enumerate(_SALSA_OPS):
write('''\
# salsa op %(idx)d: [%(it)d] ^= ([%(is1)d]+[%(is2)d])<<<%(rot1)d
t = (%(src1)s + %(src2)s) & 0xffffffff
%(dst)s ^= ((t & 0x%(rmask)08x) << %(rot1)d) | (t >> %(rot2)d)
''' % dict(
idx=idx, is1 = source1, is2=source2, it=target,
src1=VNAMES[source1],
src2=VNAMES[source2],
dst=VNAMES[target],
rmask=(1<<(32-rotate))-1,
rot1=rotate,
rot2=32-rotate,
))
write('''\
i += 1
''')
for idx in range(16):
write(PAD + "b%d = (b%d + v%d) & 0xffffffff\n" % (idx,idx,idx))
write('''\
return %(TLIST)s
#=================================================================
# eof
#=================================================================
''' % kwds)
if __name__ == "__main__":
main()
#==========================================================================
# eof
#========================================================================== | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/crypto/scrypt/_gen_files.py | _gen_files.py |
from __future__ import absolute_import
# core
import logging; log = logging.getLogger(__name__)
from warnings import warn
# pkg
from zdppy_password_hash import exc
from zdppy_password_hash.utils import to_bytes
from zdppy_password_hash.utils.compat import PYPY
# local
__all__ =[
"validate",
"scrypt",
]
#==========================================================================
# config validation
#==========================================================================
#: internal global constant for setting stdlib scrypt's maxmem (int bytes).
#: set to -1 to auto-calculate (see _load_stdlib_backend() below)
#: set to 0 for openssl default (32mb according to python docs)
#: TODO: standardize this across backends, and expose support via scrypt hash config;
#: currently not very configurable, and only applies to stdlib backend.
SCRYPT_MAXMEM = -1
#: max output length in bytes
MAX_KEYLEN = ((1 << 32) - 1) * 32
#: max ``r * p`` limit
MAX_RP = (1 << 30) - 1
# TODO: unittests for this function
def validate(n, r, p):
"""
helper which validates a set of scrypt config parameters.
scrypt will take ``O(n * r * p)`` time and ``O(n * r)`` memory.
limitations are that ``n = 2**<positive integer>``, ``n < 2**(16*r)``, ``r * p < 2 ** 30``.
:param n: scrypt rounds
:param r: scrypt block size
:param p: scrypt parallel factor
"""
if r < 1:
raise ValueError("r must be > 0: r=%r" % r)
if p < 1:
raise ValueError("p must be > 0: p=%r" % p)
if r * p > MAX_RP:
# pbkdf2-hmac-sha256 limitation - it will be requested to generate ``p*(2*r)*64`` bytes,
# but pbkdf2 can do max of (2**31-1) blocks, and sha-256 has 32 byte block size...
# so ``(2**31-1)*32 >= p*r*128`` -> ``r*p < 2**30``
raise ValueError("r * p must be < 2**30: r=%r, p=%r" % (r,p))
if n < 2 or n & (n - 1):
raise ValueError("n must be > 1, and a power of 2: n=%r" % n)
return True
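# NOTE: illustrative sketch (not part of the original module) -- typical
# accept/reject behavior of validate():
def _demo_validate():  # pragma: no cover
    assert validate(n=16, r=8, p=1)     # ok: n is a power of 2
    try:
        validate(n=15, r=8, p=1)        # rejected: 15 is not a power of 2
    except ValueError:
        pass
    else:
        raise AssertionError("expected ValueError for non-power-of-2 n")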
UINT32_SIZE = 4
def estimate_maxmem(n, r, p, fudge=1.05):
"""
calculate memory required for parameter combination.
assumes parameters have already been validated.
.. warning::
this is derived from OpenSSL's scrypt maxmem formula;
and may not be correct for other implementations
(additional buffers, different parallelism tradeoffs, etc).
"""
# XXX: expand to provide upper bound for diff backends, or max across all of them?
# NOTE: openssl's scrypt() enforces it's maxmem parameter based on calc located at
# <openssl/providers/default/kdfs/scrypt.c>, ending in line containing "Blen + Vlen > maxmem"
# using the following formula:
# Blen = p * 128 * r
# Vlen = 32 * r * (N + 2) * sizeof(uint32_t)
# total_bytes = Blen + Vlen
maxmem = r * (128 * p + 32 * (n + 2) * UINT32_SIZE)
# add fudge factor so we don't have off-by-one mismatch w/ openssl
maxmem = int(maxmem * fudge)
return maxmem
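# NOTE: illustrative worked example (assumption-based, not from the original
# source) for n=2**14, r=8, p=1:
#   Blen = p * 128 * r          = 1,024 bytes
#   Vlen = 32 * r * (n + 2) * 4 = 16,779,264 bytes
#   maxmem ~= 1.05 * (Blen + Vlen) ~= 16.8 MiB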
# TODO: configuration picker (may need psutil for full effect)
#==========================================================================
# hash frontend
#==========================================================================
#: backend function used by scrypt(), filled in by _set_backend()
_scrypt = None
#: name of backend currently in use, exposed for informational purposes.
backend = None
def scrypt(secret, salt, n, r, p=1, keylen=32):
"""run SCrypt key derivation function using specified parameters.
:arg secret:
passphrase string (unicode is encoded to bytes using utf-8).
:arg salt:
salt string (unicode is encoded to bytes using utf-8).
:arg n:
integer 'N' parameter
:arg r:
integer 'r' parameter
:arg p:
integer 'p' parameter
:arg keylen:
number of bytes of key to generate.
defaults to 32 (the internal block size).
:returns:
a *keylen*-sized bytes instance
    SCrypt imposes a number of constraints on its input parameters:
* ``r * p < 2**30`` -- due to a limitation of PBKDF2-HMAC-SHA256.
* ``keylen < (2**32 - 1) * 32`` -- due to a limitation of PBKDF2-HMAC-SHA256.
    * ``n`` must be a power of 2, and > 1 -- internal limitation of the scrypt() implementation
:raises ValueError: if the provided parameters are invalid (see constraints above).
.. warning::
        Unless the third-party `scrypt <https://pypi.python.org/pypi/scrypt/>`_ package
        is installed, zdppy_password_hash will use a builtin pure-python implementation of scrypt,
        which is *considerably* slower (and thus requires a much lower / less secure
        ``n`` value in order to be usable). Installing the :mod:`!scrypt` package
        is strongly recommended.
"""
validate(n, r, p)
secret = to_bytes(secret, param="secret")
salt = to_bytes(salt, param="salt")
if keylen < 1:
raise ValueError("keylen must be at least 1")
if keylen > MAX_KEYLEN:
raise ValueError("keylen too large, must be <= %d" % MAX_KEYLEN)
return _scrypt(secret, salt, n, r, p, keylen)
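# NOTE: illustrative sketch (not part of the original module) -- a minimal
# scrypt() call; parameters are deliberately tiny/insecure so even the
# pure-python backend finishes quickly (real settings should be much higher):
def _demo_scrypt():  # pragma: no cover
    key = scrypt(b"password", b"salt", n=16, r=8, p=1, keylen=32)
    assert len(key) == 32
    # unicode inputs are utf-8 encoded, so this derives the same key:
    assert scrypt(u"password", u"salt", n=16, r=8, p=1, keylen=32) == key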
def _load_builtin_backend():
"""
Load pure-python scrypt implementation built into zdppy_password_hash.
"""
slowdown = 10 if PYPY else 100
warn("Using builtin scrypt backend, which is %dx slower than is required "
"for adequate security. Installing scrypt support (via 'pip install scrypt') "
"is strongly recommended" % slowdown, exc.PasslibSecurityWarning)
from ._builtin import ScryptEngine
return ScryptEngine.execute
def _load_cffi_backend():
"""
Try to import the ctypes-based scrypt hash function provided by the
    `scrypt <https://pypi.python.org/pypi/scrypt/>`_ package.
"""
try:
from scrypt import hash
return hash
except ImportError:
pass
    # not available -- check whether the package is present but outdated / not installed right
try:
import scrypt
except ImportError as err:
if "scrypt" not in str(err):
# e.g. if cffi isn't set up right
# user should try importing scrypt explicitly to diagnose problem.
warn("'scrypt' package failed to import correctly (possible installation issue?)",
exc.PasslibWarning)
# else: package just isn't installed
else:
warn("'scrypt' package is too old (lacks ``hash()`` method)", exc.PasslibWarning)
return None
def _load_stdlib_backend():
"""
    Attempt to load the stdlib scrypt() implementation and return a wrapper.
Returns None if not found.
"""
try:
# new in python 3.6, if compiled with openssl >= 1.1
from hashlib import scrypt as stdlib_scrypt
except ImportError:
return None
def stdlib_scrypt_wrapper(secret, salt, n, r, p, keylen):
# work out appropriate "maxmem" parameter
#
# TODO: would like to enforce a single "maxmem" policy across all backends;
# and maybe expose this via scrypt hasher config.
#
# for now, since parameters should all be coming from internally-controlled sources
        # (password hashes), we use a policy of "whatever memory the parameters need".
        # furthermore, since stdlib scrypt is the only place that needs this,
        # we currently calculate exactly the maxmem needed to make the stdlib call work.
        # as a hack, this can be overridden via SCRYPT_MAXMEM above;
# would like to formalize all of this.
maxmem = SCRYPT_MAXMEM
if maxmem < 0:
maxmem = estimate_maxmem(n, r, p)
return stdlib_scrypt(password=secret, salt=salt, n=n, r=r, p=p, dklen=keylen,
maxmem=maxmem)
return stdlib_scrypt_wrapper
#: list of potential backends
backend_values = ("stdlib", "scrypt", "builtin")
#: dict mapping backend name -> loader
_backend_loaders = dict(
stdlib=_load_stdlib_backend,
scrypt=_load_cffi_backend, # XXX: rename backend constant to "cffi"?
builtin=_load_builtin_backend,
)
def _set_backend(name, dryrun=False):
"""
    set backend for scrypt(). if name is "default", the first available backend is loaded.
:raises ~zdppy_password_hash.exc.MissingBackendError: if backend can't be found
.. note:: mainly intended to be called by unittests, and scrypt hash handler
"""
if name == "any":
return
elif name == "default":
for name in backend_values:
try:
return _set_backend(name, dryrun=dryrun)
except exc.MissingBackendError:
continue
raise exc.MissingBackendError("no scrypt backends available")
else:
loader = _backend_loaders.get(name)
if not loader:
raise ValueError("unknown scrypt backend: %r" % (name,))
hash = loader()
if not hash:
raise exc.MissingBackendError("scrypt backend %r not available" % name)
if dryrun:
return
global _scrypt, backend
backend = name
_scrypt = hash
# initialize backend
_set_backend("default")
def _has_backend(name):
try:
_set_backend(name, dryrun=True)
return True
except exc.MissingBackendError:
return False
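# NOTE: illustrative sketch (not part of the original module) -- inspecting and
# switching backends, mirroring how the unittests drive _set_backend():
def _demo_backends():  # pragma: no cover
    for name in backend_values:
        print(name, "available" if _has_backend(name) else "missing")
    _set_backend("default")     # restore the first available backend
    print("active backend:", backend)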
#==========================================================================
# eof
#========================================================================== | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/crypto/scrypt/__init__.py | __init__.py |
#=============================================================================
# imports
#=============================================================================
# core
import struct
# pkg
from zdppy_password_hash.utils import repeat_string
# local
__all__ = [
"BlowfishEngine",
]
#=============================================================================
# blowfish constants
#=============================================================================
BLOWFISH_P = BLOWFISH_S = None
def _init_constants():
global BLOWFISH_P, BLOWFISH_S
# NOTE: blowfish's spec states these numbers are the hex representation
# of the fractional portion of PI, in order.
# Initial contents of key schedule - 18 integers
BLOWFISH_P = [
0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344,
0xa4093822, 0x299f31d0, 0x082efa98, 0xec4e6c89,
0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c,
0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917,
0x9216d5d9, 0x8979fb1b,
]
# all 4 blowfish S boxes in one array - 256 integers per S box
BLOWFISH_S = [
# sbox 1
[
0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7,
0xb8e1afed, 0x6a267e96, 0xba7c9045, 0xf12c7f99,
0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16,
0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e,
0x0d95748f, 0x728eb658, 0x718bcd58, 0x82154aee,
0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013,
0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef,
0x8e79dcb0, 0x603a180e, 0x6c9e0e8b, 0xb01e8a3e,
0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60,
0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440,
0x55ca396a, 0x2aab10b6, 0xb4cc5c34, 0x1141e8ce,
0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a,
0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e,
0xafd6ba33, 0x6c24cf5c, 0x7a325381, 0x28958677,
0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193,
0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032,
0xef845d5d, 0xe98575b1, 0xdc262302, 0xeb651b88,
0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239,
0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e,
0x21c66842, 0xf6e96c9a, 0x670c9c61, 0xabd388f0,
0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3,
0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98,
0xa1f1651d, 0x39af0176, 0x66ca593e, 0x82430e88,
0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe,
0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6,
0x4ed3aa62, 0x363f7706, 0x1bfedf72, 0x429b023d,
0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b,
0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7,
0xe3fe501a, 0xb6794c3b, 0x976ce0bd, 0x04c006ba,
0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463,
0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f,
0x6dfc511f, 0x9b30952c, 0xcc814544, 0xaf5ebd09,
0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3,
0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb,
0x5579c0bd, 0x1a60320a, 0xd6a100c6, 0x402c7279,
0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8,
0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab,
0x323db5fa, 0xfd238760, 0x53317b48, 0x3e00df82,
0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db,
0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573,
0x695b27b0, 0xbbca58c8, 0xe1ffa35d, 0xb8f011a0,
0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b,
0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790,
0xe1ddf2da, 0xa4cb7e33, 0x62fb1341, 0xcee4c6e8,
0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4,
0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0,
0xd08ed1d0, 0xafc725e0, 0x8e3c5b2f, 0x8e7594b7,
0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c,
0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad,
0x2f2f2218, 0xbe0e1777, 0xea752dfe, 0x8b021fa1,
0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299,
0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9,
0x165fa266, 0x80957705, 0x93cc7314, 0x211a1477,
0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf,
0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49,
0x00250e2d, 0x2071b35e, 0x226800bb, 0x57b8e0af,
0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa,
0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5,
0x83260376, 0x6295cfa9, 0x11c81968, 0x4e734a41,
0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915,
0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400,
0x08ba6fb5, 0x571be91f, 0xf296ec6b, 0x2a0dd915,
0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664,
0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a,
],
# sbox 2
[
0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623,
0xad6ea6b0, 0x49a7df7d, 0x9cee60b8, 0x8fedb266,
0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1,
0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e,
0x3f54989a, 0x5b429d65, 0x6b8fe4d6, 0x99f73fd6,
0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1,
0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e,
0x09686b3f, 0x3ebaefc9, 0x3c971814, 0x6b6a70a1,
0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737,
0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8,
0xb03ada37, 0xf0500c0d, 0xf01c1f04, 0x0200b3ff,
0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd,
0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701,
0x3ae5e581, 0x37c2dadc, 0xc8b57634, 0x9af3dda7,
0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41,
0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331,
0x4e548b38, 0x4f6db908, 0x6f420d03, 0xf60a04bf,
0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af,
0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e,
0x5512721f, 0x2e6b7124, 0x501adde6, 0x9f84cd87,
0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c,
0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2,
0xef1c1847, 0x3215d908, 0xdd433b37, 0x24c2ba16,
0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd,
0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b,
0x043556f1, 0xd7a3c76b, 0x3c11183b, 0x5924a509,
0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e,
0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3,
0x771fe71c, 0x4e3d06fa, 0x2965dcb9, 0x99e71d0f,
0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a,
0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4,
0xf2f74ea7, 0x361d2b3d, 0x1939260f, 0x19c27960,
0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66,
0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28,
0xc332ddef, 0xbe6c5aa5, 0x65582185, 0x68ab9802,
0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84,
0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510,
0x13cca830, 0xeb61bd96, 0x0334fe1e, 0xaa0363cf,
0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14,
0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e,
0x648b1eaf, 0x19bdf0ca, 0xa02369b9, 0x655abb50,
0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7,
0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8,
0xf837889a, 0x97e32d77, 0x11ed935f, 0x16681281,
0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99,
0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696,
0xcdb30aeb, 0x532e3054, 0x8fd948e4, 0x6dbc3128,
0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73,
0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0,
0x45eee2b6, 0xa3aaabea, 0xdb6c4f15, 0xfacb4fd0,
0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105,
0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250,
0xcf62a1f2, 0x5b8d2646, 0xfc8883a0, 0xc1c7b6a3,
0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285,
0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00,
0x58428d2a, 0x0c55f5ea, 0x1dadf43e, 0x233f7061,
0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb,
0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e,
0xa6078084, 0x19f8509e, 0xe8efd855, 0x61d99735,
0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc,
0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9,
0xdb73dbd3, 0x105588cd, 0x675fda79, 0xe3674340,
0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20,
0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7,
],
# sbox 3
[
0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934,
0x411520f7, 0x7602d4f7, 0xbcf46b2e, 0xd4a20068,
0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af,
0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840,
0x4d95fc1d, 0x96b591af, 0x70f4ddd3, 0x66a02f45,
0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504,
0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a,
0x28507825, 0x530429f4, 0x0a2c86da, 0xe9b66dfb,
0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee,
0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6,
0xaace1e7c, 0xd3375fec, 0xce78a399, 0x406b2a42,
0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b,
0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2,
0x3a6efa74, 0xdd5b4332, 0x6841e7f7, 0xca7820fb,
0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527,
0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b,
0x55a867bc, 0xa1159a58, 0xcca92963, 0x99e1db33,
0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c,
0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3,
0x95c11548, 0xe4c66d22, 0x48c1133f, 0xc70f86dc,
0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17,
0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564,
0x257b7834, 0x602a9c60, 0xdff8e8a3, 0x1f636c1b,
0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115,
0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922,
0x85b2a20e, 0xe6ba0d99, 0xde720c8c, 0x2da2f728,
0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0,
0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e,
0x0a476341, 0x992eff74, 0x3a6f6eab, 0xf4f8fd37,
0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d,
0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804,
0xf1290dc7, 0xcc00ffa3, 0xb5390f92, 0x690fed0b,
0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3,
0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb,
0x37392eb3, 0xcc115979, 0x8026e297, 0xf42e312d,
0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c,
0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350,
0x1a6b1018, 0x11caedfa, 0x3d25bdd8, 0xe2e1c3c9,
0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a,
0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe,
0x9dbc8057, 0xf0f7c086, 0x60787bf8, 0x6003604d,
0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc,
0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f,
0x77a057be, 0xbde8ae24, 0x55464299, 0xbf582e61,
0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2,
0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9,
0x7aeb2661, 0x8b1ddf84, 0x846a0e79, 0x915f95e2,
0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c,
0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e,
0xb77f19b6, 0xe0a9dc09, 0x662d09a1, 0xc4324633,
0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10,
0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169,
0xdcb7da83, 0x573906fe, 0xa1e2ce9b, 0x4fcd7f52,
0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027,
0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5,
0xf0177a28, 0xc0f586e0, 0x006058aa, 0x30dc7d62,
0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634,
0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76,
0x6f05e409, 0x4b7c0188, 0x39720a3d, 0x7c927c24,
0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc,
0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4,
0x1e50ef5e, 0xb161e6f8, 0xa28514d9, 0x6c51133c,
0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837,
0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0,
],
# sbox 4
[
0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b,
0x5cb0679e, 0x4fa33742, 0xd3822740, 0x99bc9bbe,
0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b,
0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4,
0x5748ab2f, 0xbc946e79, 0xc6a376d2, 0x6549c2c8,
0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6,
0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304,
0xa1fad5f0, 0x6a2d519a, 0x63ef8ce2, 0x9a86ee22,
0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4,
0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6,
0x2826a2f9, 0xa73a3ae1, 0x4ba99586, 0xef5562e9,
0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59,
0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593,
0xe990fd5a, 0x9e34d797, 0x2cf0b7d9, 0x022b8b51,
0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28,
0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c,
0xe029ac71, 0xe019a5e6, 0x47b0acfd, 0xed93fa9b,
0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28,
0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c,
0x15056dd4, 0x88f46dba, 0x03a16125, 0x0564f0bd,
0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a,
0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319,
0x7533d928, 0xb155fdf5, 0x03563482, 0x8aba3cbb,
0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f,
0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991,
0xea7a90c2, 0xfb3e7bce, 0x5121ce64, 0x774fbe32,
0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680,
0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166,
0xb39a460a, 0x6445c0dd, 0x586cdecf, 0x1c20c8ae,
0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb,
0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5,
0x72eacea8, 0xfa6484bb, 0x8d6612ae, 0xbf3c6f47,
0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370,
0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d,
0x4040cb08, 0x4eb4e2cc, 0x34d2466a, 0x0115af84,
0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048,
0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8,
0x611560b1, 0xe7933fdc, 0xbb3a792b, 0x344525bd,
0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9,
0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7,
0x1a908749, 0xd44fbd9a, 0xd0dadecb, 0xd50ada38,
0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f,
0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c,
0xbf97222c, 0x15e6fc2a, 0x0f91fc71, 0x9b941525,
0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1,
0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442,
0xe0ec6e0e, 0x1698db3b, 0x4c98a0be, 0x3278e964,
0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e,
0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8,
0xdf359f8d, 0x9b992f2e, 0xe60b6f47, 0x0fe3f11d,
0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f,
0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299,
0xf523f357, 0xa6327623, 0x93a83531, 0x56cccd02,
0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc,
0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614,
0xe6c6c7bd, 0x327a140a, 0x45e1d006, 0xc3f27b9a,
0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6,
0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b,
0x53113ec0, 0x1640e3d3, 0x38abbd60, 0x2547adf0,
0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060,
0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e,
0x1948c25c, 0x02fb8a8c, 0x01c36ae4, 0xd6ebe1f9,
0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f,
0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6,
]
]
#=============================================================================
# engine
#=============================================================================
class BlowfishEngine(object):
def __init__(self):
if BLOWFISH_P is None:
_init_constants()
self.P = list(BLOWFISH_P)
self.S = [ list(box) for box in BLOWFISH_S ]
#===================================================================
# common helpers
#===================================================================
@staticmethod
def key_to_words(data, size=18):
"""convert data to tuple of <size> 4-byte integers, repeating or
truncating data as needed to reach specified size"""
assert isinstance(data, bytes)
dlen = len(data)
if not dlen:
# return all zeros - original C code would just read the NUL after
            # the password, so mimicking that behavior for this edge case.
return [0]*size
# repeat data until it fills up 4*size bytes
data = repeat_string(data, size<<2)
# unpack
return struct.unpack(">%dI" % (size,), data)
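    # NOTE: illustrative sketch (not part of the original class) -- short keys
    # are cyclically repeated before being unpacked as big-endian uint32 words,
    # so key_to_words(b"ab", size=4) tiles b"ab" out to 16 bytes and yields
    # four copies of 0x61626162.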
#===================================================================
# blowfish routines
#===================================================================
def encipher(self, l, r):
"""loop version of blowfish encipher routine"""
P, S = self.P, self.S
l ^= P[0]
i = 1
while i < 17:
# Feistel substitution on left word
r = ((((S[0][l >> 24] + S[1][(l >> 16) & 0xff]) ^ S[2][(l >> 8) & 0xff]) +
S[3][l & 0xff]) & 0xffffffff) ^ P[i] ^ r
            # swap vars so even rounds do Feistel substitution on right word
l, r = r, l
i += 1
return r ^ P[17], l
# NOTE: decipher is same as above, just with reversed(P) instead.
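    # NOTE: illustrative sketch (not part of the original class), following the
    # note above -- a decipher can be built by applying the subkeys in reverse:
    #
    #   def decipher(self, l, r):
    #       P, S = self.P, self.S
    #       l ^= P[17]
    #       i = 16
    #       while i > 0:
    #           r = ((((S[0][l >> 24] + S[1][(l >> 16) & 0xff]) ^ S[2][(l >> 8) & 0xff]) +
    #                S[3][l & 0xff]) & 0xffffffff) ^ P[i] ^ r
    #           l, r = r, l
    #           i -= 1
    #       return r ^ P[0], l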
def expand(self, key_words):
"""perform stock Blowfish keyschedule setup"""
assert len(key_words) >= 18, "key_words must be at least as large as P"
P, S, encipher = self.P, self.S, self.encipher
i = 0
while i < 18:
P[i] ^= key_words[i]
i += 1
i = l = r = 0
while i < 18:
P[i], P[i+1] = l,r = encipher(l,r)
i += 2
for box in S:
i = 0
while i < 256:
box[i], box[i+1] = l,r = encipher(l,r)
i += 2
#===================================================================
# eks-blowfish routines
#===================================================================
def eks_salted_expand(self, key_words, salt_words):
"""perform EKS' salted version of Blowfish keyschedule setup"""
# NOTE: this is the same as expand(), except for the addition
# of the operations involving *salt_words*.
assert len(key_words) >= 18, "key_words must be at least as large as P"
salt_size = len(salt_words)
assert salt_size, "salt_words must not be empty"
assert not salt_size & 1, "salt_words must have even length"
P, S, encipher = self.P, self.S, self.encipher
i = 0
while i < 18:
P[i] ^= key_words[i]
i += 1
s = i = l = r = 0
while i < 18:
l ^= salt_words[s]
r ^= salt_words[s+1]
s += 2
if s == salt_size:
s = 0
P[i], P[i+1] = l,r = encipher(l,r) # next()
i += 2
for box in S:
i = 0
while i < 256:
l ^= salt_words[s]
r ^= salt_words[s+1]
s += 2
if s == salt_size:
s = 0
box[i], box[i+1] = l,r = encipher(l,r) # next()
i += 2
def eks_repeated_expand(self, key_words, salt_words, rounds):
"""perform rounds stage of EKS keyschedule setup"""
expand = self.expand
n = 0
while n < rounds:
expand(key_words)
expand(salt_words)
n += 1
def repeat_encipher(self, l, r, count):
"""repeatedly apply encipher operation to a block"""
encipher = self.encipher
n = 0
while n < count:
l, r = encipher(l, r)
n += 1
return l, r
#===================================================================
# eoc
#===================================================================
#=============================================================================
# eof
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/crypto/_blowfish/base.py | base.py |
from zdppy_password_hash.crypto._blowfish.base import BlowfishEngine as _BlowfishEngine
# local
__all__ = [
"BlowfishEngine",
]
#=============================================================================
#
#=============================================================================
class BlowfishEngine(_BlowfishEngine):
def encipher(self, l, r):
"""blowfish encipher a single 64-bit block encoded as two 32-bit ints"""
(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9,
p10, p11, p12, p13, p14, p15, p16, p17) = self.P
S0, S1, S2, S3 = self.S
l ^= p0
# Feistel substitution on left word (round 0)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p1
# Feistel substitution on right word (round 1)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p2
# Feistel substitution on left word (round 2)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p3
# Feistel substitution on right word (round 3)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p4
# Feistel substitution on left word (round 4)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p5
# Feistel substitution on right word (round 5)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p6
# Feistel substitution on left word (round 6)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p7
# Feistel substitution on right word (round 7)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p8
# Feistel substitution on left word (round 8)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p9
# Feistel substitution on right word (round 9)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p10
# Feistel substitution on left word (round 10)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p11
# Feistel substitution on right word (round 11)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p12
# Feistel substitution on left word (round 12)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p13
# Feistel substitution on right word (round 13)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p14
# Feistel substitution on left word (round 14)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p15
# Feistel substitution on right word (round 15)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p16
return r ^ p17, l
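    # NOTE: illustrative cross-check (not part of the original class) -- the
    # unrolled encipher above should agree with the loop version inherited
    # from base.py for any inputs, e.g.:
    #
    #   a = BlowfishEngine()        # unrolled engine (this module)
    #   b = _BlowfishEngine()       # generic loop engine (base module)
    #   assert a.encipher(0x01234567, 0x89abcdef) == b.encipher(0x01234567, 0x89abcdef)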
def expand(self, key_words):
"""unrolled version of blowfish key expansion"""
##assert len(key_words) >= 18, "size of key_words must be >= 18"
P, S = self.P, self.S
S0, S1, S2, S3 = S
#=============================================================
# integrate key
#=============================================================
p0 = P[0] ^ key_words[0]
p1 = P[1] ^ key_words[1]
p2 = P[2] ^ key_words[2]
p3 = P[3] ^ key_words[3]
p4 = P[4] ^ key_words[4]
p5 = P[5] ^ key_words[5]
p6 = P[6] ^ key_words[6]
p7 = P[7] ^ key_words[7]
p8 = P[8] ^ key_words[8]
p9 = P[9] ^ key_words[9]
p10 = P[10] ^ key_words[10]
p11 = P[11] ^ key_words[11]
p12 = P[12] ^ key_words[12]
p13 = P[13] ^ key_words[13]
p14 = P[14] ^ key_words[14]
p15 = P[15] ^ key_words[15]
p16 = P[16] ^ key_words[16]
p17 = P[17] ^ key_words[17]
#=============================================================
# update P
#=============================================================
#------------------------------------------------
# update P[0] and P[1]
#------------------------------------------------
l, r = p0, 0
# Feistel substitution on left word (round 0)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p1
# Feistel substitution on right word (round 1)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p2
# Feistel substitution on left word (round 2)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p3
# Feistel substitution on right word (round 3)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p4
# Feistel substitution on left word (round 4)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p5
# Feistel substitution on right word (round 5)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p6
# Feistel substitution on left word (round 6)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p7
# Feistel substitution on right word (round 7)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p8
# Feistel substitution on left word (round 8)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p9
# Feistel substitution on right word (round 9)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p10
# Feistel substitution on left word (round 10)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p11
# Feistel substitution on right word (round 11)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p12
# Feistel substitution on left word (round 12)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p13
# Feistel substitution on right word (round 13)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p14
# Feistel substitution on left word (round 14)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p15
# Feistel substitution on right word (round 15)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p16
p0, p1 = l, r = r ^ p17, l
#------------------------------------------------
# update P[2] and P[3]
#------------------------------------------------
l ^= p0
# Feistel substitution on left word (round 0)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p1
# Feistel substitution on right word (round 1)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p2
# Feistel substitution on left word (round 2)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p3
# Feistel substitution on right word (round 3)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p4
# Feistel substitution on left word (round 4)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p5
# Feistel substitution on right word (round 5)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p6
# Feistel substitution on left word (round 6)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p7
# Feistel substitution on right word (round 7)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p8
# Feistel substitution on left word (round 8)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p9
# Feistel substitution on right word (round 9)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p10
# Feistel substitution on left word (round 10)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p11
# Feistel substitution on right word (round 11)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p12
# Feistel substitution on left word (round 12)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p13
# Feistel substitution on right word (round 13)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p14
# Feistel substitution on left word (round 14)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p15
# Feistel substitution on right word (round 15)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p16
p2, p3 = l, r = r ^ p17, l
#------------------------------------------------
# update P[4] and P[5]
#------------------------------------------------
l ^= p0
# Feistel substitution on left word (round 0)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p1
# Feistel substitution on right word (round 1)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p2
# Feistel substitution on left word (round 2)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p3
# Feistel substitution on right word (round 3)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p4
# Feistel substitution on left word (round 4)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p5
# Feistel substitution on right word (round 5)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p6
# Feistel substitution on left word (round 6)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p7
# Feistel substitution on right word (round 7)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p8
# Feistel substitution on left word (round 8)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p9
# Feistel substitution on right word (round 9)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p10
# Feistel substitution on left word (round 10)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p11
# Feistel substitution on right word (round 11)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p12
# Feistel substitution on left word (round 12)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p13
# Feistel substitution on right word (round 13)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p14
# Feistel substitution on left word (round 14)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p15
# Feistel substitution on right word (round 15)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p16
p4, p5 = l, r = r ^ p17, l
#------------------------------------------------
# update P[6] and P[7]
#------------------------------------------------
l ^= p0
# Feistel substitution on left word (round 0)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p1
# Feistel substitution on right word (round 1)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p2
# Feistel substitution on left word (round 2)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p3
# Feistel substitution on right word (round 3)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p4
# Feistel substitution on left word (round 4)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p5
# Feistel substitution on right word (round 5)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p6
# Feistel substitution on left word (round 6)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p7
# Feistel substitution on right word (round 7)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p8
# Feistel substitution on left word (round 8)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p9
# Feistel substitution on right word (round 9)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p10
# Feistel substitution on left word (round 10)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p11
# Feistel substitution on right word (round 11)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p12
# Feistel substitution on left word (round 12)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p13
# Feistel substitution on right word (round 13)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p14
# Feistel substitution on left word (round 14)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p15
# Feistel substitution on right word (round 15)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p16
p6, p7 = l, r = r ^ p17, l
#------------------------------------------------
# update P[8] and P[9]
#------------------------------------------------
l ^= p0
# Feistel substitution on left word (round 0)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p1
# Feistel substitution on right word (round 1)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p2
# Feistel substitution on left word (round 2)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p3
# Feistel substitution on right word (round 3)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p4
# Feistel substitution on left word (round 4)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p5
# Feistel substitution on right word (round 5)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p6
# Feistel substitution on left word (round 6)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p7
# Feistel substitution on right word (round 7)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p8
# Feistel substitution on left word (round 8)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p9
# Feistel substitution on right word (round 9)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p10
# Feistel substitution on left word (round 10)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p11
# Feistel substitution on right word (round 11)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p12
# Feistel substitution on left word (round 12)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p13
# Feistel substitution on right word (round 13)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p14
# Feistel substitution on left word (round 14)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p15
# Feistel substitution on right word (round 15)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p16
p8, p9 = l, r = r ^ p17, l
#------------------------------------------------
# update P[10] and P[11]
#------------------------------------------------
l ^= p0
# Feistel substitution on left word (round 0)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p1
# Feistel substitution on right word (round 1)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p2
# Feistel substitution on left word (round 2)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p3
# Feistel substitution on right word (round 3)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p4
# Feistel substitution on left word (round 4)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p5
# Feistel substitution on right word (round 5)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p6
# Feistel substitution on left word (round 6)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p7
# Feistel substitution on right word (round 7)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p8
# Feistel substitution on left word (round 8)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p9
# Feistel substitution on right word (round 9)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p10
# Feistel substitution on left word (round 10)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p11
# Feistel substitution on right word (round 11)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p12
# Feistel substitution on left word (round 12)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p13
# Feistel substitution on right word (round 13)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p14
# Feistel substitution on left word (round 14)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p15
# Feistel substitution on right word (round 15)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p16
p10, p11 = l, r = r ^ p17, l
#------------------------------------------------
# update P[12] and P[13]
#------------------------------------------------
l ^= p0
# Feistel substitution on left word (round 0)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p1
# Feistel substitution on right word (round 1)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p2
# Feistel substitution on left word (round 2)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p3
# Feistel substitution on right word (round 3)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p4
# Feistel substitution on left word (round 4)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p5
# Feistel substitution on right word (round 5)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p6
# Feistel substitution on left word (round 6)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p7
# Feistel substitution on right word (round 7)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p8
# Feistel substitution on left word (round 8)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p9
# Feistel substitution on right word (round 9)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p10
# Feistel substitution on left word (round 10)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p11
# Feistel substitution on right word (round 11)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p12
# Feistel substitution on left word (round 12)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p13
# Feistel substitution on right word (round 13)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p14
# Feistel substitution on left word (round 14)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p15
# Feistel substitution on right word (round 15)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p16
p12, p13 = l, r = r ^ p17, l
#------------------------------------------------
# update P[14] and P[15]
#------------------------------------------------
l ^= p0
# Feistel substitution on left word (round 0)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p1
# Feistel substitution on right word (round 1)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p2
# Feistel substitution on left word (round 2)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p3
# Feistel substitution on right word (round 3)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p4
# Feistel substitution on left word (round 4)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p5
# Feistel substitution on right word (round 5)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p6
# Feistel substitution on left word (round 6)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p7
# Feistel substitution on right word (round 7)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p8
# Feistel substitution on left word (round 8)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p9
# Feistel substitution on right word (round 9)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p10
# Feistel substitution on left word (round 10)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p11
# Feistel substitution on right word (round 11)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p12
# Feistel substitution on left word (round 12)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p13
# Feistel substitution on right word (round 13)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p14
# Feistel substitution on left word (round 14)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p15
# Feistel substitution on right word (round 15)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p16
p14, p15 = l, r = r ^ p17, l
#------------------------------------------------
# update P[16] and P[17]
#------------------------------------------------
l ^= p0
# Feistel substitution on left word (round 0)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p1
# Feistel substitution on right word (round 1)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p2
# Feistel substitution on left word (round 2)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p3
# Feistel substitution on right word (round 3)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p4
# Feistel substitution on left word (round 4)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p5
# Feistel substitution on right word (round 5)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p6
# Feistel substitution on left word (round 6)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p7
# Feistel substitution on right word (round 7)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p8
# Feistel substitution on left word (round 8)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p9
# Feistel substitution on right word (round 9)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p10
# Feistel substitution on left word (round 10)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p11
# Feistel substitution on right word (round 11)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p12
# Feistel substitution on left word (round 12)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p13
# Feistel substitution on right word (round 13)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p14
# Feistel substitution on left word (round 14)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p15
# Feistel substitution on right word (round 15)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p16
p16, p17 = l, r = r ^ p17, l
#------------------------------------------------
# save changes to original P array
#------------------------------------------------
P[:] = (p0, p1, p2, p3, p4, p5, p6, p7, p8, p9,
p10, p11, p12, p13, p14, p15, p16, p17)
#=============================================================
# update S
#=============================================================
for box in S:
j = 0
while j < 256:
l ^= p0
# Feistel substitution on left word (round 0)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p1
# Feistel substitution on right word (round 1)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p2
# Feistel substitution on left word (round 2)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p3
# Feistel substitution on right word (round 3)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p4
# Feistel substitution on left word (round 4)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p5
# Feistel substitution on right word (round 5)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p6
# Feistel substitution on left word (round 6)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p7
# Feistel substitution on right word (round 7)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p8
# Feistel substitution on left word (round 8)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p9
# Feistel substitution on right word (round 9)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p10
# Feistel substitution on left word (round 10)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p11
# Feistel substitution on right word (round 11)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p12
# Feistel substitution on left word (round 12)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p13
# Feistel substitution on right word (round 13)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p14
# Feistel substitution on left word (round 14)
r ^= ((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff) ^ p15
# Feistel substitution on right word (round 15)
l ^= ((((S0[r >> 24] + S1[(r >> 16) & 0xff]) ^ S2[(r >> 8) & 0xff]) +
S3[r & 0xff]) & 0xffffffff) ^ p16
box[j], box[j+1] = l, r = r ^ p17, l
j += 2
#===================================================================
# eoc
#===================================================================
#=============================================================================
# eof
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/crypto/_blowfish/unrolled.py | unrolled.py |
#=============================================================================
# imports
#=============================================================================
# core
import os
import textwrap
# pkg
from zdppy_password_hash.utils.compat import irange
# local
#=============================================================================
# helpers
#=============================================================================
def varlist(name, count):
return ", ".join(name + str(x) for x in irange(count))
def indent_block(block, padding):
"""ident block of text"""
lines = block.split("\n")
return "\n".join(
padding + line if line else ""
for line in lines
)
BFSTR = """\
((((S0[l >> 24] + S1[(l >> 16) & 0xff]) ^ S2[(l >> 8) & 0xff]) +
S3[l & 0xff]) & 0xffffffff)
""".strip()
def render_encipher(write, indent=0):
for i in irange(0, 15, 2):
write(indent, """\
# Feistel substitution on left word (round %(i)d)
r ^= %(left)s ^ p%(i1)d
# Feistel substitution on right word (round %(i1)d)
l ^= %(right)s ^ p%(i2)d
""", i=i, i1=i+1, i2=i+2,
left=BFSTR, right=BFSTR.replace("l","r"),
)
def write_encipher_function(write, indent=0):
write(indent, """\
def encipher(self, l, r):
\"""blowfish encipher a single 64-bit block encoded as two 32-bit ints\"""
(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9,
p10, p11, p12, p13, p14, p15, p16, p17) = self.P
S0, S1, S2, S3 = self.S
l ^= p0
""")
render_encipher(write, indent+1)
write(indent+1, """\
return r ^ p17, l
""")
def write_expand_function(write, indent=0):
write(indent, """\
def expand(self, key_words):
\"""unrolled version of blowfish key expansion\"""
##assert len(key_words) >= 18, "size of key_words must be >= 18"
P, S = self.P, self.S
S0, S1, S2, S3 = S
#=============================================================
# integrate key
#=============================================================
""")
for i in irange(18):
write(indent+1, """\
p%(i)d = P[%(i)d] ^ key_words[%(i)d]
""", i=i)
write(indent+1, """\
#=============================================================
# update P
#=============================================================
#------------------------------------------------
# update P[0] and P[1]
#------------------------------------------------
l, r = p0, 0
""")
render_encipher(write, indent+1)
write(indent+1, """\
p0, p1 = l, r = r ^ p17, l
""")
for i in irange(2, 18, 2):
write(indent+1, """\
#------------------------------------------------
# update P[%(i)d] and P[%(i1)d]
#------------------------------------------------
l ^= p0
""", i=i, i1=i+1)
render_encipher(write, indent+1)
write(indent+1, """\
p%(i)d, p%(i1)d = l, r = r ^ p17, l
""", i=i, i1=i+1)
write(indent+1, """\
#------------------------------------------------
# save changes to original P array
#------------------------------------------------
P[:] = (p0, p1, p2, p3, p4, p5, p6, p7, p8, p9,
p10, p11, p12, p13, p14, p15, p16, p17)
#=============================================================
# update S
#=============================================================
for box in S:
j = 0
while j < 256:
l ^= p0
""")
render_encipher(write, indent+3)
write(indent+3, """\
box[j], box[j+1] = l, r = r ^ p17, l
j += 2
""")
#=============================================================================
# main
#=============================================================================
def main():
target = os.path.join(os.path.dirname(__file__), "unrolled.py")
    fh = open(target, "w")
def write(indent, msg, **kwds):
literal = kwds.pop("literal", False)
if kwds:
msg %= kwds
if not literal:
msg = textwrap.dedent(msg.rstrip(" "))
if indent:
msg = indent_block(msg, " " * (indent*4))
fh.write(msg)
write(0, """\
\"""zdppy_password_hash.crypto._blowfish.unrolled - unrolled loop implementation of bcrypt,
autogenerated by _gen_files.py
currently this override the encipher() and expand() methods
with optimized versions, and leaves the other base.py methods alone.
\"""
#=================================================================
# imports
#=================================================================
# pkg
from zdppy_password_hash.crypto._blowfish.base import BlowfishEngine as _BlowfishEngine
# local
__all__ = [
"BlowfishEngine",
]
#=================================================================
#
#=================================================================
class BlowfishEngine(_BlowfishEngine):
""")
write_encipher_function(write, indent=1)
write_expand_function(write, indent=1)
write(0, """\
#=================================================================
# eoc
#=================================================================
#=================================================================
# eof
#=================================================================
""")
if __name__ == "__main__":
main()
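# Regeneration (illustrative): running this script directly, e.g.
#   python _gen_files.py
# rewrites unrolled.py next to this file; zdppy_password_hash must be importable.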
#=============================================================================
# eof
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/crypto/_blowfish/_gen_files.py | _gen_files.py |
from itertools import chain
import struct
# pkg
from zdppy_password_hash.utils import getrandbytes, rng
from zdppy_password_hash.utils.binary import bcrypt64
from zdppy_password_hash.utils.compat import BytesIO, unicode, u, native_string_types
from zdppy_password_hash.crypto._blowfish.unrolled import BlowfishEngine
# local
__all__ = [
'BlowfishEngine',
'raw_bcrypt',
]
#=============================================================================
# bcrypt constants
#=============================================================================
# bcrypt constant data "OrpheanBeholderScryDoubt" as 6 integers
BCRYPT_CDATA = [
0x4f727068, 0x65616e42, 0x65686f6c,
0x64657253, 0x63727944, 0x6f756274
]
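# Sanity check (illustrative): these six words are exactly the big-endian
# 32-bit unpacking of the 24-byte magic string, i.e.
#   struct.unpack(">6I", b"OrpheanBeholderScryDoubt") == tuple(BCRYPT_CDATA)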
# struct used to encode ciphertext as digest (last output byte discarded)
digest_struct = struct.Struct(">6I")
#=============================================================================
# base bcrypt helper
#
# interface designed only for use by zdppy_password_hash.handlers.bcrypt:BCrypt
# probably not suitable for other purposes
#=============================================================================
BNULL = b'\x00'
def raw_bcrypt(password, ident, salt, log_rounds):
"""perform central password hashing step in bcrypt scheme.
:param password: the password to hash
:param ident: identifier w/ minor version (e.g. 2, 2a)
:param salt: the binary salt to use (encoded in bcrypt-base64)
:param log_rounds: the log2 of the number of rounds (as int)
:returns: bcrypt-base64 encoded checksum
"""
#===================================================================
# parse inputs
#===================================================================
# parse ident
assert isinstance(ident, native_string_types)
add_null_padding = True
if ident == u('2a') or ident == u('2y') or ident == u('2b'):
pass
elif ident == u('2'):
add_null_padding = False
elif ident == u('2x'):
raise ValueError("crypt_blowfish's buggy '2x' hashes are not "
"currently supported")
else:
raise ValueError("unknown ident: %r" % (ident,))
# decode & validate salt
assert isinstance(salt, bytes)
salt = bcrypt64.decode_bytes(salt)
if len(salt) < 16:
raise ValueError("Missing salt bytes")
elif len(salt) > 16:
salt = salt[:16]
# prepare password
assert isinstance(password, bytes)
if add_null_padding:
password += BNULL
# validate rounds
if log_rounds < 4 or log_rounds > 31:
raise ValueError("Bad number of rounds")
#===================================================================
#
# run EKS-Blowfish algorithm
#
# This uses the "enhanced key schedule" step described by
# Provos and Mazieres in "A Future-Adaptable Password Scheme"
# http://www.openbsd.org/papers/bcrypt-paper.ps
#
#===================================================================
engine = BlowfishEngine()
# convert password & salt into list of 18 32-bit integers (72 bytes total).
pass_words = engine.key_to_words(password)
salt_words = engine.key_to_words(salt)
# truncate salt_words to original 16 byte salt, or loop won't wrap
# correctly when passed to .eks_salted_expand()
salt_words16 = salt_words[:4]
# do EKS key schedule setup
engine.eks_salted_expand(pass_words, salt_words16)
# apply password & salt keys to key schedule a bunch more times.
rounds = 1<<log_rounds
engine.eks_repeated_expand(pass_words, salt_words, rounds)
# encipher constant data, and encode to bytes as digest.
data = list(BCRYPT_CDATA)
i = 0
while i < 6:
data[i], data[i+1] = engine.repeat_encipher(data[i], data[i+1], 64)
i += 2
raw = digest_struct.pack(*data)[:-1]
return bcrypt64.encode_bytes(raw)
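# Illustrative call (a sketch; the salt below is a made-up 22-character
# bcrypt-base64 value, and zdppy_password_hash.handlers.bcrypt is the
# intended caller):
#
#   checksum = raw_bcrypt(b"password", "2b", b"abcdefghijklmnopqrstuu", 5)
#
# The caller then assembles the final "$2b$05$<salt><checksum>" hash string.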
#=============================================================================
# eof
#============================================================================= | zdppy-password-hash | /zdppy_password_hash-0.1.0.tar.gz/zdppy_password_hash-0.1.0/zdppy_password_hash/crypto/_blowfish/__init__.py | __init__.py |
# zdppy_password
Python password and encryption utilities.
Project home: https://github.com/zhangdapeng520/zdppy_password
## Version history
- 2022-03-29, v0.1.0: AES and RSA encryption/decryption interoperable between Python and Go
## Common commands
Generate a private key:
```shell
openssl genrsa -out private.pem 1024
```
Generate the matching public key:
```shell
openssl rsa -in private.pem -pubout -out public.pem
```
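The `Rsa` class shipped with this package can also create the key pair for you (a minimal sketch based on `zdppy_password.rsa`; `generate_private_key`/`generate_public_key` write the PEM files when given a path and reuse existing ones):
```python
from zdppy_password.rsa import Rsa

rsa = Rsa(key_length=1024)
rsa.generate_private_key("private.pem")  # writes private.pem if missing
rsa.generate_public_key("public.pem")    # writes public.pem if missing
```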
## Usage examples
### Example 1: AES encryption and decryption
```python
from zdppy_password.aes import Aes
aes = Aes()
res = aes.encrypt(b'{"cmd": 3000, "msg": "ok"}').decode(encoding='utf-8')
print(res)
print(aes.decrypt(res))
# ciphertext produced by the Go implementation
print(aes.decrypt("0qg69fOjmE0oR59muWdXoWhr5d4Z0XyQaC69684mAsw="))
```
### Example 2: RSA encryption and decryption
```python
from zdppy_password.rsa import Rsa
import json
rsa = Rsa()
data = {"username": "张大鹏", "age": 22}
data = json.dumps(data)
print(data)
# encrypt
secret = rsa.encrypt(data, "public.pem")
# decrypt
print(json.loads(rsa.decrypt(secret, "private.pem")))
# ciphertext produced by the Go implementation
data = "NoA3e0HDMhj7nrwKUx975lUZgjRIA1ZFcEBLeAvgYQ7Nu7toic7xXtg9qmD+wr6soZzb6Gl37H1I5j9OlOTR9igQ+p1pXPOWo47DyDpw3UjiQ6eOAYmyT53lMUGylLZIKHhnbpea5Qpjl+dHrWVYsQ864/asS1ewe9k2hR+BlkBuZSP8p6oiJ+BBOVYckqPFf6PWBjAFGAMridMXglYrKZ2v7+QdwU4mq2YEBVD5XdY70lIEg4XIY8Wb6n5tBB5XkzLsqd22XcBhnEPGLmMC4fuEMyLptH5dMGF/Ogi9YDAP/rKvzdTTpFXPLPh5eeqMMXAS5+AigE1jx1M3w+7IUw=="
print(rsa.decrypt(data, "private.pem"))
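# Signing and verifying (a sketch; signer() and verify() are module-level
# helpers in zdppy_password.rsa and are also bound on the Rsa instance)
msg = "hello"
signature = rsa.signer(msg, "private.pem")
print(rsa.verify(msg, signature, "public.pem"))  # True if the text is untampered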
``` | zdppy-password | /zdppy_password-0.1.0-py3-none-any.whl/README.md | README.md |
import os
import base64
from Crypto import Random
from zdppy_log import Log
from Crypto.PublicKey import RSA
from Crypto.Hash import SHA
from Crypto.Signature import PKCS1_v1_5 as PKCS1_signature
from Crypto.Cipher import PKCS1_v1_5 as PKCS1_cipher
def get_key(key_path: str):
    """Load an RSA key (public or private) from a PEM file."""
    if not os.path.exists(key_path):
        raise FileNotFoundError("key file not found: %s" % key_path)
    with open(key_path) as f:
        data = f.read()
    return RSA.importKey(data)
def encrypt(
        data: str,
        public_key_path: str = "public_key.pem",
):
    """Encrypt a UTF-8 string with the RSA public key; returns base64 text."""
    public_key = get_key(public_key_path)
    cipher = PKCS1_cipher.new(public_key)
    encrypt_text = base64.b64encode(cipher.encrypt(data.encode("utf8")))
    return encrypt_text.decode('utf-8')
def decrypt(
        data: str,
        private_key_path: str = "private_key.pem",
):
    """Decrypt base64 RSA ciphertext with the private key; returns a string."""
    private_key = get_key(private_key_path)
    cipher = PKCS1_cipher.new(private_key)
    back_text = cipher.decrypt(base64.b64decode(data), 0)
    return back_text.decode('utf-8')
def signer(data, private_key_path: str = "private.pem"):
    """Sign data with the RSA private key; returns a base64 signature."""
    private_key = get_key(private_key_path)
    signer_obj = PKCS1_signature.new(private_key)
    digest = SHA.new()
    digest.update(data.encode("utf8"))
    sign = signer_obj.sign(digest)
    signature = base64.b64encode(sign)
    return signature.decode('utf-8')
def verify(text, signature, public_key_path: str = "public.pem"):
    """Check a base64 signature against text using the RSA public key."""
    public_key = get_key(public_key_path)
    verifier = PKCS1_signature.new(public_key)
    digest = SHA.new()
    digest.update(text.encode("utf8"))
    return verifier.verify(digest, base64.b64decode(signature))
class Rsa:
def __init__(
self,
key_length: int = 1024,
log_file_path: str = "logs/zdppy/zdppy_password.log",
debug: bool = True,
):
"""
初始化Rsa加密对象
:param key_length: 生成key的长度,长度越长越安全,但是速度也越慢。必须大于或等于1024。
"""
random_generator = Random.new().read
self.rsa = RSA.generate(key_length, random_generator)
self.log = Log(log_file_path=log_file_path, debug=debug)
        self.get_key = get_key  # load a key from a PEM file
        self.decrypt = decrypt  # decrypt
        self.encrypt = encrypt  # encrypt
        self.signer = signer  # sign
        self.verify = verify  # verify a signature
def generate_private_key(
self,
private_key_path: str = None,
is_to_str: bool = True,
):
"""
生成RSA私钥
:param is_to_str: 是否转换为字符串
:param private_key_path: 私钥文件保存的路径
:return: 私钥
"""
# 如果已存在,读取返回
if private_key_path is not None and os.path.exists(private_key_path):
with open(private_key_path, "rb") as f:
result = f.read()
if is_to_str:
result = result.decode()
return result
# 生成私钥
result = self.rsa.exportKey()
# 保存
if private_key_path is not None and isinstance(private_key_path, str):
with open(private_key_path, "wb") as f:
f.write(result)
# 转换为字符串
if is_to_str:
result = result.decode('utf-8')
# 返回结果
return result
def generate_public_key(
self,
public_key_path: str = None,
is_to_str: bool = True,
):
"""
生成RSA公钥
:param is_to_str: 是否转换为字符串
:param public_key_path: 公钥文件保存的路径
:return: 公钥
"""
# 如果已存在,读取返回
if public_key_path is not None and os.path.exists(public_key_path):
with open(public_key_path, "rb") as f:
result = f.read()
if is_to_str:
result = result.decode()
return result
# 生成公钥
result = self.rsa.publickey().exportKey()
# 保存
if public_key_path is not None and isinstance(public_key_path, str):
with open(public_key_path, "wb") as f:
f.write(result)
# 转换为字符串
if is_to_str:
result = result.decode('utf-8')
# 返回结果
return result | zdppy-password | /zdppy_password-0.1.0-py3-none-any.whl/zdppy_password/rsa.py | rsa.py |
from tinyec import registry
from Crypto.Cipher import AES
import hashlib
import secrets
import base64
import json
class Ecc:
def __init__(self):
self.curve = registry.get_curve('brainpoolP256r1')
self.private_key = secrets.randbelow(self.curve.field.n)
self.public_key = self.private_key * self.curve.g
@staticmethod
def __encrypt_aes_gcm(data, secret_key):
aes_cipher = AES.new(secret_key, AES.MODE_GCM)
ciphertext, auth_tag = aes_cipher.encrypt_and_digest(data)
return ciphertext, aes_cipher.nonce, auth_tag
@staticmethod
def __decrypt_aes_gcm(ciphertext, nonce, auth_tag, secret_key):
aes_cipher = AES.new(secret_key, AES.MODE_GCM, nonce)
plaintext = aes_cipher.decrypt_and_verify(ciphertext, auth_tag)
return plaintext
@staticmethod
def __ecc_point_to_256_bit_key(point):
sha = hashlib.sha256(int.to_bytes(point.x, 32, 'big'))
sha.update(int.to_bytes(point.y, 32, 'big'))
return sha.digest()
    def encrypt(self, data):
        """
        ECC (ECIES-style) encryption.
        :param data: the bytes to encrypt
        :return: a tuple (ciphertext, nonce, auth_tag, ciphertext_public_key)
        """
        cipher_text_private_key = secrets.randbelow(self.curve.field.n)
        shared_ecc_key = cipher_text_private_key * self.public_key
        secret_key = self.__ecc_point_to_256_bit_key(shared_ecc_key)
        ciphertext, nonce, auth_tag = self.__encrypt_aes_gcm(data, secret_key)
        cipher_text_public_key = cipher_text_private_key * self.curve.g
        return ciphertext, nonce, auth_tag, cipher_text_public_key
    def decrypt(self, data):
        """
        ECC decryption.
        :param data: the tuple returned by encrypt()
        :return: the decrypted bytes
        """
        (cipher_text, nonce, auth_tag, ciphertext_public_key) = data
        shared_ecc_key = self.private_key * ciphertext_public_key
        secret_key = self.__ecc_point_to_256_bit_key(shared_ecc_key)
        return self.__decrypt_aes_gcm(cipher_text, nonce, auth_tag, secret_key)
if __name__ == '__main__':
data = b'Text to be encrypted by ECC public key and decrypted by its corresponding ECC private key'
print("original data:", data)
    # create the private key (the public key is derived from it)
    ecc = Ecc()
    print("private key:", ecc.private_key)
    # the corresponding public key
    print("public key:", ecc.public_key)
    # encrypt the data
    encrypted_data = ecc.encrypt(data)
    print("encrypted data:", encrypted_data)
    # decrypt the data
decrypted_data = ecc.decrypt(encrypted_data)
print("decrypted data:", decrypted_data) | zdppy-password | /zdppy_password-0.1.0-py3-none-any.whl/zdppy_password/ecc.py | ecc.py |
import hashlib
from typing import Union
def hash_password(htype: str, data: Union[str, bytes], salt: str = None):
    """
    Hash data with the given algorithm.
    :param htype: the hash type, one of "md5", "sha1", "sha256", "sha512"
    :param data: the data to hash
    :param salt: optional salt; it is fed to the hash before the data
    :return: the hex digest string
    """
    # validate the hash type
    if htype not in ["md5", "sha1", "sha256", "sha512"]:
        raise ValueError("unsupported hash type: %s" % htype)
    # create the hash object, seeded with the salt if given
    if salt is not None:
        m = hashlib.new(htype, salt.encode())
    else:
        m = hashlib.new(htype)
    # feed in the data
    if isinstance(data, str):
        m.update(data.encode())
    elif isinstance(data, bytes):
        m.update(data)
    else:
        raise TypeError("data must be str or bytes")
    # return the hex digest
    return m.hexdigest()
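# Example (illustrative): hash_password("sha256", "123456", salt="pepper")
# returns the hex digest of sha256(b"pepper" + b"123456"), because the salt
# is fed to the hash object before the data.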
def md5(data: Union[str, bytes], salt: str = None):
"""
使用md5算法对数据进行加密
:param data: 要加密的数据
:param salt: 增加安全性的盐值
:return: 加密后的字符串
"""
return hash_password("md5", data, salt)
def md5_verify(data, md5_str, salt: str = None):
"""
校验数据和md5加密后的字符串是否一致
:param data: 数据
:param md5_str: md5加密后的字符串
:param salt: 增加安全性的盐值
:return: 校验结果
"""
result = md5(data, salt)
return result == md5_str
def sha1(data: Union[str, bytes], salt: str = None):
"""
使用sha1加密算法加密数据
:param data: 要加密的数据
:param salt: 盐值
:return: 加密后的字符串
"""
return hash_password("sha1", data, salt)
def sha1_verify(data, password, salt: str = None):
    """Check whether data matches a SHA-1 hex digest."""
    return sha1(data, salt) == password
def sha256(data: Union[str, bytes], salt: str = None):
"""
使用sha256加密算法加密数据
:param data: 要加密的数据
:param salt: 盐值
:return: 加密后的字符串
"""
return hash_password("sha256", data, salt)
def sha256_verify(data, password, salt: str = None):
    """Check whether data matches a SHA-256 hex digest."""
    return sha256(data, salt) == password
def sha512(data: Union[str, bytes], salt: str = None):
"""
使用sha512加密算法加密数据
:param data: 要加密的数据
:param salt: 盐值
:return: 加密后的字符串
"""
return hash_password("sha512", data, salt)
def sha512_verify(data, password, salt: str = None):
    """Check whether data matches a SHA-512 hex digest."""
    return sha512(data, salt) == password
if __name__ == '__main__':
    # without salt
    print(md5("123456"))
    print(md5_verify("123456", md5("123456")))
    # with salt
    print(md5("123456", "salt"))
    print(md5_verify("123456", md5("123456", "salt"), "salt"))
    # without salt
    print(sha1("abc"))
    print(sha1_verify("123456", sha1("123456")))
    # with salt
    print(sha1("123456", "salt"))
    print(sha1_verify("123456", sha1("123456", "salt"), "salt"))
    # without salt
    print(sha256("abc"))
    print(sha256_verify("123456", sha256("123456")))
    # with salt
    print(sha256("123456", "salt"))
    print(sha256_verify("123456", sha256("123456", "salt"), "salt"))
    # without salt
    print(sha512("abc"))
    print(sha512_verify("123456", sha512("123456")))
    # with salt
    print(sha512("123456", "salt"))
print(sha512_verify("123456", sha512("123456", "salt"), "salt")) | zdppy-password | /zdppy_password-0.1.0-py3-none-any.whl/zdppy_password/hash.py | hash.py |
import typing
class NotRelativePrimeError(ValueError):
def __init__(self, a: int, b: int, d: int, msg: str = "") -> None:
super().__init__(msg or "%d and %d are not relatively prime, divider=%i" % (a, b, d))
self.a = a
self.b = b
self.d = d
def bit_size(num: int) -> int:
"""
    Number of bits needed to represent an integer, excluding any leading
    0 bits.
Usage::
>>> bit_size(1023)
10
>>> bit_size(1024)
11
>>> bit_size(1025)
11
:param num:
Integer value. If num is 0, returns 0. Only the absolute value of the
number is considered. Therefore, signed integers will be abs(num)
before the number's bit length is determined.
:returns:
Returns the number of bits in the integer.
"""
try:
return num.bit_length()
except AttributeError as ex:
raise TypeError("bit_size(num) only supports integers, not %r" % type(num)) from ex
def byte_size(number: int) -> int:
"""
Returns the number of bytes required to hold a specific long number.
The number of bytes is rounded up.
Usage::
>>> byte_size(1 << 1023)
128
>>> byte_size((1 << 1024) - 1)
128
>>> byte_size(1 << 1024)
129
:param number:
An unsigned integer
:returns:
The number of bytes required to hold a specific long number.
"""
if number == 0:
return 1
return ceil_div(bit_size(number), 8)
def ceil_div(num: int, div: int) -> int:
"""
Returns the ceiling function of a division between `num` and `div`.
Usage::
>>> ceil_div(100, 7)
15
>>> ceil_div(100, 10)
10
>>> ceil_div(1, 4)
1
:param num: Division's numerator, a number
:param div: Division's divisor, a number
:return: Rounded up result of the division between the parameters.
"""
quanta, mod = divmod(num, div)
if mod:
quanta += 1
return quanta
def extended_gcd(a: int, b: int) -> typing.Tuple[int, int, int]:
"""Returns a tuple (r, i, j) such that r = gcd(a, b) = ia + jb"""
    # r = gcd(a,b); i = multiplicative inverse of a mod b,
    # or j = multiplicative inverse of b mod a.
    # Negative return values for i or j are made positive mod b or a respectively.
    # The iterative version is faster and uses much less stack space.
x = 0
y = 1
lx = 1
ly = 0
oa = a # Remember original a/b to remove
ob = b # negative values from return results
while b != 0:
q = a // b
(a, b) = (b, a % b)
(x, lx) = ((lx - (q * x)), x)
(y, ly) = ((ly - (q * y)), y)
if lx < 0:
lx += ob # If neg wrap modulo original b
if ly < 0:
ly += oa # If neg wrap modulo original a
return a, lx, ly # Return only positive values
def inverse(x: int, n: int) -> int:
"""Returns the inverse of x % n under multiplication, a.k.a x^-1 (mod n)
>>> inverse(7, 4)
3
>>> (inverse(143, 4) * 143) % 4
1
"""
(divider, inv, _) = extended_gcd(x, n)
if divider != 1:
raise NotRelativePrimeError(x, n, divider)
return inv
def crt(a_values: typing.Iterable[int], modulo_values: typing.Iterable[int]) -> int:
"""Chinese Remainder Theorem.
Calculates x such that x = a[i] (mod m[i]) for each i.
:param a_values: the a-values of the above equation
:param modulo_values: the m-values of the above equation
:returns: x such that x = a[i] (mod m[i]) for each i
>>> crt([2, 3], [3, 5])
8
>>> crt([2, 3, 2], [3, 5, 7])
23
>>> crt([2, 3, 0], [7, 11, 15])
135
"""
m = 1
x = 0
for modulo in modulo_values:
m *= modulo
for (m_i, a_i) in zip(modulo_values, a_values):
M_i = m // m_i
inv = inverse(M_i, m_i)
x = (x + a_i * M_i * inv) % m
return x
if __name__ == "__main__":
import doctest
doctest.testmod() | zdppy-password | /zdppy_password-0.1.0-py3-none-any.whl/zdppy_password/libs/rsa/common.py | common.py |
import sys
from optparse import OptionParser
from . import key
def private_to_public() -> None:
"""Reads a private key and outputs the corresponding public key."""
# Parse the CLI options
parser = OptionParser(
usage="usage: %prog [options]",
description="Reads a private key and outputs the "
"corresponding public key. Both private and public keys use "
"the format described in PKCS#1 v1.5",
)
parser.add_option(
"-i",
"--input",
dest="infilename",
type="string",
help="Input filename. Reads from stdin if not specified",
)
parser.add_option(
"-o",
"--output",
dest="outfilename",
type="string",
help="Output filename. Writes to stdout of not specified",
)
parser.add_option(
"--inform",
dest="inform",
help="key format of input - default PEM",
choices=("PEM", "DER"),
default="PEM",
)
parser.add_option(
"--outform",
dest="outform",
help="key format of output - default PEM",
choices=("PEM", "DER"),
default="PEM",
)
(cli, cli_args) = parser.parse_args(sys.argv)
# Read the input data
if cli.infilename:
print(
"Reading private key from %s in %s format" % (cli.infilename, cli.inform),
file=sys.stderr,
)
with open(cli.infilename, "rb") as infile:
in_data = infile.read()
else:
print("Reading private key from stdin in %s format" % cli.inform, file=sys.stderr)
in_data = sys.stdin.read().encode("ascii")
assert type(in_data) == bytes, type(in_data)
# Take the public fields and create a public key
    priv_key = key.PrivateKey.load_pkcs1(in_data, cli.inform)
    pub_key = key.PublicKey(priv_key.n, priv_key.e)
# Save to the output file
out_data = pub_key.save_pkcs1(cli.outform)
if cli.outfilename:
print(
"Writing public key to %s in %s format" % (cli.outfilename, cli.outform),
file=sys.stderr,
)
with open(cli.outfilename, "wb") as outfile:
outfile.write(out_data)
else:
print("Writing public key to stdout in %s format" % cli.outform, file=sys.stderr)
sys.stdout.write(out_data.decode("ascii")) | zdppy-password | /zdppy_password-0.1.0-py3-none-any.whl/zdppy_password/libs/rsa/util.py | util.py |
import base64
import typing
# Should either be ASCII strings or bytes.
FlexiText = typing.Union[str, bytes]
def _markers(pem_marker: FlexiText) -> typing.Tuple[bytes, bytes]:
"""
Returns the start and end PEM markers, as bytes.
"""
if not isinstance(pem_marker, bytes):
pem_marker = pem_marker.encode("ascii")
return (
b"-----BEGIN " + pem_marker + b"-----",
b"-----END " + pem_marker + b"-----",
)
def _pem_lines(contents: bytes, pem_start: bytes, pem_end: bytes) -> typing.Iterator[bytes]:
"""Generator over PEM lines between pem_start and pem_end."""
in_pem_part = False
seen_pem_start = False
for line in contents.splitlines():
line = line.strip()
# Skip empty lines
if not line:
continue
# Handle start marker
if line == pem_start:
if in_pem_part:
raise ValueError('Seen start marker "%r" twice' % pem_start)
in_pem_part = True
seen_pem_start = True
continue
# Skip stuff before first marker
if not in_pem_part:
continue
# Handle end marker
if in_pem_part and line == pem_end:
in_pem_part = False
break
# Load fields
if b":" in line:
continue
yield line
# Do some sanity checks
if not seen_pem_start:
raise ValueError('No PEM start marker "%r" found' % pem_start)
if in_pem_part:
raise ValueError('No PEM end marker "%r" found' % pem_end)
def load_pem(contents: FlexiText, pem_marker: FlexiText) -> bytes:
"""Loads a PEM file.
:param contents: the contents of the file to interpret
:param pem_marker: the marker of the PEM content, such as 'RSA PRIVATE KEY'
when your file has '-----BEGIN RSA PRIVATE KEY-----' and
'-----END RSA PRIVATE KEY-----' markers.
:return: the base64-decoded content between the start and end markers.
@raise ValueError: when the content is invalid, for example when the start
marker cannot be found.
"""
# We want bytes, not text. If it's text, it can be converted to ASCII bytes.
if not isinstance(contents, bytes):
contents = contents.encode("ascii")
(pem_start, pem_end) = _markers(pem_marker)
pem_lines = [line for line in _pem_lines(contents, pem_start, pem_end)]
# Base64-decode the contents
pem = b"".join(pem_lines)
return base64.standard_b64decode(pem)
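# Round-trip example (illustrative): load_pem() above and save_pem() below
# are inverses for any payload, e.g.
#
#   blob = save_pem(b"\x00\x01\x02", "RSA PRIVATE KEY")
#   assert load_pem(blob, "RSA PRIVATE KEY") == b"\x00\x01\x02"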
def save_pem(contents: bytes, pem_marker: FlexiText) -> bytes:
"""Saves a PEM file.
:param contents: the contents to encode in PEM format
:param pem_marker: the marker of the PEM content, such as 'RSA PRIVATE KEY'
when your file has '-----BEGIN RSA PRIVATE KEY-----' and
'-----END RSA PRIVATE KEY-----' markers.
:return: the base64-encoded content between the start and end markers, as bytes.
"""
(pem_start, pem_end) = _markers(pem_marker)
b64 = base64.standard_b64encode(contents).replace(b"\n", b"")
pem_lines = [pem_start]
for block_start in range(0, len(b64), 64):
block = b64[block_start : block_start + 64]
pem_lines.append(block)
pem_lines.append(pem_end)
pem_lines.append(b"")
return b"\n".join(pem_lines) | zdppy-password | /zdppy_password-0.1.0-py3-none-any.whl/zdppy_password/libs/rsa/pem.py | pem.py |
from . import common, randnum
__all__ = ["getprime", "are_relatively_prime"]
def gcd(p: int, q: int) -> int:
"""Returns the greatest common divisor of p and q
>>> gcd(48, 180)
12
"""
while q != 0:
(p, q) = (q, p % q)
return p
def get_primality_testing_rounds(number: int) -> int:
"""Returns minimum number of rounds for Miller-Rabing primality testing,
based on number bitsize.
According to NIST FIPS 186-4, Appendix C, Table C.3, minimum number of
rounds of M-R testing, using an error probability of 2 ** (-100), for
different p, q bitsizes are:
* p, q bitsize: 512; rounds: 7
* p, q bitsize: 1024; rounds: 4
* p, q bitsize: 1536; rounds: 3
See: http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-4.pdf
"""
# Calculate number bitsize.
bitsize = common.bit_size(number)
# Set number of rounds.
if bitsize >= 1536:
return 3
if bitsize >= 1024:
return 4
if bitsize >= 512:
return 7
# For smaller bitsizes, set arbitrary number of rounds.
return 10
def miller_rabin_primality_testing(n: int, k: int) -> bool:
"""Calculates whether n is composite (which is always correct) or prime
(which theoretically is incorrect with error probability 4**-k), by
applying Miller-Rabin primality testing.
For reference and implementation example, see:
https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test
:param n: Integer to be tested for primality.
:type n: int
:param k: Number of rounds (witnesses) of Miller-Rabin testing.
:type k: int
:return: False if the number is composite, True if it's probably prime.
:rtype: bool
"""
# prevent potential infinite loop when d = 0
if n < 2:
return False
# Decompose (n - 1) to write it as (2 ** r) * d
# While d is even, divide it by 2 and increase the exponent.
d = n - 1
r = 0
while not (d & 1):
r += 1
d >>= 1
# Test k witnesses.
for _ in range(k):
# Generate random integer a, where 2 <= a <= (n - 2)
a = randnum.randint(n - 3) + 1
x = pow(a, d, n)
if x == 1 or x == n - 1:
continue
for _ in range(r - 1):
x = pow(x, 2, n)
if x == 1:
# n is composite.
return False
if x == n - 1:
# Exit inner loop and continue with next witness.
break
else:
# If loop doesn't break, n is composite.
return False
return True
def is_prime(number: int) -> bool:
"""Returns True if the number is prime, and False otherwise.
>>> is_prime(2)
True
>>> is_prime(42)
False
>>> is_prime(41)
True
"""
# Check for small numbers.
if number < 10:
return number in {2, 3, 5, 7}
# Check for even numbers.
if not (number & 1):
return False
# Calculate minimum number of rounds.
k = get_primality_testing_rounds(number)
# Run primality testing with (minimum + 1) rounds.
return miller_rabin_primality_testing(number, k + 1)
def getprime(nbits: int) -> int:
"""Returns a prime number that can be stored in 'nbits' bits.
>>> p = getprime(128)
>>> is_prime(p-1)
False
>>> is_prime(p)
True
>>> is_prime(p+1)
False
>>> from rsa import common
>>> common.bit_size(p) == 128
True
"""
assert nbits > 3 # the loop will hang on too small numbers
while True:
integer = randnum.read_random_odd_int(nbits)
# Test for primeness
if is_prime(integer):
return integer
# Retry if not prime
def are_relatively_prime(a: int, b: int) -> bool:
"""Returns True if a and b are relatively prime, and False if they
are not.
>>> are_relatively_prime(2, 3)
True
>>> are_relatively_prime(2, 4)
False
"""
d = gcd(a, b)
return d == 1
if __name__ == "__main__":
print("Running doctests 1000x or until failure")
import doctest
for count in range(1000):
(failures, tests) = doctest.testmod()
if failures:
break
if count % 100 == 0 and count:
print("%i times" % count)
print("Doctests done") | zdppy-password | /zdppy_password-0.1.0-py3-none-any.whl/zdppy_password/libs/rsa/prime.py | prime.py |
import hashlib
import os
import sys
import typing
from hmac import compare_digest
from . import common, transform, core, key
HashType = typing.Any
# ASN.1 codes that describe the hash algorithm used.
HASH_ASN1 = {
"MD5": b"\x30\x20\x30\x0c\x06\x08\x2a\x86\x48\x86\xf7\x0d\x02\x05\x05\x00\x04\x10",
"SHA-1": b"\x30\x21\x30\x09\x06\x05\x2b\x0e\x03\x02\x1a\x05\x00\x04\x14",
"SHA-224": b"\x30\x2d\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x04\x05\x00\x04\x1c",
"SHA-256": b"\x30\x31\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20",
"SHA-384": b"\x30\x41\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x02\x05\x00\x04\x30",
"SHA-512": b"\x30\x51\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x03\x05\x00\x04\x40",
}
HASH_METHODS: typing.Dict[str, typing.Callable[[], HashType]] = {
"MD5": hashlib.md5,
"SHA-1": hashlib.sha1,
"SHA-224": hashlib.sha224,
"SHA-256": hashlib.sha256,
"SHA-384": hashlib.sha384,
"SHA-512": hashlib.sha512,
}
if sys.version_info >= (3, 6):
# Python 3.6 introduced SHA3 support.
HASH_ASN1.update(
{
"SHA3-256": b"\x30\x31\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x08\x05\x00\x04\x20",
"SHA3-384": b"\x30\x41\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x09\x05\x00\x04\x30",
"SHA3-512": b"\x30\x51\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x0a\x05\x00\x04\x40",
}
)
HASH_METHODS.update(
{
"SHA3-256": hashlib.sha3_256,
"SHA3-384": hashlib.sha3_384,
"SHA3-512": hashlib.sha3_512,
}
)
class CryptoError(Exception):
"""Base class for all exceptions in this module."""
class DecryptionError(CryptoError):
"""Raised when decryption fails."""
class VerificationError(CryptoError):
"""Raised when verification fails."""
def _pad_for_encryption(message: bytes, target_length: int) -> bytes:
r"""Pads the message for encryption, returning the padded message.
:return: 00 02 RANDOM_DATA 00 MESSAGE
>>> block = _pad_for_encryption(b'hello', 16)
>>> len(block)
16
>>> block[0:2]
b'\x00\x02'
>>> block[-6:]
b'\x00hello'
"""
max_msglength = target_length - 11
msglength = len(message)
if msglength > max_msglength:
raise OverflowError(
"%i bytes needed for message, but there is only"
" space for %i" % (msglength, max_msglength)
)
# Get random padding
padding = b""
padding_length = target_length - msglength - 3
# We remove 0-bytes, so we'll end up with less padding than we've asked for,
# so keep adding data until we're at the correct length.
while len(padding) < padding_length:
needed_bytes = padding_length - len(padding)
# Always read at least 8 bytes more than we need, and trim off the rest
# after removing the 0-bytes. This increases the chance of getting
# enough bytes, especially when needed_bytes is small
new_padding = os.urandom(needed_bytes + 5)
new_padding = new_padding.replace(b"\x00", b"")
padding = padding + new_padding[:needed_bytes]
assert len(padding) == padding_length
return b"".join([b"\x00\x02", padding, b"\x00", message])
def _pad_for_signing(message: bytes, target_length: int) -> bytes:
r"""Pads the message for signing, returning the padded message.
The padding is always a repetition of FF bytes.
:return: 00 01 PADDING 00 MESSAGE
>>> block = _pad_for_signing(b'hello', 16)
>>> len(block)
16
>>> block[0:2]
b'\x00\x01'
>>> block[-6:]
b'\x00hello'
>>> block[2:-6]
b'\xff\xff\xff\xff\xff\xff\xff\xff'
"""
max_msglength = target_length - 11
msglength = len(message)
if msglength > max_msglength:
raise OverflowError(
"%i bytes needed for message, but there is only"
" space for %i" % (msglength, max_msglength)
)
padding_length = target_length - msglength - 3
return b"".join([b"\x00\x01", padding_length * b"\xff", b"\x00", message])
def encrypt(message: bytes, pub_key: key.PublicKey) -> bytes:
"""Encrypts the given message using PKCS#1 v1.5
:param message: the message to encrypt. Must be a byte string no longer than
``k-11`` bytes, where ``k`` is the number of bytes needed to encode
the ``n`` component of the public key.
:param pub_key: the :py:class:`rsa.PublicKey` to encrypt with.
:raise OverflowError: when the message is too large to fit in the padded
block.
>>> from rsa import key, common
>>> (pub_key, priv_key) = key.newkeys(256)
>>> message = b'hello'
>>> crypto = encrypt(message, pub_key)
The crypto text should be just as long as the public key 'n' component:
>>> len(crypto) == common.byte_size(pub_key.n)
True
"""
keylength = common.byte_size(pub_key.n)
padded = _pad_for_encryption(message, keylength)
payload = transform.bytes2int(padded)
encrypted = core.encrypt_int(payload, pub_key.e, pub_key.n)
block = transform.int2bytes(encrypted, keylength)
return block
def decrypt(crypto: bytes, priv_key: key.PrivateKey) -> bytes:
r"""Decrypts the given message using PKCS#1 v1.5
The decryption is considered 'failed' when the resulting cleartext doesn't
start with the bytes 00 02, or when the 00 byte between the padding and
the message cannot be found.
:param crypto: the crypto text as returned by :py:func:`rsa.encrypt`
:param priv_key: the :py:class:`rsa.PrivateKey` to decrypt with.
:raise DecryptionError: when the decryption fails. No details are given as
to why the code thinks the decryption fails, as this would leak
information about the private key.
>>> import rsa
>>> (pub_key, priv_key) = rsa.newkeys(256)
It works with strings:
>>> crypto = encrypt(b'hello', pub_key)
>>> decrypt(crypto, priv_key)
b'hello'
And with binary data:
>>> crypto = encrypt(b'\x00\x00\x00\x00\x01', pub_key)
>>> decrypt(crypto, priv_key)
b'\x00\x00\x00\x00\x01'
Altering the encrypted information will *likely* cause a
:py:class:`rsa.pkcs1.DecryptionError`. If you want to be *sure*, use
:py:func:`rsa.sign`.
.. warning::
Never display the stack trace of a
:py:class:`rsa.pkcs1.DecryptionError` exception. It shows where in the
code the exception occurred, and thus leaks information about the key.
It's only a tiny bit of information, but every bit makes cracking the
keys easier.
>>> crypto = encrypt(b'hello', pub_key)
>>> crypto = crypto[0:5] + b'X' + crypto[6:] # change a byte
>>> decrypt(crypto, priv_key)
Traceback (most recent call last):
...
rsa.pkcs1.DecryptionError: Decryption failed
"""
blocksize = common.byte_size(priv_key.n)
encrypted = transform.bytes2int(crypto)
    # decrypt with the private key, using blinding to mitigate timing attacks
decrypted = priv_key.blinded_decrypt(encrypted)
cleartext = transform.int2bytes(decrypted, blocksize)
# Detect leading zeroes in the crypto. These are not reflected in the
# encrypted value (as leading zeroes do not influence the value of an
# integer). This fixes CVE-2020-13757.
if len(crypto) > blocksize:
# This is operating on public information, so doesn't need to be constant-time.
raise DecryptionError("Decryption failed")
# If we can't find the cleartext marker, decryption failed.
cleartext_marker_bad = not compare_digest(cleartext[:2], b"\x00\x02")
# Find the 00 separator between the padding and the message
sep_idx = cleartext.find(b"\x00", 2)
# sep_idx indicates the position of the `\x00` separator that separates the
# padding from the actual message. The padding should be at least 8 bytes
# long (see https://tools.ietf.org/html/rfc8017#section-7.2.2 step 3), which
# means the separator should be at least at index 10 (because of the
# `\x00\x02` marker that precedes it).
sep_idx_bad = sep_idx < 10
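    # Bitwise OR (rather than ``or``) avoids short-circuit evaluation, so both
    # checks are always evaluated; this keeps the failure path closer to
    # constant-time.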
anything_bad = cleartext_marker_bad | sep_idx_bad
    # Either check failing means the decryption failed.
    if anything_bad:
        raise DecryptionError("Decryption failed")
return cleartext[sep_idx + 1:]
def sign_hash(hash_value: bytes, priv_key: key.PrivateKey, hash_method: str) -> bytes:
"""Signs a precomputed hash with the private key.
Hashes the message, then signs the hash with the given key. This is known
as a "detached signature", because the message itself isn't altered.
:param hash_value: A precomputed hash to sign (ignores message).
:param priv_key: the :py:class:`rsa.PrivateKey` to sign with
:param hash_method: the hash method used on the message. Use 'MD5', 'SHA-1',
'SHA-224', SHA-256', 'SHA-384' or 'SHA-512'.
:return: a message signature block.
:raise OverflowError: if the private key is too small to contain the
requested hash.
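
    Example (a minimal sketch; a freshly generated 512-bit key is large enough
    for a SHA-256 signature, and only the deterministic hash name is shown):

    >>> from rsa import key
    >>> (pub_key, priv_key) = key.newkeys(512)
    >>> signature = sign_hash(compute_hash(b'hello', 'SHA-256'), priv_key, 'SHA-256')
    >>> verify(b'hello', signature, pub_key)
    'SHA-256'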
"""
# Get the ASN1 code for this hash method
if hash_method not in HASH_ASN1:
raise ValueError("Invalid hash method: %s" % hash_method)
asn1code = HASH_ASN1[hash_method]
# Encrypt the hash with the private key
cleartext = asn1code + hash_value
keylength = common.byte_size(priv_key.n)
padded = _pad_for_signing(cleartext, keylength)
payload = transform.bytes2int(padded)
encrypted = priv_key.blinded_encrypt(payload)
block = transform.int2bytes(encrypted, keylength)
return block
def sign(message: bytes, priv_key: key.PrivateKey, hash_method: str) -> bytes:
"""Signs the message with the private key.
Hashes the message, then signs the hash with the given key. This is known
as a "detached signature", because the message itself isn't altered.
:param message: the message to sign. Can be an 8-bit string or a file-like
object. If ``message`` has a ``read()`` method, it is assumed to be a
file-like object.
:param priv_key: the :py:class:`rsa.PrivateKey` to sign with
:param hash_method: the hash method used on the message. Use 'MD5', 'SHA-1',
'SHA-224', SHA-256', 'SHA-384' or 'SHA-512'.
:return: a message signature block.
:raise OverflowError: if the private key is too small to contain the
requested hash.
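
    Example (a minimal sketch; the signature bytes depend on the randomly
    generated key, so only the signature length is checked):

    >>> from rsa import key, common
    >>> (pub_key, priv_key) = key.newkeys(512)
    >>> signature = sign(b'hello', priv_key, 'SHA-256')
    >>> len(signature) == common.byte_size(pub_key.n)
    True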
"""
msg_hash = compute_hash(message, hash_method)
return sign_hash(msg_hash, priv_key, hash_method)
def verify(message: bytes, signature: bytes, pub_key: key.PublicKey) -> str:
"""Verifies that the signature matches the message.
The hash method is detected automatically from the signature.
:param message: the signed message. Can be an 8-bit string or a file-like
object. If ``message`` has a ``read()`` method, it is assumed to be a
file-like object.
:param signature: the signature block, as created with :py:func:`rsa.sign`.
:param pub_key: the :py:class:`rsa.PublicKey` of the person signing the message.
:raise VerificationError: when the signature doesn't match the message.
:returns: the name of the used hash.
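
    Example (a minimal sketch; verification returns the name of the hash
    method that was detected in the signature):

    >>> from rsa import key
    >>> (pub_key, priv_key) = key.newkeys(512)
    >>> signature = sign(b'hello', priv_key, 'SHA-256')
    >>> verify(b'hello', signature, pub_key)
    'SHA-256'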
"""
keylength = common.byte_size(pub_key.n)
encrypted = transform.bytes2int(signature)
decrypted = core.decrypt_int(encrypted, pub_key.e, pub_key.n)
clearsig = transform.int2bytes(decrypted, keylength)
# Get the hash method
method_name = _find_method_hash(clearsig)
message_hash = compute_hash(message, method_name)
# Reconstruct the expected padded hash
cleartext = HASH_ASN1[method_name] + message_hash
expected = _pad_for_signing(cleartext, keylength)
if len(signature) != keylength:
        raise VerificationError("Verification failed")
# Compare with the signed one
if expected != clearsig:
        raise VerificationError("Verification failed")
return method_name
def find_signature_hash(signature: bytes, pub_key: key.PublicKey) -> str:
"""Returns the hash name detected from the signature.
If you also want to verify the message, use :py:func:`rsa.verify()` instead.
It also returns the name of the used hash.
:param signature: the signature block, as created with :py:func:`rsa.sign`.
:param pub_key: the :py:class:`rsa.PublicKey` of the person signing the message.
:returns: the name of the used hash.
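
    Example (a minimal sketch; detects the hash without verifying the message):

    >>> from rsa import key
    >>> (pub_key, priv_key) = key.newkeys(512)
    >>> signature = sign(b'hello', priv_key, 'SHA-256')
    >>> find_signature_hash(signature, pub_key)
    'SHA-256'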
"""
keylength = common.byte_size(pub_key.n)
encrypted = transform.bytes2int(signature)
decrypted = core.decrypt_int(encrypted, pub_key.e, pub_key.n)
clearsig = transform.int2bytes(decrypted, keylength)
return _find_method_hash(clearsig)
def yield_fixedblocks(infile: typing.BinaryIO, blocksize: int) -> typing.Iterator[bytes]:
"""Generator, yields each block of ``blocksize`` bytes in the input file.
:param infile: file to read and separate in blocks.
:param blocksize: block size in bytes.
:returns: a generator that yields the contents of each block
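
    Example (a minimal sketch using an in-memory stream; the final block may
    be shorter than ``blocksize``):

    >>> import io
    >>> list(yield_fixedblocks(io.BytesIO(b'abcdefg'), 3))
    [b'abc', b'def', b'g']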
"""
while True:
block = infile.read(blocksize)
read_bytes = len(block)
if read_bytes == 0:
break
yield block
if read_bytes < blocksize:
break
def compute_hash(message: typing.Union[bytes, typing.BinaryIO], method_name: str) -> bytes:
"""Returns the message digest.
:param message: the signed message. Can be an 8-bit string or a file-like
object. If ``message`` has a ``read()`` method, it is assumed to be a
file-like object.
:param method_name: the hash method, must be a key of
:py:const:`HASH_METHODS`.
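
    Example (a minimal sketch; the SHA-256 digest of ``b'hello'`` is a
    well-known constant, so only its first bytes are shown):

    >>> compute_hash(b'hello', 'SHA-256').hex()[:8]
    '2cf24dba'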
"""
if method_name not in HASH_METHODS:
raise ValueError("Invalid hash method: %s" % method_name)
method = HASH_METHODS[method_name]
hasher = method()
if isinstance(message, bytes):
hasher.update(message)
else:
assert hasattr(message, "read") and hasattr(message.read, "__call__")
# read as 1K blocks
for block in yield_fixedblocks(message, 1024):
hasher.update(block)
return hasher.digest()
def _find_method_hash(clearsig: bytes) -> str:
"""Finds the hash method.
:param clearsig: full padded ASN1 and hash.
:return: the used hash method.
:raise VerificationFailed: when the hash method cannot be found
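
    Example (a minimal sketch; any cleartext that embeds a known ASN.1 prefix
    is matched, here the SHA-256 prefix followed by 32 filler bytes):

    >>> _find_method_hash(HASH_ASN1['SHA-256'] + b'x' * 32)
    'SHA-256'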
"""
for (hashname, asn1code) in HASH_ASN1.items():
if asn1code in clearsig:
return hashname
raise VerificationError("Verification failed")
__all__ = [
"encrypt",
"decrypt",
"sign",
"verify",
"DecryptionError",
"VerificationError",
"CryptoError",
]
if __name__ == "__main__":
print("Running doctests 1000x or until failure")
import doctest
for count in range(1000):
(failures, tests) = doctest.testmod()
if failures:
break
if count % 100 == 0 and count:
print("%i times" % count)
print("Doctests done") | zdppy-password | /zdppy_password-0.1.0-py3-none-any.whl/zdppy_password/libs/rsa/pkcs1.py | pkcs1.py |