code | package | path | filename
---|---|---|---|
import json
__all__ = [
"queryset"
]
class MongoFilter:
"""
"""
def __init__(self):
self.fields_utils = []
self.list_refences = []
self.dict_embedded = {}
        self.params = {}
        self.principal_models = None
        self.refence_models = None
        self.refence_instance = None
self.refence_field = "ReferenceField"
self.embedded_field = "ListField"
def main(self,
principal_models: dict,
refence_models: dict = None,
             params: dict = None):
"""
"""
self.principal_models = principal_models
self.refence_models = refence_models
        self.params = params if params is not None else {}
self.clean_data_params
self.list_field_models
self.list_references_field
self.dict_embedded_document_field
self.instances_refences
        # main query including the resolved references
queryset = principal_models.objects.filter(**self.params)
if len(queryset) > 1:
            # if the query returns more than one object, no further
            # actions are taken and the queryset is returned as-is
return self.multiples_objects(queryset)
        # attach the filtered embedded documents to the main object
queryset = self.instances_embedded(queryset)
return queryset
@property
def clean_data_params(self):
"""
Elimina las keys que contienen valores None
de esta forma no se afeacta la query principal, esto
es util cuando se pasan parametros de busqueda serializados.
:: str_bool: cambia los datos str a bool
:: clean_params : elimina los campos None
"""
clean_params = {value: index for value,
index in self.params.items() if index is not None}
str_bool = {value: json.loads(
index.lower()) for value, index in self.params.items()
if index in ['true', 'false']
}
self.params = dict(clean_params, **str_bool)
@property
def list_field_models(self):
"""
Itera sobre self.params, valida los campos que pertenecen
al modelo y los agrega a la lista self.fields_utils
"""
for fields in self.params:
try:
getattr(self.principal_models, fields)
self.fields_utils.append(fields)
except AttributeError:
try:
_fields = fields.split("_")
getattr(self.principal_models, _fields[0])
self.fields_utils.append(fields)
except AttributeError:
pass
@property
def list_references_field(self):
"""
Selecciona los objetos de tipo ReferenceField, los agrega a
la lista self.list_refences
"""
for fields in self.fields_utils:
try:
type_field = getattr(self.principal_models, fields)
if self.refence_field in str(type(type_field)):
self.list_refences.append(fields)
except AttributeError:
self.second_search_refence(
fields, self.refence_field, self.list_refences)
@property
def dict_embedded_document_field(self):
"""
Selecciona los objetos de tipo List, los agrega a
la lista self.dict_embedded
"""
for fields in self.fields_utils:
try:
type_field = getattr(self.principal_models, fields)
if self.embedded_field in str(type(type_field)):
self.dict_embedded[fields] = self.params[fields]
self.params.pop(fields)
except AttributeError:
self.second_search_embedded(
fields, self.embedded_field,
self.dict_embedded
)
order_embedded = {}
for index, value in self.dict_embedded.items():
name = index.split("_")
key = "_".join(name[1:])
if name[0] in order_embedded:
order_embedded[name[0]][key] = value
else:
order_embedded[name[0]] = {key: value}
self.dict_embedded = order_embedded
@property
def instances_refences(self):
"""
Itera sobre list_refences, que contiene las keys de modelos
ReferenceField, separa la data en 2 variables una contiene el nombre
del campo a filtrar y la otra el valor, realizando una busqueda de tipo
iexact, este no diferencia entre mayuscusulas-minisculas, si la
consulta es True, se actualiza el valor de la llave de referencia
por la instancia del objeto en self.params,
para relizar la consulta global:
return::
params={
"field_refence":instance_refence_field
}
- variables de referencia
- params : almacena el valor de busqueda
- items : almacena el key de busqueda
- reference_name : contiene el campo de ReferenceField
"""
        data_filter = {}
for refence in self.list_refences:
            # split the fields and validate that they exist in the model
params = self.params[refence]
field_refence = refence.split('_')
items = "{}__{}".format(field_refence[-1], "iexact")
reference_name = field_refence[0]
            # data_filter e.g.: {uuid: "52a5629c-3fb4-4267-bc39-9bc3cbb7ef50"}
data_filter = {items: params}
            # remove the reference fields from the params
self.params.pop(refence)
try:
instance = {
reference_name: self.refence_models.objects.get(
**data_filter)
}
                # add a dict with the instance of the reference field
self.params = dict(self.params, **instance)
except Exception:
pass
def instances_embedded(self, query):
"""
Itera sobre la lista de keys embedded, y separa
en 2 variables el nombre del campo y el valor para
buscar en el Embebido del objeto de consulta principal
"""
if not query:
return []
result = query
for embedded_model, values in self.dict_embedded.items():
            # split the fields and validate that they exist in the model
result = self.process_embedded(
query, embedded_model, values)
return result
def process_embedded(self, query, embedded_model, params):
"""
Filtra los embedded y los agrega al objeto de busqueda
principal
"""
query = query[0]
dict_embedded = []
validated_query = 0
embedded_query = getattr(query, embedded_model)
for embedded in embedded_query:
validated_query = 0
for index, value in params.items():
if str(getattr(embedded, index)) == str(value):
validated_query += 1
if len(params) == validated_query:
dict_embedded.append(embedded)
query[embedded_model] = dict_embedded
return [query]
def multiples_objects(self, query):
"""
"""
return query
def second_search_refence(self, _iter, value, list_data):
try:
_fields = _iter.split("_")
type_field = getattr(self.principal_models, _fields[0])
if value in str(type(type_field)):
list_data.append(_iter)
except AttributeError:
pass
def second_search_embedded(self, _iter, value, list_data):
try:
_fields = _iter.split("_")
type_field = getattr(self.principal_models, _fields[0])
if value in str(type(type_field)):
list_data[_iter] = self.params[_iter]
self.params.pop(_iter)
except AttributeError:
pass
def queryset(principal_models: dict,
refence_models: dict = None,
             params: dict = None):
"""
    Filters multiple first-level embedded objects and one ReferenceField
    object, based on a dict whose keys start with the model name followed
    by the search field, e.g. modelA_search_field.
    Parameters:
    :: principal_models: model that contains the references and embedded documents
    :: refence_models: ReferenceField model
    :: params: dict with the filter keys
    Example:
- model_a:
{
"id":1,
"name":"abc",
"nid":"12323",
"addres":EmbeddedDocumentField(model_b),
"nid_type":ReferenceField(model_c, dbref=True)
}
- model_b:
[
{
"id":1,
"name":"cll qwer",
"description":"
},
{
"id":2,
"name":"cll abc",
"description":"
}
]
    - model_c:
        [
            {
                "id": 1,
                "name": "C.C",
                "description": ""
            },
            {
                "id": 2,
                "name": "C.E",
                "description": ""
            }
        ]
- Params:{
"id":1,
"model_b_name":"cll abc",
"model_c_name":"C.C"
}
    :: queryset(model_a, model_c, Params)
    - response:
{
"id":1,
"name":"",
"addres":[addres[0]],
"nid_type":(object)
}
"""
result = MongoFilter().main(
principal_models,
refence_models,
params
)
return result | zmongo-filter | /zmongo_filter-0.0.6-py3-none-any.whl/zmongo/filter.py | filter.py |
__author__ = 'Zhang Fan'
from pymongo import MongoClient
from zretry import retry
_retry_func_list = []
def _except_retry(func):
_retry_func_list.append(func.__name__)
return func
class mongo_inst():
def __init__(self, host: str, port: int, db: str = 'test', collname=None, user=None, password=None,
retry_interval=1, max_attempt_count=5,
**kw):
        '''
        Create a MongoDB client.
        :param host: IP address
        :param port: port
        :param db: database name
        :param collname: collection name
        :param user: user name
        :param password: password
        :param retry_interval: wait time between retry attempts
        :param max_attempt_count: maximum number of attempts
        :param kw: other parameters
        '''
self._conn = MongoClient(host=host, port=port, **kw)
if user and password:
self._conn[db].authenticate(user, password)
self.change_db(db, collname)
for retry_func_name in _retry_func_list:
func = getattr(self, retry_func_name)
decorator = retry(interval=retry_interval, max_attempt_count=max_attempt_count)(func)
setattr(self, retry_func_name, decorator)
def change_db(self, db, collname=None):
self.db_name = db
self.collname = collname
self.coll = self._conn[db][collname] if collname else None
def change_coll(self, collname):
self.collname = collname
self.coll = self._conn[self.db_name][collname]
@_except_retry
def save(self, item):
        # Save the document; returns the _id on success, raises on failure
return self.coll.insert(item)
def all_data(self, collname=None, skip_count=0):
        assert isinstance(skip_count, int) and skip_count >= 0, 'skip_count must be an integer and not less than 0'
if collname:
self.change_coll(collname)
datas = self.coll.find()
if skip_count:
datas.skip(skip_count)
return datas
def find(self, *args, **kw):
return self.coll.find(*args, **kw)
def find_id(self, _id):
return self.coll.find({'_id': _id})
def find_fields(self, *fields):
return self.coll.find({}, {field: True for field in fields})
def del_id(self, _id):
return self.coll.delete_one({'_id': _id}) | zmongo | /zmongo-0.1.0-py3-none-any.whl/zmongo.py | zmongo.py |
Unofficial zmote.io interface
=======================
This module serves as a Python interface for the [zmote.io](http://zmote.io/)
IoT gadget; it's basically a USB-powered, WiFi-connected IR blaster.
The module was written using the
[zmote.io API documentation](http://www.zmote.io/apis) and tested against two
real devices.
----
#### Overview
This module supports the discovery of devices via multicast and interacting
with devices via HTTP or TCP; in all instances communication is directly
with the device (and not via the zmote.io cloud application).
#### To install for use standalone/in your project
<code>pip install zmote</code>
##### To passively discover all devices on your network until timeout (30 seconds)
<code>python -m zmote.discoverer</code>
##### To actively discover two devices on your local network
<code>python -m zmote.discoverer -l 2 -a</code>
##### To passively discover a particular device on your local network (e.g. in case of DHCP)
<code>python -m zmote.discoverer -u CI001f1234</code>
##### To put a device into learn mode via TCP
<code>python -m zmote.connector -t tcp -d 192.168.1.1 -c learn</code>
##### To tell a device to send an IR signal via HTTP
<code>python -m zmote.connector -t http -d 192.168.1.1 -c send -p 1:1,0,36000,1,1,32,32,64,32,32,64,32,3264</code>
### To install for further development
Prerequisites:
* [virtualenvwrapper](https://virtualenvwrapper.readthedocs.io/en/latest/)
#### Clone the repo
<code>git clone https://github.com/initialed85/zmote
cd zmote</code>
#### Build the virtualenv
<code>mkvirtualenv zmote
pip install -r requirements-dev.txt</code>
#### Run the tests
<code>py.test -v</code>
| zmote | /zmote-2017.7.tar.gz/zmote-2017.7/README.md | README.md |
hnnnn12 = '%I:%M %p | %d %B, %Y' # 12h, human, day then month name # 02:34 PM | 15 January, 2023 # 00
hnndn12 = '%I:%M %p | %d/%m/%Y' # 12h, human, day then month digit # 02:34 PM | 15/01/2023 # 01
hnnns12 = '%I:%M:%S %p | %d %B, %Y' # 12h, human, day then month name, seconds # 02:34:56 PM | 15 January, 2023 # 02
hnnds12 = '%I:%M:%S %p | %d/%m/%Y' # 12h, human, day then month digit, seconds # 02:34:56 PM | 15/01/2023 # 03
snnnn12 = '%I.%M_%p_%d-%B-%Y' # 12h, system, day then month name # 02.34_PM_15-January-2023 # 04
snndn12 = '%I.%M_%p_%d-%m-%Y' # 12h, system, day then month digit # 02.34_PM_15-01-2023 # 05
snnns12 = '%I.%M.%S_%p_%d-%B-%Y' # 12h, system, day then month name, seconds # 02.34.56_PM_15-January-2023 # 06
snnds12 = '%I.%M.%S_%p_%d-%m-%Y' # 12h, system, day then month digit, seconds # 02.34.56_PM_15-01-2023 # 07
hsnnn12 = '%d %B, %Y | %I:%M %p' # 12h, human, day then month name, date then time # 15 January, 2023 | 02:34 PM # 08
hsndn12 = '%d/%m/%Y | %I:%M %p' # 12h, human, day then month digit, date then time # 01/01/2023 | 02:34 PM # 09
hsnns12 = '%d %B, %Y | %I:%M:%S %p' # 12h, human, day then month name, seconds, date then time # 15 January, 2023 | 02:34:56 PM # 10
hsnds12 = '%d/%m/%Y | %I:%M:%S %p' # 12h, human, day then month digit, seconds, date then time # 15/01/2023 | 02:34:56 PM # 11
ssnnn12 = '%d-%B-%Y_%I.%M_%p' # 12h, system, day then month name, date then time # 15-January-2023_02.34_PM # 12
ssndn12 = '%d-%m-%Y_%I.%M_%p' # 12h, system, day then month digit, date then time # 15-01-2023_02.34_PM # 13
ssnns12 = '%d-%B-%Y_%I.%M.%S_%p' # 12h, system, day then month name, seconds, date then time # 15-January-2023_02.34.56_PM # 14
ssnds12 = '%d-%m-%Y_%I.%M.%S_%p' # 12h, system, day then month digit, seconds, date then time # 15-01-2023_02.34.56_PM # 15
hnnnn24 = '%H:%M | %d %B, %Y' # 24h, human, day then month name # 14:34 | 15 January, 2023 # 16
hnndn24 = '%H:%M | %d/%m/%Y' # 24h, human, day then month digit # 14:34 | 15/01/2023 # 17
hnnns24 = '%H:%M:%S | %d %B, %Y' # 24h, human, day then month name, seconds # 14:34:56 | 15 January, 2023 # 18
hnnds24 = '%H:%M:%S | %d/%m/%Y' # 24h, human, day then month digit, seconds # 14:34:56 | 15/01/2023 # 19
snnnn24 = '%H.%M_%d-%B-%Y' # 24h, system, day then month name # 14.34_15-January-2023 # 20
snndn24 = '%H.%M_%d-%m-%Y' # 24h, system, day then month digit # 14.34_15-01-2023 # 21
snnns24 = '%H.%M.%S_%d-%B-%Y' # 24h, system, day then month name, seconds # 14.34.56_15-January-2023 # 22
snnds24 = '%H.%M.%S_%d-%m-%Y' # 24h, system, day then month digit, seconds # 14.34.56_15-01-2023 # 23
hsnnn24 = '%d %B, %Y | %H:%M' # 24h, human, day then month name, date then time # 15 January, 2023 | 14:34 # 24
hsndn24 = '%d/%m/%Y | %H:%M' # 24h, human, day then month digit, date then time # 15/01/2023 | 14:34 # 25
hsnns24 = '%d %B, %Y | %H:%M:%S' # 24h, human, day then month name, seconds, date then time # 15 January, 2023 | 14:34:56 # 26
hsnds24 = '%d/%m/%Y | %H:%M:%S' # 24h, human, day then month digit, seconds, date then time # 15/01/2023 | 14:34:56 # 27
ssnnn24 = '%d-%B-%Y_%H.%M' # 24h, system, day then month name, date then time # 15-January-2023_14.34 # 28
ssndn24 = '%d-%m-%Y_%H.%M' # 24h, system, day then month digit, date then time # 15-01-2023_14.34 # 29
ssnns24 = '%d-%B-%Y_%H.%M.%S' # 24h, system, day then month name, seconds, date then time # 15-January-2023_14.34.56 # 30
ssnds24 = '%d-%m-%Y_%H.%M.%S' # 24h, system, day then month digit, seconds, date then time # 15-01-2023_14.34.56 # 31
hnsnn12 = '%I:%M %p | %B %d, %Y' # 12h, human, month name then day # 02:34 PM | January 15, 2023 # 32
hnsdn12 = '%I:%M %p | %m/%d/%Y' # 12h, human, month digit then day # 02:34 PM | 01/15/2023 # 33
hnsns12 = '%I:%M:%S %p | %B %d, %Y' # 12h, human, month name then day, seconds # 02:34:56 PM | January 15, 2023 # 34
hnsds12 = '%I:%M:%S %p | %m/%d/%Y' # 12h, human, month digit then day, seconds # 02:34:56 PM | 01/15/2023 # 35
snsnn12 = '%I.%M_%p_%B-%d-%Y' # 12h, system, month name then day # 02.34_PM_January-15-2023 # 36
snsdn12 = '%I.%M_%p_%m-%d-%Y' # 12h, system, month digit then day # 02.34_PM_01-15-2023 # 37
snsns12 = '%I.%M.%S_%p_%B-%d-%Y' # 12h, system, month name then day, seconds # 02.34.56_PM_January-15-2023 # 38
snsds12 = '%I.%M.%S_%p_%m-%d-%Y' # 12h, system, month digit then day, seconds # 02.34.56_PM_01-15-2023 # 39
hssnn12 = '%B %d, %Y | %I:%M %p' # 12h, human, month name then day, date then time # January 15, 2023 | 02:34 PM # 40
hssdn12 = '%m/%d/%Y | %I:%M %p' # 12h, human, month digit then day, date then time # 01/15/2023 | 02:34 PM # 41
hssns12 = '%B %d, %Y | %I:%M:%S %p' # 12h, human, month name then day, seconds, date then time # January 15, 2023 | 02:34:56 PM # 42
hssds12 = '%m/%d/%Y | %I:%M:%S %p' # 12h, human, month digit then day, seconds, date then time # 01/15/2023 | 02:34:56 PM # 43
sssnn12 = '%B-%d-%Y_%I.%M_%p' # 12h, system, month name then day, date then time # January-15-2023_02.34_PM # 44
sssdn12 = '%m-%d-%Y_%I.%M_%p' # 12h, system, month digit then day, date then time # 01-15-2023_02.34_PM # 45
sssns12 = '%B-%d-%Y_%I.%M.%S_%p' # 12h, system, month name then day, seconds, date then time # January-15-2023_02.34.56_PM # 46
sssds12 = '%m-%d-%Y_%I.%M.%S_%p' # 12h, system, month digit then day, seconds, date then time # 01-15-2023_02.34.56_PM # 47
hnsnn24 = '%H:%M | %B %d, %Y' # 24h, human, month name then day # 14:34 | January 15, 2023 # 48
hnsdn24 = '%H:%M | %m/%d/%Y' # 24h, human, month digit then day # 14:34 | 01/15/2023 # 49
hnsns24 = '%H:%M:%S | %B %d, %Y' # 24h, human, month name then day, seconds # 14:34:56 | January 15, 2023 # 50
hnsds24 = '%H:%M:%S | %m/%d/%Y' # 24h, human, month digit then day, seconds # 14:34:56 | 01/15/2023 # 51
snsnn24 = '%H.%M_%B-%d-%Y' # 24h, system, month name then day # 14.34_January-15-2023 # 52
snsdn24 = '%H.%M_%m-%d-%Y' # 24h, system, month digit then day # 14.34_01-15-2023 # 53
snsns24 = '%H.%M.%S_%B-%d-%Y' # 24h, system, month name then day, seconds # 14.34.56_January-15-2023 # 54
snsds24 = '%H.%M.%S_%m-%d-%Y' # 24h, system, month digit then day, seconds # 14.34.56_01-15-2023 # 55
hssnn24 = '%B %d, %Y | %H:%M' # 24h, human, month name then day, date then time # January 15, 2023 | 14:34 # 56
hssdn24 = '%m/%d/%Y | %H:%M' # 24h, human, month digit then day, date then time # 01/15/2023 | 14:34 # 57
hssns24 = '%B %d, %Y | %H:%M:%S' # 24h, human, month name then day, seconds, date then time # January 15, 2023 | 14:34:56 # 58
hssds24 = '%m/%d/%Y | %H:%M:%S' # 24h, human, month digit then day, seconds, date then time # 01/15/2023 | 14:34:56 # 59
sssnn24 = '%B-%d-%Y_%H.%M' # 24h, system, month name then day, date then time # January-15-2023_14.34 # 60
sssdn24 = '%m-%d-%Y_%H.%M' # 24h, system, month digit then day, date then time # 01-15-2023_14.34 # 61
sssns24 = '%B-%d-%Y_%H.%M.%S' # 24h, system, month name then day, seconds, date then time # January-15-2023_14.34.56 # 62
sssds24 = '%m-%d-%Y_%H.%M.%S' # 24h, system, month digit then day, seconds, date then time # 01-15-2023_14.34.56 # 63
hnn = '%B %d, %Y' # human, month name then day # January 15, 2023 # 64
hnd = '%m/%d/%Y' # human, month digit then day # 01/15/2023 # 65
snn = '%B-%d-%Y' # system, month name then day # January-15-2023 # 66
snd = '%m-%d-%Y' # system, month digit then day # 01-15-2023 # 67
hsn = '%d %B, %Y' # human, day then month name # 15 January, 2023 # 68
hsd = '%d/%m/%Y' # human, day then month digit # 15/01/2023 # 69
ssn = '%d-%B-%Y' # system, day then month name # 15-January-2023 # 70
ssd = '%d-%m-%Y' # system, day then month digit # 15-01-2023 # 71
hn12 = '%I:%M %p' # 12h, human # 02:34 PM # 72
hs12 = '%I:%M:%S %p' # 12h, human, seconds # 02:34:56 PM # 73
sn12 = '%I.%M_%p' # 12h, system # 02.34_PM # 74
ss12 = '%I.%M.%S_%p' # 12h, system, seconds # 02.34.56_PM # 75
hn24 = '%H:%M' # 24h, human # 14:34 # 76
hs24 = '%H:%M:%S' # 24h, human, seconds # 14:34:56 # 77
sn24 = '%H.%M' # 24h, system # 14.34 # 78
ss24 = '%H.%M.%S' # 24h, system, seconds # 14.34.56 # 79
translations = {
"microsecond": "%f",
"second": "%S",
"sec": "%S",
"s": "%S",
"minute": "%M",
"min": "%M",
"m": "%M",
"12h": "%I",
"24h": "%H",
"AM/PM": "%p",
"short weekday": "%a",
"long weekday": "%A",
"numeric weekday": "%w",
"day": "%d",
"day of year": "%j",
"Sunday weeks": "%U",
"Monday weeks": "%W",
"short month": "%b",
"long month": "%B",
"numeric month": "%m",
"date": "%x",
"time": "%X",
"date time": "%c",
"ISO 8601 year": "%G",
"ISO 8601 weekday": "%u",
"ISO 8601 weeknumber": "%V",
"decade": "%y",
"century": "%C",
"year": "%Y",
"UTC offset": "%z",
"timezone": "%Z"
}
import datetime
import pytz
from typing import Optional, Union
formats = ['hnnnn12', 'hnndn12', 'hnnns12', 'hnnds12', 'snnnn12', 'snndn12', 'snnns12', 'snnds12', 'hsnnn12', 'hsndn12', 'hsnns12', 'hsnds12', 'ssnnn12', 'ssndn12', 'ssnns12', 'ssnds12', 'hnnnn24', 'hnndn24', 'hnnns24', 'hnnds24', 'snnnn24', 'snndn24', 'snnns24', 'snnds24', 'hsnnn24', 'hsndn24', 'hsnns24', 'hsnds24', 'ssnnn24', 'ssndn24', 'ssnns24', 'ssnds24', 'hnsnn12', 'hnsdn12', 'hnsns12', 'hnsds12', 'snsnn12', 'snsdn12', 'snsns12', 'snsds12', 'hssnn12', 'hssdn12', 'hssns12', 'hssds12', 'sssnn12', 'sssdn12', 'sssns12', 'sssds12', 'hnsnn24', 'hnsdn24', 'hnsns24', 'hnsds24', 'snsnn24', 'snsdn24', 'snsns24', 'snsds24', 'hssnn24', 'hssdn24', 'hssns24', 'hssds24', 'sssnn24', 'sssdn24', 'sssns24', 'sssds24', 'hnn', 'hnd', 'snn', 'snd', 'hsn', 'hsd', 'ssn', 'ssd', 'hn12', 'hs12', 'sn12', 'ss12', 'hn24', 'hs24', 'sn24', 'ss24']
def get_datetime(fmt: Optional[Union[str, int]] = 13, tz: Optional[str] = None, dt: Optional[datetime.datetime] = None) -> str:
"""
Get the datetime in the specified format and timezone.
Args:
        fmt (Union[str, int], optional): The desired strftime format string or format ID. Defaults to 13 (ssndn12: 15-01-2023_02.34_PM).
        tz (str, optional): The desired timezone name. Ignored when a custom dt is supplied. If None, defaults to UTC.
        dt (datetime.datetime, optional): Override with a custom datetime. Defaults to datetime.datetime.now(tz).
Returns:
str: Formatted datetime string.
"""
tz = pytz.timezone(tz) if tz else pytz.UTC
dt = dt if dt else datetime.datetime.now(tz)
if isinstance(fmt, int):
        if 0 <= fmt < len(formats): fmt = globals()[formats[fmt]]
        else: raise ValueError(f'fmt must be between 0 and {len(formats) - 1} inclusive.')
if not isinstance(fmt, str): raise TypeError(f'fmt must be type str, not {type(fmt)}')
return str(dt.strftime(fmt))
def valid_timezones(): return pytz.all_timezones
def get_format(format: int): return globals()[formats[format]]
def create_format(format: str = None): return format % translations if format else translations
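# --- Usage sketch (added for illustration; not part of the original module) ---
# The calls below only use the helpers defined above; the printed values depend
# on the current time, so the comments only show the shape of each result.
if __name__ == "__main__":
    # Numeric format id 13 -> ssndn12, e.g. "15-01-2023_02.34_PM"
    print(get_datetime(13))
    # An explicit strftime string also works, e.g. "2023-01-15 14:34"
    print(get_datetime(fmt="%Y-%m-%d %H:%M", tz="US/Eastern"))
    # Build a strftime string from the human-readable keys in `translations`,
    # e.g. "%B %d, %Y" -> "January 15, 2023"
    print(get_datetime(fmt=create_format("%(long month)s %(day)s, %(year)s")))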
print(get_datetime(tz='US/Eastern')) | zmp | /zmp-6.9-py3-none-any.whl/modules/betterDateTime.py | betterDateTime.py |
# zabbix-monitoring-programs-execution
The program controls the execution of any program, script or OS command, sends the execution result to Zabbix, and in case of an execution error it can additionally notify via Telegram.
**NOTE**: Any program, script or OS command controlled by zm.py is referred to below as a *process*.
## Work logic
Logging is done in stdout.
All zm.py settings are performed through environment variables.
Telegram notifications can be turned off with `ZM_TELEGRAM_NOTIF=False`. In this case, you will only receive alerts from Zabbix (where you can also set up Telegram alerts), but the alerts from zm.py are more informative.
Sending data to Zabbix can be turned off with `ZM_ZABBIX_SEND=False`. In this case, you will only receive alerts in Telegram.
Sending the process execution time to Zabbix can be turned off with `ZM_ZABBIX_SEND_TIME=False`.
Only error messages are sent to Telegram. Messages about the successful completion of the process are not sent to Telegram (so that there is no flood).
If the process completes successfully, the process execution time and the successful result value are sent to Zabbix. The successful result value is set with ZM_ZABBIX_OK.
If the process fails, execution time = 0 and the unsuccessful result value are sent to Zabbix. The unsuccessful result value is set with ZM_ZABBIX_NOT_OK.
You can run zm.py in a Docker container.
## Settings
| ENV | Default | Description |
|----------|------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `ZM_DEBUG` | `False` | Enable DEBUG mode? (True or False). |
| `HOSTNAME` | `Unknown` | For Telegram message to see which host this message is from. In Linux, such a variable is usually already set. |
| Zabbix settings |||
| `ZM_ZABBIX_SEND` | `True` | Should app send data to Zabbix? (True or False). |
| `ZM_ZABBIX_SEND_TIME` | `True` | Should app send execution time to Zabbix? (True or False). |
| `ZM_ZABBIX_OK` | `0` | OK value for Zabbix. |
| `ZM_ZABBIX_NOT_OK` | `1` | Not OK value for Zabbix. |
| `ZM_ZABBIX_IP` | `None` | Zabbix server ip address. |
| `ZM_ZABBIX_HOST_NAME` | `None` | Zabbix "Host name". How is the host named in Zabbix. (See picture after table). |
| `ZM_ZABBIX_ITEM_NAME` | `None` | How is the trapped item key named in Zabbix. |
| `ZM_ZABBIX_ITEM_TIME_NAME` | `None` | How is the trapped item for execution time key named in Zabbix. |
| Telegram settings |||
| `ZM_TELEGRAM_NOTIF` | `True` | Should app send telegram alerts? or log messages only to stdout. (True or False). |
| `ZM_TELEGRAM_TIMEOUT` | `10` | Telegram connection timeout. |
| `ZM_TELEGRAM_BOT_TOKEN` | `None` | Telegram bot token. It usually looks like this `1470616475:AAHFSvznxxLTDedQBSiRVrYVP49ixkghpRT`. You need to create a bot in Telegram using [BotFather](https://t.me/BotFather) and you can also get a bot token there. |
| `ZM_TELEGRAM_CHAT` | `None` | Telegram chat (ID) to which the bot will send messages. If this is a private chat, then usually the ID looks like a positive number. If it is a channel or group then ID is a negative number. |
**NOTE**: The `ZM_ZABBIX_HOST_NAME` parameter can be seen here

## Install and run
### Install Python3
[Python Download](https://www.python.org/downloads/)
### Customize Zabbix
In this example, `ZM_ZABBIX_ITEM_NAME` will be called `docker-rmi-sh` and `ZM_ZABBIX_ITEM_TIME_NAME` will be called `docker-rmi-sh-time`. These names are written in the `Key` field.
Create trapped items `ZM_ZABBIX_ITEM_NAME` and if you need `ZM_ZABBIX_ITEM_TIME_NAME`.


Create trigger for `ZM_ZABBIX_ITEM_NAME` with that Expression:
`{172.26.12.168:docker-rmi-sh.last()}=1 or {172.26.12.168:docker-rmi-sh.nodata(25h)}<>0`
The trigger fires when there was an error while executing the process or when the process has not run for more than 25 hours.

You can see graphs for items: go to **Monitoring** - **Latest data** - in **Filter.Hosts** choose the desired host - there is a **Graph** link in the item line.
Or you can create your own graphs.

### Settings
You must set the environment variables on the computer where zm.py will run, under the account under which zm.py will run.
There are many ways to define environment variables.
### Run
In this example, I write all the necessary variables in file `.bash_profile`.
```commandline
export ZM_ZABBIX_IP="172.26.12.86"
export ZM_ZABBIX_HOST_NAME="172.26.12.168"
export ZM_ZABBIX_ITEM_NAME="docker-rmi-sh"
export ZM_ZABBIX_ITEM_TIME_NAME="docker-rmi-sh-time"
export ZM_TELEGRAM_BOT_TOKEN="1470616475:AAHFSvznxxLTDedQBSiRVrYVP49ixkghpRT"
export ZM_TELEGRAM_CHAT="123456789"
```
#### 1) As script
```commandline
mkdir /usr/share/zabbix-monitoring-programs-execution
cd /usr/share/zabbix-monitoring-programs-execution
git clone https://github.com/MinistrBob/zabbix-monitoring-programs-execution.git .
pip3 install -r requirements.txt
python3 /usr/share/zabbix-monitoring-programs-execution/zm.py <process>
```
#### 2) As cronjob (or if you use sudo -s or su)
If you use a cronjob (or `sudo -s` / `su`), you will need the `source` command
```commandline
MAILTO=""
0 3 * * * source /home/user/.bash_profile; python3 /usr/share/zabbix-monitoring-programs-execution/zm.py /usr/share/local/docker-rmi.sh 2>&1
```
## For developers
### Get and install requirements (requirements.txt)
`c:\MyGit\zabbix-monitoring-programs-execution\venv\Scripts\pip.exe freeze | Out-File -Encoding UTF8 c:\MyGit\zabbix-monitoring-programs-execution\requirements.txt`
`pip install -r c:\MyGit\zabbix-monitoring-programs-execution\requirements.txt`
### Publish the package on pypi.org
```
python setup.py sdist
twine upload dist/*
```
### Telegram
[sendMessage](https://telegram-bot-sdk.readme.io/reference/sendmessage) `https://api.telegram.org/bot{token}/sendMessage`
Example message (html):
```html
MESSAGE: ❌ Test <b>bold</b>,
<strong>bold</strong>
<i>italic</i>, <em>italic</em>
<a href="URL">inline URL</a>
<code>inline fixed-width code</code>
<pre>pre-formatted fixed-width code block</pre>
```
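For reference, here is a minimal Python sketch of posting such a message through the endpoint above (the token and chat ID are placeholders; the `requests` package must be installed):
```python
import requests

TOKEN = "123456:ABC-placeholder-token"  # placeholder bot token
CHAT_ID = "123456789"                   # placeholder chat ID

# Send an HTML-formatted message via the Bot API sendMessage method
resp = requests.post(
    f"https://api.telegram.org/bot{TOKEN}/sendMessage",
    json={"chat_id": CHAT_ID, "text": "MESSAGE: ❌ Test <b>bold</b>", "parse_mode": "HTML"},
    timeout=10,
)
resp.raise_for_status()
```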
| zmpe | /zmpe-1.0.4.tar.gz/zmpe-1.0.4/README.md | README.md |
import msgpack
import zmq
from dataclasses import asdict
from .schema.completion import ChatCompletion, ChatCompletionChoice, ChatCompletionUsage, ChatCompletionLogprobs
from .schema.request import Request, Message
class LlamaClient:
"""
LlamaClient is a client class to communicate with a server using ZeroMQ and MessagePack.
"""
def __init__(self, host: str):
"""
Initializes the LlamaClient with the given host.
:param host: The server host to connect to.
"""
self.context = zmq.Context() # Creating a new ZeroMQ context
self.socket = self.context.socket(zmq.REQ) # Creating a new request socket
self.socket.connect(host) # Connecting to the provided host
def send_request(self, request: Request) -> ChatCompletion:
"""
Sends a request to the server and receives a response.
:param request: The request object to be sent.
:return: The unpacked ChatCompletion response.
"""
request_dict = asdict(request) # Convert the request dataclass to a dictionary
packed_request = msgpack.packb(request_dict) # Pack the request dictionary using MessagePack
self.socket.send(packed_request) # Send the packed request to the server
response = self.socket.recv() # Receive the response from the server
return self._unpack_response(response) # Unpack and return the response
@staticmethod
def _unpack_logprobs(data: bytes):
"""
Unpacks logprobs data from bytes.
:param data: The packed logprobs data.
:return: The unpacked ChatCompletionLogprobs object.
"""
text_offset, token_logprobs, tokens, top_logprobs = data
return ChatCompletionLogprobs(text_offset, token_logprobs, tokens, top_logprobs)
@staticmethod
def _unpack_message(data: bytes):
"""
Unpacks message data from bytes.
:param data: The packed message data.
:return: The unpacked Message object.
"""
role, content, name, function_call = data
return Message(role, content, name, function_call)
@staticmethod
def _unpack_choice(data: bytes):
"""
Unpacks choice data from bytes.
:param data: The packed choice data.
:return: The unpacked ChatCompletionChoice object.
"""
index, message, logprobs, finish_reason = data
message = LlamaClient._unpack_message(message)
if logprobs is not None:
logprobs = LlamaClient._unpack_logprobs(logprobs)
return ChatCompletionChoice(index, message, logprobs, finish_reason)
@staticmethod
def _unpack_usage(data: bytes):
"""
Unpacks usage data from bytes.
:param data: The packed usage data.
:return: The unpacked ChatCompletionUsage object.
"""
prompt_tokens, completion_tokens, total_tokens = data
return ChatCompletionUsage(prompt_tokens, completion_tokens, total_tokens)
@staticmethod
def _unpack_completion(data: bytes):
"""
Unpacks completion data from bytes.
:param data: The packed completion data.
:return: The unpacked ChatCompletion object.
"""
id, object, created, choices, usage, key_values = data
choices = [LlamaClient._unpack_choice(choice) for choice in choices]
usage = LlamaClient._unpack_usage(usage)
return ChatCompletion(id, object, created, choices, usage)
@staticmethod
def _unpack_response(data: bytes):
"""
Unpacks the response data from bytes.
:param data: The packed response data.
:return: The unpacked ChatCompletion object.
"""
unpacked_data = msgpack.unpackb(data, raw=False) # Unpack the data using MessagePack
return LlamaClient._unpack_completion(unpacked_data) # Return the unpacked ChatCompletion object | zmq-ai-client-python | /zmq_ai_client_python-1.0.3.tar.gz/zmq_ai_client_python-1.0.3/zmq_ai_client_python/client.py | client.py |
python-zmq-message-patterns
===========================
Library to quickly build ZeroMQ based Python applications.
Introduction
------------
Library to make writing applications using `ZeroMQ <http://www.zeromq.org/>`_ message patterns through `PyZMQ <https://github.com/zeromq/pyzmq>`_ easier.
TODO: explain ``ZMessage`` and ``ZNode`` classes.
Pipeline
--------
A ventilator sends jobs to multiple worker processes, which send the results to a sink.
Channels:
* ventilator -> worker: jobs for the workers
* ventilator -> sink: IDs of jobs sent to workers, so sink knows if all jobs have completed
* worker -> sink: results
* sink -> worker: sink sends shutdown command, when finished
Diagram::
ventilator-------------+
| |
+-------+------+ |
| | | |
worker worker worker ... |
|| || || |
++------++----++ |
|| |
sink----------------+
There is a fully functional example in the examples directory (``examples/pipeline_example.py``); a shorter sketch of the underlying ``ZNode``/``ZMessage`` API follows.
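A minimal sketch (illustrative only; the socket address and the ``payload``
field are arbitrary examples)::

    from zmessage.zmessage import ZMessage
    from zmessage.znode import ZNode

    class EchoSink(ZNode):
        def __init__(self):
            super(EchoSink, self).__init__()
            # PULL socket that other nodes PUSH requests to
            self.add_socket('requests', 'bind', 'PULL', 'tcp://127.0.0.1:5599')

        def run(self):
            while True:
                request = self.receive('requests')
                if request.type == 'shutdown':
                    break
                self.logger.info('got payload: %r', request['payload'])

    EchoSink().start()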
| zmq-message-patterns | /zmq-message-patterns-0.1.0.tar.gz/zmq-message-patterns-0.1.0/README.rst | README.rst |
# TODO: module description
# TODO: copyright notice
import zmq
from .zmessage import ZMessage
from .znode import ZNode
try:
from typing import Any, Dict, Iterator, List, Optional, Type, TypeVar
VentilatorWorkerMessageType = TypeVar('VentilatorWorkerMessageType', bound='VentilatorToWorkerMessage')
WorkerSinkMessageType = TypeVar('WorkerSinkMessageType', bound='WorkerToSinkMessage')
except ImportError:
pass
class SinkToWorkerMessage(ZMessage):
types = ('shutdown',)
class VentilatorToWorkerMessage(ZMessage):
pass
class VentilatorToSinkMessage(ZMessage):
types = ('ventilator job', 'finished')
required_data = ('request_id',)
class WorkerToSinkMessage(ZMessage):
types = ('job done',)
required_data = ('ventilator_request_id',)
class Sink(ZNode):
"""
Receive messages from workers and job IDs from ventilator:
Subclass and implement :py:meth:`handle_result()`.
"""
def __init__(self, worker_results_addr, worker_control_addr, job_ids_to_sink_addr): # type: (str, str, str) -> None
super(Sink, self).__init__()
self.unfinished_request_ids = [] # type: List[str]
self.unknown_ventilator_request_ids = [] # type: List[str]
self.add_socket('worker_results', 'bind', 'PULL', worker_results_addr)
self.add_socket('worker_control', 'bind', 'PUB', worker_control_addr)
self.add_socket('job_ids_to_sink', 'connect', 'PULL', job_ids_to_sink_addr)
self.poller = None # type: zmq.Poller
def init(self, install_sig_handler=True): # type: (Optional[bool]) -> None
super(Sink, self).init(install_sig_handler)
self.poller = zmq.Poller()
self.poller.register(self.sockets['worker_results'], zmq.POLLIN)
self.poller.register(self.sockets['job_ids_to_sink'], zmq.POLLIN)
def cleanup(self):
if self.unknown_ventilator_request_ids:
self.logger.warning(
                'Received %d results from workers that could not be matched to requests sent by ventilator: %r',
len(self.unknown_ventilator_request_ids),
self.unknown_ventilator_request_ids
)
if self.unfinished_request_ids:
self.logger.error('Missing %d results: %r', len(self.unfinished_request_ids), self.unfinished_request_ids)
super(Sink, self).cleanup()
def handle_result(self, request): # type: (WorkerSinkMessageType) -> None
        raise NotImplementedError()
def run(self, *args, **kwargs): # type: (*Any, **Any) -> Any
ventilator_finished = False
while True:
polled = self.poller.poll()
socks = dict(polled)
if self.sockets['job_ids_to_sink'] in socks:
request_v = self.receive('job_ids_to_sink', VentilatorToSinkMessage) # type: VentilatorToSinkMessage
if request_v.type == 'ventilator job':
request_id = request_v['request_id']
try:
# worker finished before ventilator message was received by sink
self.unknown_ventilator_request_ids.remove(request_id)
except ValueError:
self.unfinished_request_ids.append(request_id)
elif request_v.type == 'finished':
ventilator_finished = True
if self.sockets['worker_results'] in socks:
request_w = self.receive('worker_results', WorkerToSinkMessage) # type: WorkerSinkMessageType
try:
self.unfinished_request_ids.remove(request_w['ventilator_request_id'])
self.handle_result(request_w)
except ValueError:
self.unknown_ventilator_request_ids.append(request_w['ventilator_request_id'])
if ventilator_finished and not self.unfinished_request_ids:
if self.unknown_ventilator_request_ids:
self.logger.error(
'[%s] Received worker message(s) with unknown ventilator_request_id: %r.',
self.name, request_w['ventilator_request_id']
)
self.logger.debug('[%s] Workers finished all jobs, telling them to shut down.', self.name)
# self.send('worker_control', SinkToWorkerMessage('shutdown'))
self.sockets['worker_control'].send_string('shutdown')
self.messages_sent_count['shutdown'] += 1
break
class Ventilator(ZNode):
"""
Sends messages to workers and sink:
* a VentilatorToWorkerMessage with a job to workers (socket `jobs_to_workers`)
* a VentilatorToSinkMessage with the ID of the VentilatorToWorkerMessage to the sink (socket `job_ids_to_sink`)
* a VentilatorToSinkMessage with type `finished` to the sink, once all jobs and job IDs have been sent
Subclass and implement :py:meth:`requests()`.
"""
def __init__(self, jobs_to_workers_addr, job_ids_to_sink_addr, jobs_in_hwm=None):
# type: (str, str, Optional[bool]) -> None
"""
:param str jobs_to_workers_addr: address to bind to, workers will connect to this (e.g. `tcp://*:5555`)
:param str job_ids_to_sink_addr: address to bind to, sink will connect to this (e.g. `tcp://*:5556`)
"""
super(Ventilator, self).__init__()
if jobs_in_hwm:
jobs_in_kwargs = dict(rcvhwm=jobs_in_hwm, sndhwm=jobs_in_hwm)
else:
jobs_in_kwargs = {}
self.add_socket('jobs_to_workers', 'bind', 'PUSH', jobs_to_workers_addr, **jobs_in_kwargs)
self.add_socket('job_ids_to_sink', 'bind', 'PUSH', job_ids_to_sink_addr)
def requests(self): # type: () -> Iterator[VentilatorWorkerMessageType]
"""Iterator that yields VentilatorToWorkerMessage objects"""
        raise NotImplementedError()
def run(self, *args, **kwargs): # type: (*Any, **Any) -> None
assert 'job_ids_to_sink' in self.sockets
assert 'jobs_to_workers' in self.sockets
for request in self.requests():
self.send_job(request)
self.send_finished()
def send_job(self, request):
request_s = VentilatorToSinkMessage('ventilator job', request_id=request.id)
self.send('job_ids_to_sink', request_s)
self.send('jobs_to_workers', request)
def send_finished(self):
request_s = VentilatorToSinkMessage('finished', request_id=0)
self.send('job_ids_to_sink', request_s)
class Worker(ZNode):
"""
Set VentilatorWorkerMessageCls to your subclass of VentilatorToWorkerMessage.
"""
VentilatorWorkerMessageCls = VentilatorToWorkerMessage # type: VentilatorWorkerMessageType
def __init__(self, jobs_in_addr, worker_control_addr, results_out_addr, jobs_in_hwm=None):
# type: (str, str, str, Optional[int]) -> None
"""HWM limiting is not stable."""
super(Worker, self).__init__()
if jobs_in_hwm:
jobs_in_kwargs = dict(rcvhwm=jobs_in_hwm, sndhwm=jobs_in_hwm)
else:
jobs_in_kwargs = {}
self.add_socket('jobs_in', 'connect', 'PULL', jobs_in_addr, **jobs_in_kwargs)
self.add_socket('worker_control', 'connect', 'SUB', worker_control_addr)
self.add_socket('results_out', 'connect', 'PUSH', results_out_addr)
self.poller = None # type: zmq.Poller
def init(self, install_sig_handler=True): # type: (Optional[bool]) -> None
super(Worker, self).init(install_sig_handler)
self.poller = zmq.Poller()
self.poller.register(self.sockets['jobs_in'], zmq.POLLIN)
self.poller.register(self.sockets['worker_control'], zmq.POLLIN)
self.sockets['worker_control'].setsockopt_string(zmq.SUBSCRIBE, u'shutdown')
def do_work(self, request): # type: (VentilatorWorkerMessageType) -> WorkerSinkMessageType
"""
Do the work.
:param VentilatorToWorkerMessage request: the ventilators request
:return: message to send to sink
:rtype: WorkerToSinkMessage
"""
        raise NotImplementedError()
def run(self, *args, **kwargs): # type: (*Any, **Any) -> None
while True:
polled = self.poller.poll()
socks = dict(polled)
if self.sockets['jobs_in'] in socks:
request = self.receive('jobs_in', self.VentilatorWorkerMessageCls) # type: VentilatorWorkerMessageType
result = self.do_work(request)
self.send('results_out', result)
if self.sockets['worker_control'] in socks:
worker_control_string = self.sockets['worker_control'].recv_string()
if worker_control_string == 'shutdown':
break
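# --- Illustrative sketch (added for documentation; not part of the original API) ---
# The function below is never called. It only shows how the three roles in this
# module are typically subclassed; the type name, field names and job count are
# arbitrary examples.
def _pipeline_example():
    class SquareJob(VentilatorToWorkerMessage):
        required_data = ('number',)

    class SquareVentilator(Ventilator):
        def requests(self):
            # yield one VentilatorToWorkerMessage per job
            for n in range(10):
                yield SquareJob('square', number=n)

    class SquareWorker(Worker):
        VentilatorWorkerMessageCls = SquareJob

        def do_work(self, request):
            # answer the sink with the originating request id so it can track completion
            return WorkerToSinkMessage(
                'job done',
                ventilator_request_id=request.id,
                result=request['number'] ** 2,
            )

    class PrintSink(Sink):
        def handle_result(self, request):
            self.logger.info('result: %r', request['result'])

    return SquareVentilator, SquareWorker, PrintSink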
if __name__ == "__main__":
import doctest
doctest.testmod() | zmq-message-patterns | /zmq-message-patterns-0.1.0.tar.gz/zmq-message-patterns-0.1.0/zmessage/pipeline.py | pipeline.py |
# TODO: module description
# TODO: copyright notice
import os
import sys
import inspect
import signal
import logging
import threading
from collections import defaultdict, namedtuple
import zmq
from .zmessage import ZMessage
try:
from typing import Any, Dict, List, Optional, Type
from .zmessage import ZMessageType
except ImportError:
pass
# Socket configuration type
SocketConfig = namedtuple('SocketConfig', ('name', 'method', 'type', 'addr', 'attrs'))
class ZException(Exception):
"""Base class of all exceptions created by ZNode"""
pass
class ConnectionError(ZException):
"""
Error connecting to a socket.
Original exception raised by ZMQ is in :py:attr:`zmq_exc`.
"""
zmq_exc = None # type: zmq.error.ZMQError
def __init__(self, *args, **kwargs): # type: (*Any, **Any) -> None
self.zmq_exc = kwargs.pop('zmq_exc', None)
super(ConnectionError, self).__init__(*args, **kwargs)
class InvalidRequest(ZException):
"""
Request failed validation by :py:meth:`is_valid()` when checked before
sending or after receiving it.
"""
pass
class MessageFormatError(ZException):
"""
Received message cannot be transformed into a ZMessage object because it's
not in the required format.
"""
pass
class ZNode(object):
"""
Base class for socket handlers.
Usually it is enough to setup the sockets configuration in
:py:meth:`__init__()` and the request handling code in :py:meth:`run()`:
::
class ResultSink(ZNode):
def __init__(self):
super(ResultSink, self).__init__()
self.add_socket('from_workers', 'bind', 'PULL', 'tcp://127.0.0.1:5558')
def run(self):
while True:
request = self.receive('from_workers')
type = request.type
if type == 'shutdown':
break
elif type == 'job done':
...
ResultSink().start()
"""
pid = 0 # process ID
name = '' # identifier for this object
in_thread = False # if True, SIGINT handler will not be installed
signal_num = signal.SIGINT # signal that will trigger handler
sockets = None # type: Dict[str, zmq.Socket] # holds connected ZMQ sockets that were added through add_socket()
logger = None # type: logging.Logger # logging instance with name 'zmessage.znode'
context = None # type: zmq.Context # ZMQ context
_socket_configs = None # type: List[SocketConfig]
def __init__(self): # type: () -> None
self.pid = os.getpid()
self.name = '{}.{}'.format(self.__class__.__name__, self.pid)
self.logger = logging.getLogger(__name__)
self._socket_configs = []
self.sockets = {}
self.messages_received_count = defaultdict(int) # type: Dict[str, int]
self.messages_sent_count = defaultdict(int) # type: Dict[str, int]
self._cleaned_up = False
def init(self, install_sig_handler=True): # type: (Optional[bool]) -> None
"""
Initialize sockets and install a signal handler.
Creates ZMQ context, calls :py:meth:`connect()` to bind/connect all
sockets and optionally installs the method :py:meth:`signal_handler()`
as handler for signal :py:attr:`self.signal_num` (default SIGINT).
If used as an aggregate and a ZMQ context already exist, set
:py:attr:`context` before calling :py:meth:`init()` or
:py:meth:`start()`.
Regardless of `install_sig_handler` the signal handler will *not* be
installed if :py:attr:`self.in_thread` is True or the currents threads
name is not `MainThread`.
All methods called :py:meth:`pre_init_*()` will be called
(lexicographically sorted) at the start of :py:meth:`init()`, and all
methods called :py:meth:`post_init_*()` will be called (lex. sorted)
at the end of :py:meth:`init()`.
:param bool install_sig_handler: whether to install a signal handler
:return: None
"""
self._call_pre_inits()
self.pid = os.getpid()
if not self.context:
self.context = zmq.Context()
self.connect()
self.in_thread = self.in_thread or (threading.current_thread().getName() != 'MainThread')
if install_sig_handler and not self.in_thread:
signal.signal(self.signal_num, self.signal_handler)
self._call_post_inits()
def signal_handler(self, signum, frame): # type: (int, Any) -> None
"""
Handler for signal :py:attr:`self.signal_num` (default SIGINT) if
installed by :py:meth:`init()`.
Default implementation will run `self.cleanup(); sys.exit(0)`.
:param int signum: the signal that lead to calling this function
:param frame: current stack frame
:type frame: None or frame object
:return:
"""
        self.logger.warning('[%s] Received signal %r, shutting down.', self.name, 'SIGINT' if signum == 2 else signum)
self.cleanup()
sys.exit(1)
def add_socket(self, name, method, socket_type, addr, **attrs): # type: (str, str, str, str, **Any) -> None
"""
Add a socket configuration. The socket will be connected / bound in
:py:meth:`connect()` -> :py:meth:`connect_socket()` which will be
called by :py:meth:`start()` -> :py:meth:`init()`. The order of
:py:meth:`add_socket()` calls will be honored when connecting/binding.
It will *then* be available as :py:attr:`self.sockets.name`. The
attributes in `attrs` will be set before connecting/binding.
:param str name: the socket will be available as :py:attr:`self.sockets.name`.
:param str method: either `bind` or `connect`
:param str socket_type: ZeroMQ socket type (e.g. `DEALER`, `PAIR`, `PUB`, ...)
:param str addr: ZeroMQ protocol and address string (e.g. `tcp://*:5555`)
:param attrs: attributes and values to apply to socket object, eg. rcvhwm=100, sndhwm=100
:return: None
:raises AssertionError: when an argument is invalid
"""
assert name not in [c.name for c in self._socket_configs], 'Socket name already used.'
assert method in ('bind', 'connect'), 'Unknown socket connect method.'
assert socket_type in ('DEALER', 'PAIR', 'PUB', 'PULL', 'PUSH', 'REP', 'REQ', 'ROUTER', 'SUB'), 'Unknown socket type.'
assert hasattr(zmq, socket_type), 'Unknown socket type.'
assert any(addr.startswith('{}://'.format(proto)) for proto in ('inproc', 'ipc', 'tcp', 'pgm', 'epgm')), 'Unknown protocol.'
self._socket_configs.append(SocketConfig(name, method, getattr(zmq, socket_type), addr, attrs))
def connect_socket(self, socket_config): # type: (SocketConfig) -> zmq.Socket
"""
Create ZMQ socket and connect or bind it according to its configuration
previously created by :py:meth:`add_socket()`.
:param SocketConfig socket_config: configuration of socket
:return: ZMQ socket
:rtype: zmq.Socket
:raises zmq.error.ZMQError: when a socket cannot be bound/connected to
"""
socket = self.context.socket(socket_config.type)
for k, v in socket_config.attrs.items():
setattr(socket, k, v)
connect_or_bind_method = getattr(socket, socket_config.method)
connect_or_bind_method(socket_config.addr)
return socket
def connect(self): # type: () -> None
"""
Create ZMQ sockets and connect or bind them according to their
configuration previously created by :py:meth:`add_socket()`.
:return: None
:raises zmq.error.ZMQError: when a socket cannot be bound/connected to
"""
for socket_config in self._socket_configs:
try:
socket = self.connect_socket(socket_config)
except zmq.error.ZMQError as exc:
msg = '[{}] Error {} socket {!r} to {!r}: {}'.format(
self.name,
'binding' if socket_config.method == 'bind' else 'connecting',
socket_config.name,
socket_config.addr,
exc)
raise ConnectionError(msg, zmq_exc=exc)
self.sockets[socket_config.name] = socket
def run(self, *args, **kwargs): # type: (*Any, **Any) -> Any
"""
Put your logic here.
If a custom ZMessage subclass with expanded :py:meth:`.is_valid()` is
used, tests for invalid :py:attr:`type` can be omitted.
::
def run(self):
while True:
request = self.receive('from_ventilator', VentilatorMessage)
if request.type == 'do stuff':
...
self.send('to_sink', result)
elif request.type == 'shutdown':
break
:return: whatever you want
"""
raise NotImplementedError()
def start(self, install_sig_handler=True, cleanup=True, *args, **kwargs):
# type: (Optional[bool], Optional[bool], *Any, **Any) -> Any
"""
Use this function to start your application objects execution.
It simply runs :py:meth:`init()`; :py:meth:`run()` and
:py:func:`finally` :py:meth:`cleanup()`.
:param bool install_sig_handler: will be passed to :py:meth:`init()`
:param bool cleanup: whether to automatically call :py:meth:`cleanup()` after :py:meth:`run()`
:param args: will be passed to :py:meth:`run()`
:param kwargs: will be passed to :py:meth:`run()`
:return: whatever :py:meth:`run()` returns
"""
self.init(install_sig_handler)
try:
return self.run(*args, **kwargs)
finally:
if cleanup:
self.cleanup()
def cleanup(self): # type: () -> None
"""
Close sockets and terminate context.
:return: None
"""
if self._cleaned_up:
return
def context_term_handler(signum, frame): # type: (int, Any) -> None
# context will automatically be closed when this is garbage collected
pass
self.logger.debug('%r exiting after receiving messages: %r and sending messages: %r.',
self.name,
dict(self.messages_received_count) if self.messages_received_count else 0,
dict(self.messages_sent_count) if self.messages_sent_count else 0)
self.logger.debug('[%s] Cleanup of network sockets...', self.name)
for socket_config in self._socket_configs:
self.sockets[socket_config.name].close()
previous_handler = None
if not self.in_thread:
previous_handler = signal.signal(signal.SIGALRM, context_term_handler)
signal.alarm(1)
self.context.term()
if previous_handler:
signal.signal(signal.SIGALRM, previous_handler)
self._cleaned_up = True
self.logger.debug('[%s] Cleanup done.', self.name)
def send(self, socket_name, request): # type: (str, ZMessageType) -> None
"""
Send a request.
:param str socket_name: name of socket to send from
:param ZMessage request: message to send
:return: None
:raises InvalidRequest: when `request` not :py:meth:`is_valid()`
"""
assert socket_name in self.sockets, 'Unknown socket {!r}.'.format(socket_name)
if not request.is_valid():
raise InvalidRequest('[{}] Not sending invalid request: {}.'.format(self.name, request))
socket = self.sockets[socket_name]
socket.send_json(request.to_dict())
self.messages_sent_count[request.type] += 1
def receive(self, socket_name, message_cls=ZMessage): # type: (str, Optional[Type[ZMessage]]) -> ZMessageType
"""
Receive a message.
:param str socket_name: the socket to receive from
:param type message_cls: class to create message object from
:return: the received message
:rtype: ZMessage
:raises MessageFormatError: when received message cannot be converted to a ZMessage
:raises InvalidRequest: when received ZMessage not :py:meth`is_valid()`
"""
assert socket_name in self.sockets, 'Unknown socket {!r}.'.format(socket_name)
assert issubclass(message_cls, ZMessage), "Argument 'message_cls' must be a ZMessage (sub)class."
socket = self.sockets[socket_name]
message = socket.recv_json() # type: dict
try:
request = message_cls.from_dict(message) # type: ZMessageType
request.id = message['id']
self.messages_received_count[request.type] += 1
except (IndexError, TypeError) as exc:
self.messages_received_count['_bad_format_'] += 1
raise MessageFormatError('[{}] Received request has bad format: {}.'.format(self.name, exc))
if not request.is_valid():
self.messages_received_count['_invalid_request_'] += 1
raise InvalidRequest('[{}] Received invalid request: {}.'.format(self.name, request))
return request
def _call_pre_inits(self):
"""Run all methods with a name starting with 'pre_init_' (in lexicographical order)."""
methods = [name for name, member in inspect.getmembers(self, inspect.ismethod) if name.startswith('pre_init_')]
for method in sorted(methods):
            getattr(self, method)()
def _call_post_inits(self):
"""Run all methods with a name starting with 'post_init_' (in lexicographical order)."""
methods = [name for name, member in inspect.getmembers(self, inspect.ismethod) if name.startswith('post_init_')]
for method in sorted(methods):
            getattr(self, method)() | zmq-message-patterns | /zmq-message-patterns-0.1.0.tar.gz/zmq-message-patterns-0.1.0/zmessage/znode.py | znode.py |
# TODO: module description
# TODO: copyright notice
import uuid
import zlib
import base64
import logging
import collections
from six import string_types
try:
from typing import Any, Dict, Iterable, Iterator, Tuple, TypeVar
ZMessageType = TypeVar('ZMessageType', bound='ZMessage')
except ImportError:
pass
class ZMessage(collections.MutableMapping):
"""
Base class for messages received by :py:meth:`ZNode.receive()` and sent
from :py:meth:`ZNode.send()`.
All objects have a UUID in the :py:attr:`id` attribute and a name for the
message type (to help the recipient decide what to do with it) in
:py:attr:`type`.
Payload is added and retrieved through a dictionary interface. Only strings
are allowed as keys.
Set :py:attr:`types` and :py:attr:`required_data` or even expand
:py:meth:`is_valid()` to remove error checking code from
:py:meth:`ZNode.run()`. See docstring of :py:meth:`is_valid()` for an
example.
To meet special marshalling requirements, customize
:py:meth:`to_dict()` and :py:meth:`from_dict`.
"""
types = () # type: Iterable[str] # list of allowed values for `type`
required_data = () # type: Iterable[str] # list of keys that must be exist in message data
def __init__(self, mtype, **kwargs): # type: (str, **Any) -> None
"""
Message object.
:param str type: message type - use this in the recipient to determine what to do with the message
:param kwargs: payload
"""
self.type = mtype
self.id = str(uuid.uuid4())
self._data = {} # type: Dict[str, Any]
self.update(kwargs)
self.logger = logging.getLogger(__name__)
def __delitem__(self, key): # type: (str) -> Any
if key in ('type', 'id'):
raise KeyError('Deleting {!r} is forbidden.'.format(key))
del self._data[key]
def __getitem__(self, key): # type: (str) -> Any
if key in ('type', 'id'):
return super(ZMessage, self).__getitem__(key)
else:
return self._data[key]
def __iter__(self): # type: () -> Iterator[str]
return iter(self._data)
def __len__(self): # type: () -> int
return len(self._data)
def __repr__(self): # type: () -> str
return '{!s}(mtype={!r}, id={!r}, data={!r})'.format(self.__class__.__name__, self.type, self.id, self._data)
def __setitem__(self, key, value): # type: (str, Any) -> None
if key in ('type', 'id'):
super(ZMessage, self).__setitem__(key, value)
else:
if not isinstance(key, string_types):
raise TypeError('Only strings are allowed as keys.')
self._data[key] = value
def __eq__(self, other): # type: (object) -> bool
# duck typing: allow object that is not a ZMessage (subclass) instance,
# as long as it has attributes 'id', 'type' and a dict interface
return all((
self.id == getattr(other, 'id'),
self.type == getattr(other, 'type'),
set(self.items()) == set((key, self[key]) for key in getattr(other, '__iter__', lambda: [])())
))
@staticmethod
def decode_binary(data): # type: (bytes) -> bytes
"""
Helper function. Will decode data encoded with
:py:func:`encode_binary()`.
::
>>> s = b'foo'
>>> ZMessage.decode_binary(ZMessage.encode_binary(s)) == s
True
:param bytes data: encoded data
:return: decoded data
:rtype: bytes
"""
return zlib.decompress(base64.b64decode(data))
@staticmethod
def encode_binary(data): # type: (bytes) -> bytes
"""
Helper function. Will zlib compress `data` and base64 encode it.
:param bytes data: data already serialized to a string representation
:return: base64 encoded, zlib compress `data`
:rtype: bytes
"""
return base64.b64encode(zlib.compress(data))
def is_valid(self): # type: () -> Tuple[bool, str]
"""
Check if the message object is valid. This will be run on objects
created by :py:meth:`ZNode.receive()` and on requests before sending
them in :py:meth:`ZNode.send()`.
Validity checks performed here will simplify code in
:py:meth:`ZNode.run()`.
Set :py:attr:`types` to check if the messages value of
:py:attr:`type` is an expected one.
Set :py:attr:`required_data` to check if a message contains the
expected data.
If :py:attr:`required_data` contains an entry `foo`, and a method
:py:meth:`is_valid_foo()` is found, then it is executed. It is expected
to return the same as :py:meth:`is_valid()` does: a tuple(bool, str)
with the result of the test and an optional error message.
>>> class TestMessage(ZMessage):
... types = ('test', 'shutdown')
... required_data = ('foo', 'zoo')
... def is_valid_foo(self):
... if self['foo'].upper() != 'BAR':
... return False, 'Foo must be bar.'
... return True, ''
>>> m = TestMessage('test')
>>> m.is_valid()
(False, "Required data 'foo' is unset in message.")
>>> m['foo'] = 'poo'
>>> m.is_valid()
(False, 'Foo must be bar.')
>>> m['foo'] = 'bar'
>>> m.is_valid()
(False, "Required data 'zoo' is unset in message.")
>>> m['zoo'] = 'Python'
>>> m.is_valid()
(True, '')
:return: whether the message objects attributes have the expected values and optional error message
:rtype: tuple(bool, str)
"""
if not isinstance(self.type, string_types):
return False, "'type' must be a string."
if not isinstance(self.id, string_types) or len(self.id) != 36:
return False, "Value of 'id' must be a string containing a UUID in standard hex digits form."
if self.types and self.type not in self.types:
return False, "Value of 'type' must be one of {}.".format(', '.join(self.types))
for key in self.required_data:
if key not in self:
return False, 'Required data {!r} is unset in message.'.format(key)
try:
result, reason = getattr(self, 'is_valid_{}'.format(key))()
if not result:
return result, reason
except AttributeError:
pass
return True, ''
def to_dict(self): # type: () -> dict
"""
Marshall object to a dict for transfer over the wire.
>>> m = ZMessage('test', foo='bar')
>>> ZMessage.from_dict(m.to_dict()) == m
True
:return: marshalled object
:rtype: dict
"""
return {'id': self.id, 'type': self.type, 'data': self._data}
@classmethod
def from_dict(cls, data): # type: (dict) -> ZMessage
"""
Unmarshall after transfer: turn `data` into ZMessage object.
:param dict data: arguments to create ZMessage object from, created by :py:meth:`to_dict()`
:return: ZMessage object
:rtype: ZMessage
"""
assert 'id' in data
assert 'type' in data
assert 'data' in data
res = cls(data['type'], **data['data'])
res.id = data['id']
return res
if __name__ == "__main__":
import doctest
doctest.testmod() | zmq-message-patterns | /zmq-message-patterns-0.1.0.tar.gz/zmq-message-patterns-0.1.0/zmessage/zmessage.py | zmessage.py |
# 1. Introduction
zmq_ops is a component that the Avatar training framework depends on. Integrating it into TensorFlow enables online, real-time learning and training. Its main features are:
1. Conforms to the TensorFlow IO interface standard, so it can be integrated directly with TensorFlow
2. Provides the PUSH-PULL pattern for one-way data transfer, and also supports the REQ-ROUTER pattern for two-way data transfer
# 2. Installation
## 2.1 Install dependencies
```bash
conda install zeromq
conda install tensorflow
```
## 2.2 Install from source
```bash
# Set the conda environment path before building
export CONDA_ENV_PATH=/path/to/conda/env
cd zmq_ops
python setup.py install
```
## 2.3 Binary installation
```bash
pip install zmq-ops
```
# 3. Usage
## 3.1 ZmqReader
The zmq reader implements the PULL side of ZMQ's PUSH-PULL pattern (a usage sketch follows the list below). It provides three ops:
1. zmq_reader_init(end_point, hwm): initialize the zmq reader
2. zmq_reader_next(resource, types, shapes): read the next batch of data
3. zmq_reader_readable(resource): check whether the zmq reader has data available to read
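A minimal PULL-side sketch is shown below. It is illustrative only: the import path (`from zmq_ops import zmq_ops`), the TensorFlow session API, and the endpoint/types/shapes are assumptions, not part of this README; see zmq_reader_test.py for the authoritative usage.
```python
import tensorflow as tf
from zmq_ops import zmq_ops  # assumed import path

# Initialize a PULL-side reader and fetch one batch of tensors.
reader = zmq_ops.zmq_reader_init("tcp://127.0.0.1:5555", 128)
tensors = zmq_ops.zmq_reader_next(reader, types=[tf.float32], shapes=[[None, 8]])
with tf.compat.v1.Session() as sess:
    if sess.run(zmq_ops.zmq_reader_readable(reader)):
        batch = sess.run(tensors)
```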
## 3.2 ZmqServer
The zmq server implements the ROUTER side of ZMQ's REQ-ROUTER pattern. It provides three ops:
1. zmq_server_init(end_point, hwm): initialize the zmq server
2. zmq_server_recv_all(resource, types, shapes, min_cnt, max_cnt): read as much data as possible from the zmq server (at least min_cnt and at most max_cnt records), assemble it into a single batch, and return client_id and tensors
3. zmq_server_send_all(resource, client_id, tensors): send tensors to the corresponding clients according to client_id
For concrete usage examples, see zmq_reader_test.py and zmq_server_test.py. | zmq-ops | /zmq_ops-0.4.0.tar.gz/zmq_ops-0.4.0/README.md | README.md
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow.core.framework import tensor_pb2 as tensorflow_dot_core_dot_framework_dot_tensor__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensor_array.proto',
package='avatar',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x12tensor_array.proto\x12\x06\x61vatar\x1a&tensorflow/core/framework/tensor.proto\"<\n\x10TensorArrayProto\x12(\n\x07tensors\x18\x01 \x03(\x0b\x32\x17.tensorflow.TensorProtob\x06proto3')
,
dependencies=[tensorflow_dot_core_dot_framework_dot_tensor__pb2.DESCRIPTOR,])
_TENSORARRAYPROTO = _descriptor.Descriptor(
name='TensorArrayProto',
full_name='avatar.TensorArrayProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tensors', full_name='avatar.TensorArrayProto.tensors', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=70,
serialized_end=130,
)
_TENSORARRAYPROTO.fields_by_name['tensors'].message_type = tensorflow_dot_core_dot_framework_dot_tensor__pb2._TENSORPROTO
DESCRIPTOR.message_types_by_name['TensorArrayProto'] = _TENSORARRAYPROTO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TensorArrayProto = _reflection.GeneratedProtocolMessageType('TensorArrayProto', (_message.Message,), {
'DESCRIPTOR' : _TENSORARRAYPROTO,
'__module__' : 'tensor_array_pb2'
# @@protoc_insertion_point(class_scope:avatar.TensorArrayProto)
})
_sym_db.RegisterMessage(TensorArrayProto)
# @@protoc_insertion_point(module_scope) | zmq-ops | /zmq_ops-0.4.0.tar.gz/zmq_ops-0.4.0/zmq_ops/tensor_array_pb2.py | tensor_array_pb2.py |
__author__ = ('Douglas Creager <[email protected]>',
'Michal Nazarewicz <[email protected]>')
__license__ = 'This file is placed into the public domain.'
__maintainer__ = 'Michal Nazarewicz'
__email__ = '[email protected]'
__all__ = ('getVersion',)
import re
import subprocess
import sys
RELEASE_VERSION_FILE = 'RELEASE-VERSION'
# http://www.python.org/dev/peps/pep-0386/
_PEP386_SHORT_VERSION_RE = r'\d+(?:\.\d+)+(?:(?:[abc]|rc)\d+(?:\.\d+)*)?'
_PEP386_VERSION_RE = r'^%s(?:\.post\d+)?(?:\.dev\d+)?$' % (
_PEP386_SHORT_VERSION_RE)
_GIT_DESCRIPTION_RE = r'^v(?P<ver>%s)-(?P<commits>\d+)-g(?P<sha>[\da-f]+)$' % (
_PEP386_SHORT_VERSION_RE)
def readGitVersion():
try:
proc = subprocess.Popen(('git', 'describe', '--long',
'--match', 'v[0-9]*.*'),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
data, _ = proc.communicate()
if proc.returncode:
return None
ver = data.splitlines()[0].strip()
proc = subprocess.Popen(('git', 'rev-parse', '--abbrev-ref', 'HEAD'),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
branch, _ = proc.communicate()
if proc.returncode:
return None
except:
return None
if not ver:
return None
m = re.search(_GIT_DESCRIPTION_RE, ver)
if not m:
sys.stderr.write('version: git description (%s) is invalid, '
'ignoring\n' % ver)
return None
commits = int(m.group('commits'))
if not commits:
version = m.group('ver')
else:
version = '%s.post%d' % (
m.group('ver'), commits)
if branch.strip() != 'master':
version += '.dev%d' % int(m.group('sha'), 16)
return version
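# Illustrative mapping implemented by readGitVersion() (values are made up):
#   'v1.2-0-gabc1234' on branch 'master'  -> '1.2'
#   'v1.2-4-gabc1234' on branch 'master'  -> '1.2.post4'
#   'v1.2-4-gabc1234' on another branch   -> '1.2.post4.dev<int("abc1234", 16)>'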
def readReleaseVersion():
try:
fd = open(RELEASE_VERSION_FILE)
try:
ver = fd.readline().strip()
finally:
fd.close()
if not re.search(_PEP386_VERSION_RE, ver):
sys.stderr.write('version: release version (%s) is invalid, '
'will use it anyway\n' % ver)
return ver
except:
return None
def writeReleaseVersion(version):
fd = open(RELEASE_VERSION_FILE, 'w')
fd.write('%s\n' % version)
fd.close()
def getVersion():
release_version = readReleaseVersion()
version = readGitVersion() or release_version
if not version:
raise ValueError('Cannot find the version number')
if version != release_version:
writeReleaseVersion(version)
return version
if __name__ == '__main__':
print getVersion() | zmq-plugin | /zmq-plugin-0.2.post2.zip/zmq-plugin-0.2.post2/version.py | version.py |
from datetime import datetime
from collections import OrderedDict
from pprint import pformat
import inspect
import itertools
import json
import logging
import re
import jsonschema
import zmq
from .schema import (validate, get_connect_request, get_execute_request,
get_execute_reply, decode_content_data)
# Create module-level logger.
logger = logging.getLogger(__name__)
class PluginBase(object):
def __init__(self, name, query_uri, subscribe_options=None):
'''
Plugin which can be connected to a network of other plugin instances
through a central **hub**.
## Thread-safety ##
All socket configuration, registration, etc. is performed *only* when
the `reset` method is called explicitly. Thus, all sockets are created
in the thread that calls the `reset` method.
        By creating sockets in the thread that calls `reset`, it is
straightforward to, for example, run a `Plugin` in a separate process
or thread.
Args:
name (str) : Unique name across all plugins.
query_uri (str) : The URI address of the **hub** query socket.
'''
self.name = name
host_cre = re.compile(r'^(?P<transport>[^:]+)://(?P<host>[^:]+)(:(?P<port>\d+)?)')
match = host_cre.search(query_uri)
self.transport = match.group('transport')
self.host = match.group('host')
self.hub_name = 'hub'
self.query_uri = query_uri
self.query_socket = None
self.command_socket = None
self.subscribe_options = subscribe_options or {}
self.subscribe_socket = None
self.execute_reply_id = itertools.count(1)
# Registry of functions to call upon receiving `execute_reply`
# messages, keyed by the `session` field of the
# `execute_request`/`execute_reply` header.
self.callbacks = OrderedDict()
def close(self):
'''
Close all sockets.
'''
for socket in (self.query_socket, self.command_socket,
self.subscribe_socket):
if socket is not None:
socket.close()
def reset(self):
'''
Reset the plugin state.
This includes:
- Resetting the execute reply identifier counter.
- Resetting the `command`, `query`, and `publish` sockets.
- Registering with the central **hub**.
'''
self.execute_reply_id = itertools.count(1)
self.reset_query_socket()
# Get socket info and **hub** name.
connect_request = get_connect_request(self.name, self.hub_name)
reply = self.query(connect_request)
self.hub_name = bytes(reply['header']['source'])
self.hub_socket_info = reply['content']
# Initialize sockets using obtained socket info.
self.reset_subscribe_socket()
self.reset_command_socket()
# Explicitly register with the **hub** and retrieve plugin registry.
self.register()
def register(self):
'''
Register as a plugin with the central **hub**.
Registration also updates the local plugin registry, which contains the
        names of all plugins registered with the **hub** at the time of
registration.
Note that this method is safe to execute multiple times. This provides
a mechanism to refresh the local plugin registry.
'''
connect_request = get_execute_request(self.name, self.hub_name,
'register')
reply = self.query(connect_request)
self.plugin_registry = decode_content_data(reply)
self.logger.info('Registered with hub at "%s"', self.query_uri)
###########################################################################
# Query socket methods
def reset_query_socket(self):
'''
Create and configure *query* socket (existing socket is destroyed if it
exists).
'''
context = zmq.Context.instance()
if self.query_socket is not None:
self.query_socket = None
self.query_socket = zmq.Socket(context, zmq.REQ)
self.query_socket.connect(self.query_uri)
def query(self, request, **kwargs):
'''
Send request message to **hub**, receive response, and return decoded
reply message.
Args:
request (dict) : `<...>_request` message.
Returns:
            (dict) : Validated reply message.
'''
try:
self.query_socket.send(json.dumps(request))
reply = json.loads(self.query_socket.recv(**kwargs))
validate(reply)
return reply
except:
self.logger.error('Query error', exc_info=True)
self.reset_query_socket()
raise
@property
def logger(self):
'''
Return logger configured with a name in the following form:
<module_name>.<class_name>.<method_name>->"<self.name>"
'''
return logging.getLogger('.'.join((__name__, str(type(self).__name__),
inspect.stack()[1][3]))
+ '->"%s"' % self.name)
###########################################################################
# Command socket methods
def reset_command_socket(self):
'''
Create and configure *command* socket (existing socket is destroyed if
it exists).
'''
context = zmq.Context.instance()
if self.command_socket is not None:
self.command_socket = None
# Create command socket and assign name as identity.
self.command_socket = zmq.Socket(context, zmq.ROUTER)
self.command_socket.setsockopt(zmq.IDENTITY, bytes(self.name))
command_uri = '%s://%s:%s' % (self.transport, self.host,
self.hub_socket_info['command']['port'])
self.command_socket.connect(command_uri)
self.logger.info('Connected command socket to "%s"', command_uri)
def send_command(self, request):
self.command_socket.send_multipart(map(str, [self.hub_name, '',
json.dumps(request)]))
def on_command_recv(self, frames):
'''
Process multi-part message from command socket.
This method may, for example, be called asynchronously as a callback in
run loop through a `ZMQStream(...)` configuration. See [here][1] for
more details.
Args:
frames (list) : Multi-part ZeroMQ message.
Returns:
None
[1]: http://learning-0mq-with-pyzmq.readthedocs.org/en/latest/pyzmq/multisocket/tornadoeventloop.html
'''
try:
message_str = frames[-1]
message = json.loads(message_str)
validate(message)
except jsonschema.ValidationError:
self.logger.error('unexpected message', exc_info=True)
message_type = message['header']['msg_type']
if message_type == 'execute_request':
self._process__execute_request(message)
elif message_type == 'execute_reply':
self._process__execute_reply(message)
else:
self.logger.error('Unrecognized message type: %s', message_type)
def _process__execute_reply(self, reply):
'''
Process validated `execute_reply` message.
If a callback function was registered during the execution request call
the callback function on the reply message.
Args:
reply (dict) : `execute_reply` message
Returns:
None
'''
try:
session = reply['header']['session']
if session in self.callbacks:
# A callback was registered for the corresponding request.
func = self.callbacks[session]
# Remove callback.
del self.callbacks[session]
# Call callback with reply.
func(reply)
else:
# No callback registered for session.
pass
except:
self.logger.error('Processing error.', exc_info=True)
def _process__execute_request(self, request):
'''
Process validated `execute_request` message, which includes the name of
the command to execute.
If a method with the name `on_execute__<command>` exists, call the
method on the `request` and send the return value wrapped in an
`execute_reply` message to the source of the request.
        If no matching method exists, or if an exception is encountered
while processing the command, send `execute_reply` message with
corresponding error information to the source of the request.
Args:
            request (dict) : `execute_request` message
Returns:
None
'''
try:
func = getattr(self, 'on_execute__' +
request['content']['command'], None)
if func is None:
error = NameError('Unrecognized command: %s' %
request['content']['command'])
reply = get_execute_reply(request,
self.execute_reply_id.next(),
error=error)
else:
result = func(request)
reply = get_execute_reply(request,
self.execute_reply_id.next(),
data=result)
validate(reply)
reply_str = json.dumps(reply)
except (Exception, ), exception:
import traceback
reply = get_execute_reply(request, self.execute_reply_id.next(),
error=traceback.format_exc())
#error=exception)
reply_str = json.dumps(reply)
self.command_socket.send_multipart([self.hub_name, '', reply_str])
###########################################################################
# Subscribe socket methods
def reset_subscribe_socket(self):
'''
Create and configure *subscribe* socket (existing socket is destroyed
if it exists).
'''
context = zmq.Context.instance()
if self.subscribe_socket is not None:
self.subscribe_socket = None
# Create subscribe socket and assign name as identity.
self.subscribe_socket = zmq.Socket(context, zmq.SUB)
if self.subscribe_options:
for k, v in self.subscribe_options.iteritems():
self.subscribe_socket.setsockopt(k, v)
                self.logger.debug('set socket option %s=%s', k, v)
subscribe_uri = '%s://%s:%s' % (self.transport, self.host,
self.hub_socket_info['publish']
['port'])
self.subscribe_socket.connect(subscribe_uri)
self.logger.info('Connected subscribe socket to "%s"', subscribe_uri)
def on_subscribe_recv(self, msg_frames):
'''
Process multi-part message from subscribe socket.
This method may, for example, be called asynchronously as a callback in
run loop through a `ZMQStream(...)` configuration. See [here][1] for
more details.
Args:
frames (list) : Multi-part ZeroMQ message.
Returns:
None
[1]: http://learning-0mq-with-pyzmq.readthedocs.org/en/latest/pyzmq/multisocket/tornadoeventloop.html
'''
import cPickle as pickle
try:
logger.info(pformat(pickle.loads(msg_frames[0])))
except:
logger.error('Deserialization error', exc_info=True)
###########################################################################
# Execute methods
def execute_async(self, target_name, command, callback=None, silent=False,
extra_kwargs=None, **kwargs):
'''
Send request to execute the specified command to the identified target.
**N.B.,** this method is non-blocking, i.e., it does not wait for a
response. For a blocking wrapper around this method, see `execute`
method below.
Args:
target_name (str) : Name (i.e., ZeroMQ identity) of the target.
command (str) : Name of command to execute.
callback (function) : Function to call on received response.
Callback signature is `callback_func(reply)`, where `reply` is
an `execute_reply` message. Callback is added to
`self.callbacks`, keyed by session identifier of request.
silent (bool) : A boolean flag which, if `True`, signals the plugin
to execute this code as quietly as possible. If `silent=True`,
reply will *not* broadcast output on the IOPUB channel.
**kwargs (dict) : Keyword arguments for command.
Returns:
(str) : Session identifier for request.
'''
if extra_kwargs is not None:
kwargs.update(extra_kwargs)
request = get_execute_request(self.name, target_name, command,
data=kwargs, silent=silent)
if callback is not None:
self.callbacks[request['header']['session']] = callback
self.send_command(request)
return request['header']['session']
def execute(self, target_name, command, timeout_s=None, wait_func=None,
silent=False, extra_kwargs=None, **kwargs):
'''
Send request to execute the specified command to the identified target
and return decoded result object.
        **N.B.,** this method is blocking, i.e., it waits for a response. See
`execute_async` method for non-blocking variant with `callback`
argument.
Args:
target_name (str) : Name (i.e., ZeroMQ identity) of the target.
command (str) : Name of command to execute.
**kwargs (dict) : Keyword arguments for command.
Returns:
(object) : Result from remotely executed command.
'''
# Create result object that will be updated when response is received.
result = {}
def _callback(reply):
try:
result['data'] = decode_content_data(reply)
except (Exception, ), exception:
result['error'] = exception
session = self.execute_async(target_name, command, callback=_callback,
silent=silent, extra_kwargs=extra_kwargs,
**kwargs)
start = datetime.now()
while session in self.callbacks:
try:
msg_frames = self.command_socket.recv_multipart(zmq.NOBLOCK)
except zmq.Again:
wait_duration_s = (datetime.now() - start).total_seconds()
if timeout_s is not None and (wait_duration_s > timeout_s):
raise IOError('Timed out waiting for response for request '
'(session="%s")' % session)
if wait_func is not None:
wait_func(wait_duration_s)
continue
self.on_command_recv(msg_frames)
if 'error' in result:
raise result['error']
return result['data']
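    # Typical blocking usage of `execute` (illustrative sketch; assumes a hub
    # and a plugin named 'plugin_a' are already running, cf. examples/demo.py):
    #
    #   plugin_b = Plugin('plugin_b', 'tcp://localhost:12345')
    #   plugin_b.reset()
    #   plugin_b.execute('plugin_a', 'ping')  # -> 'pong' (see Plugin.on_execute__ping)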
class Plugin(PluginBase):
def on_execute__ping(self, request):
return 'pong' | zmq-plugin | /zmq-plugin-0.2.post2.zip/zmq-plugin-0.2.post2/zmq_plugin/plugin.py | plugin.py |
import base64
import cPickle as pickle
import copy
import json
import uuid
import arrow
import jsonschema
import yaml
# ZeroMQ Plugin message format as [json-schema][1] (inspired by
# [IPython messaging format][2]).
#
# See [here][3] for information on content transfer encodings.
#
# [1]: https://python-jsonschema.readthedocs.org/en/latest/
# [2]: http://jupyter-client.readthedocs.org/en/latest/messaging.html#messaging
# [3]: https://www.w3.org/Protocols/rfc1341/5_Content-Transfer-Encoding.html
MESSAGE_SCHEMA = {
'definitions':
{'unique_id': {'type': 'string', 'description': 'Typically UUID'},
'header' :
{'type': 'object',
'properties':
{'msg_id': {'$ref': '#/definitions/unique_id',
'description':
'Typically UUID, should be unique per message'},
'session' : {'$ref': '#/definitions/unique_id',
'description':
'Typically UUID, should be unique per session'},
'date': {'type': 'string',
'description':
'ISO 8601 timestamp for when the message is created'},
'source': {'type': 'string',
'description': 'Name/identifier of message source (unique '
'across all plugins)'},
'target': {'type': 'string',
'description': 'Name/identifier of message target (unique '
'across all plugins)'},
'msg_type' : {'type': 'string',
'enum': ['connect_request', 'connect_reply',
'execute_request', 'execute_reply'],
'description': 'All recognized message type strings.'},
'version' : {'type': 'string',
'default': '0.5',
'enum': ['0.2', '0.3', '0.4', '0.5'],
'description': 'The message protocol version'}},
'required': ['msg_id', 'session', 'date', 'source', 'target', 'msg_type',
'version']},
'base_message':
{'description': 'ZeroMQ Plugin message format as json-schema (inspired '
'by IPython messaging format)',
'type': 'object',
'properties':
{'header': {'$ref': '#/definitions/header'},
'parent_header':
{'description':
'In a chain of messages, the header from the parent is copied so that '
'clients can track where messages come from.',
'$ref': '#/definitions/header'},
'metadata': {'type': 'object',
'description': 'Any metadata associated with the message.',
'properties': {'transfer_encoding':
{'type': 'string',
'default': '8bit'}}},
'content': {'type': 'object',
'description': 'The actual content of the message must be a '
'dict, whose structure depends on the message type.'}},
'required': ['header']},
'execute_request':
{'description': 'Request to perform an execution request.',
'allOf': [{'$ref': '#/definitions/base_message'},
{'properties':
{'content':
{'type': 'object',
'properties':
{'command': {'description':
'Command to be executed by the target',
'type': 'string'},
'data': {'description': 'The execution arguments.'},
'metadata': {'type': 'object',
'description': 'Contains any metadata that '
'describes the output.'},
'silent': {'type': 'boolean',
'description': 'A boolean flag which, if True, '
'signals the plugin to execute this code as '
'quietly as possible. silent=True will *not* '
'broadcast output on the IOPUB channel.',
'default': False},
'stop_on_error':
{'type': 'boolean',
'description': 'A boolean flag, which, if True, does not '
'abort the execution queue, if an exception is '
'encountered. This allows the queued execution of multiple'
' execute_requests, even if they generate exceptions.',
'default': False}},
'required': ['command']}}}]},
'error':
{'properties':
{'ename': {'type': 'string',
'description': "Exception name, as a string"},
'evalue': {'type': 'string',
'description': "Exception value, as a string"},
'traceback': {"type": "array",
'description':
"The traceback will contain a list of frames, represented "
"each as a string."}},
'required': ['ename']},
'execute_reply':
{'description': 'Response from an execution request.',
'allOf': [{'$ref': '#/definitions/base_message'},
{'properties':
{'content':
{'type': 'object',
'properties':
{'command': {'description': 'Command executed',
'type': 'string'},
'status': {'type': 'string',
'enum': ['ok', 'error', 'abort']},
'execution_count':
{'type': 'number',
'description': 'The execution counter that increases by one'
' with each request.'},
'data': {'description': 'The execution result.'},
'metadata': {'type': 'object',
'description': 'Contains any metadata that '
'describes the output.'},
'silent': {'type': 'boolean',
'description': 'A boolean flag which, if True, '
'signals the plugin to execute this code as '
'quietly as possible. silent=True will *not* '
'broadcast output on the IOPUB channel.',
'default': False},
'error': {'$ref': '#/definitions/error'}},
'required': ['command', 'status', 'execution_count']}}}],
'required': ['content']},
'connect_request':
{'description': 'Request to get basic information about the plugin hub, '
'such as the ports the other ZeroMQ sockets are listening on.',
'allOf': [{'$ref': '#/definitions/base_message'}]},
'connect_reply':
{'description': 'Basic information about the plugin hub.',
'allOf': [{'$ref': '#/definitions/base_message'},
{'properties':
{'content':
{'type': 'object',
'properties':
{'command': {'type': 'object',
'properties': {'uri': {'type': 'string'},
'port': {'type': 'number'},
'name': {'type': 'string'}},
'required': ['uri', 'port', 'name']},
'publish': {'type': 'object',
'properties': {'uri': {'type': 'string'},
'port': {'type': 'number'}},
'required': ['uri', 'port']}},
'required': ['command', 'publish']}}}],
'required': ['content', 'parent_header']}
},
}
def get_schema(definition):
schema = copy.deepcopy(MESSAGE_SCHEMA)
schema['allOf'] = [{'$ref': '#/definitions/%s' % definition}]
return schema
message_types = (['base_message'] + MESSAGE_SCHEMA['definitions']['header']
['properties']['msg_type']['enum'])
MESSAGE_SCHEMAS = dict([(k, get_schema(k)) for k in message_types])
# Pre-construct a validator for each message type.
MESSAGE_VALIDATORS = dict([(k, jsonschema.Draft4Validator(v))
for k, v in MESSAGE_SCHEMAS.iteritems()])
def validate(message):
'''
Validate message against message types defined in `MESSAGE_SCHEMA`.
Args:
message (dict) : One of the message types defined in `MESSAGE_SCHEMA`.
Returns:
(dict) : Message. A `jsonschema.ValidationError` is raised if
validation fails.
'''
MESSAGE_VALIDATORS['base_message'].validate(message)
# Message validated as a basic message. Now validate as specific type.
msg_type = message['header']['msg_type']
MESSAGE_VALIDATORS[msg_type].validate(message)
return message
def decode_content_data(message):
'''
Validate message and decode data from content according to mime-type.
Args:
message (dict) : One of the message types defined in `MESSAGE_SCHEMA`.
Returns:
(object) : Return deserialized object from `content['data']` field of
message. A `RuntimeError` is raised if `content['error']` field is
set.
'''
validate(message)
error = message['content'].get('error', None)
if error is not None:
raise RuntimeError(error)
mime_type = 'application/python-pickle'
transfer_encoding = 'BASE64'
metadata = message['content'].get('metadata', None)
if metadata is not None:
mime_type = metadata.get('mime_type', mime_type)
transfer_encoding = metadata.get('transfer_encoding',
transfer_encoding)
data = message['content'].get('data', None)
if data is None:
return None
# If content data was base64 encoded, decode it.
#
# [1]: https://www.w3.org/Protocols/rfc1341/5_Content-Transfer-Encoding.html
if transfer_encoding == 'BASE64':
data = base64.b64decode(data)
if mime_type == 'application/python-pickle':
# Pickle object.
return pickle.loads(data)
elif mime_type == 'application/x-yaml':
        return yaml.load(data)
elif mime_type == 'application/json':
return json.loads(data)
elif mime_type in ('application/octet-stream', 'text/plain'):
return data
else:
raise ValueError('Unrecognized mime-type: %s' % mime_type)
def encode_content_data(data, mime_type='application/python-pickle',
transfer_encoding='BASE64'):
content = {}
if data is not None:
if mime_type == 'application/python-pickle':
# Pickle object.
content['data'] = pickle.dumps(data, protocol=-1)
elif mime_type == 'application/x-yaml':
            content['data'] = yaml.dump(data)
elif mime_type is None or mime_type in ('application/octet-stream',
'application/json',
'text/plain'):
content['data'] = data
# Encode content data as base64, if necessary.
#
# [1]: https://www.w3.org/Protocols/rfc1341/5_Content-Transfer-Encoding.html
if transfer_encoding == 'BASE64':
content['data'] = base64.b64encode(content['data'])
if mime_type is not None:
content['metadata'] = {'mime_type': mime_type}
return content
def get_header(source, target, message_type, session=None):
return {'msg_id': str(uuid.uuid4()),
'session' : session or str(uuid.uuid4()),
'date': arrow.now().isoformat(),
'source': source,
'target': target,
'msg_type': message_type,
'version': '0.4'}
def get_connect_request(source, target):
'''
Construct a `connect_request` message.
Args:
source (str) : Source name/ZMQ identifier.
target (str) : Target name/ZMQ identifier.
Returns:
(dict) : A `connect_request` message.
'''
header = get_header(source, target, 'connect_request')
return {'header': header}
def get_connect_reply(request, content):
'''
Construct a `connect_reply` message.
Args:
request (dict) : The `connect_request` message corresponding to the
reply.
content (dict) : The content of the reply.
Returns:
(dict) : A `connect_reply` message.
'''
header = get_header(request['header']['target'],
request['header']['source'],
'connect_reply',
session=request['header']['session'])
return {'header': header,
'parent_header': request['header'],
'content': content}
def get_execute_request(source, target, command, data=None,
mime_type='application/python-pickle',
transfer_encoding='BASE64', silent=False,
stop_on_error=False):
'''
Construct an `execute_request` message.
Args:
source (str) : Source name/ZMQ identifier.
target (str) : Target name/ZMQ identifier.
command (str) : Name of command to execute.
data (dict) : Keyword arguments to command.
mime_type (dict) : Mime-type of requested data serialization format.
By default, data is serialized using `pickle`.
silent (bool) : A boolean flag which, if `True`, signals the plugin to
execute this code as quietly as possible. If `silent=True`, reply
will *not* broadcast output on the IOPUB channel.
stop_on_error (bool) : A boolean flag, which, if `True`, does not abort
the execution queue, if an exception is encountered. This allows
the queued execution of multiple `execute_request` messages, even
if they generate exceptions.
Returns:
(dict) : An `execute_request` message.
'''
header = get_header(source, target, 'execute_request')
content = {'command': command, 'silent': silent,
'stop_on_error': stop_on_error}
content.update(encode_content_data(data, mime_type=mime_type,
transfer_encoding=transfer_encoding))
return {'header': header, 'content': content}
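# Illustrative usage of the helpers above (plugin names and data are arbitrary):
#
#   request = get_execute_request('plugin_b', 'plugin_a', 'ping', data={'n': 3})
#   validate(request)             # raises jsonschema.ValidationError if malformed
#   decode_content_data(request)  # -> {'n': 3}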
def get_execute_reply(request, execution_count, status='ok', error=None,
data=None, mime_type='application/python-pickle',
transfer_encoding='BASE64', silent=None):
'''
Construct an `execute_reply` message.
Args:
request (dict) : The `execute_request` message corresponding to the
reply.
execution_count (int) : The number execution requests processed by
plugin, including the request corresponding to the reply.
status (str) : One of `'ok', 'error', 'abort'`.
error (exception) : Exception encountered during processing of request
(if applicable).
data (dict) : Result data.
mime_type (dict) : Mime-type of requested data serialization format.
By default, data is serialized using `pickle`.
silent (bool) : A boolean flag which, if `True`, signals the plugin to
execute this code as quietly as possible. If `silent=True`, reply
will *not* broadcast output on the IOPUB channel. If `None`,
silent setting from request will be used.
Returns:
(dict) : An `execute_reply` message.
'''
header = get_header(request['header']['target'],
request['header']['source'],
'execute_reply',
session=request['header']['session'])
if status == 'error' and error is None:
raise ValueError('If status is "error", `error` must be provided.')
content = {'execution_count': execution_count,
'status': status,
'command': request['content']['command'],
'silent': request['content'].get('silent')
if silent is None else silent}
content.update(encode_content_data(data, mime_type=mime_type,
transfer_encoding=transfer_encoding))
if error is not None:
content['error'] = str(error)
return {'header': header,
'parent_header': request['header'],
'content': content} | zmq-plugin | /zmq-plugin-0.2.post2.zip/zmq-plugin-0.2.post2/zmq_plugin/schema.py | schema.py |
from collections import OrderedDict
import inspect
import itertools
import json
import logging
import re
import zmq
import jsonschema
from .schema import validate, get_connect_reply, get_execute_reply
logger = logging.getLogger(__name__)
class Hub(object):
def __init__(self, query_uri, name='hub'):
'''
Central **hub** to connect a network of plugin instances.
## Thread-safety ##
All socket configuration, registration, etc. is performed *only* when
the `reset` method is called explicitly. Thus, all sockets are created
in the thread that calls the `reset` method.
        By creating sockets in the thread that calls `reset`, it is
straightforward to, for example, run a `Plugin` in a separate process
or thread.
Args:
query_uri (str) : The URI address of the **hub** query socket.
Plugins connect to the query socket to register and query
information about other sockets.
name (str) : Unique name across all plugins.
'''
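        # Typical construction (illustrative; mirrors examples/demo.py):
        #   hub = Hub('tcp://*:12345', 'hub')
        #   hub.reset()  # binds the query, command and publish sockets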
host_cre = re.compile(r'^(?P<transport>[^:]+)://(?P<host>[^:]+)(:(?P<port>\d+)?)')
match = host_cre.search(query_uri)
self.transport = match.group('transport')
self.host = match.group('host')
self.name = name
self.query_uri = query_uri
self.query_socket = None
# Command URI is determined at time of binding (bound to random port).
self.command_uri = None
self.command_socket = None
self.publish_uri = None
self.publish_socket = None
# Registry of connected plugins.
        self.registry = OrderedDict()
        # Registry of callbacks for `execute_reply` messages, keyed by the
        # `session` field of the request header (mirrors `PluginBase.callbacks`
        # and is used by `_process__execute_reply`).
        self.callbacks = OrderedDict()
@property
def logger(self):
'''
Return logger configured with a name in the following form:
<module_name>.<class_name>.<method_name>->"<self.name>"
'''
return logging.getLogger('.'.join((__name__, str(type(self).__name__),
inspect.stack()[1][3]))
+ '->"%s"' % self.name)
def reset(self):
'''
Reset the plugin state.
This includes:
- Resetting the execute reply identifier counter.
- Resetting the `publish`, `query`, and `command` sockets.
'''
self.execute_reply_id = itertools.count(1)
self.reset_publish_socket()
self.reset_query_socket()
self.reset_command_socket()
def reset_query_socket(self):
'''
Create and configure *query* socket (existing socket is destroyed if it
exists).
'''
context = zmq.Context.instance()
if self.query_socket is not None:
self.query_socket.close()
self.query_socket = None
# Create command socket and assign name as identity.
self.query_socket = zmq.Socket(context, zmq.REP)
self.query_socket.bind(self.query_uri)
def reset_command_socket(self):
'''
Create and configure *command* socket (existing socket is destroyed if
it exists).
'''
context = zmq.Context.instance()
if self.command_socket is not None:
self.command_socket.close()
self.command_socket = None
# Create command socket and assign name as identity.
self.command_socket = zmq.Socket(context, zmq.ROUTER)
self.command_socket.setsockopt(zmq.IDENTITY, bytes(self.name))
base_uri = "%s://%s" % (self.transport, self.host)
self.command_port = self.command_socket.bind_to_random_port(base_uri)
self.command_uri = base_uri + (':%s' % self.command_port)
def reset_publish_socket(self):
'''
Create and configure *publish* socket (existing socket is destroyed if
it exists).
'''
context = zmq.Context.instance()
if self.publish_socket is not None:
self.publish_socket.close()
self.publish_socket = None
# Create publish socket and assign name as identity.
self.publish_socket = zmq.Socket(context, zmq.PUB)
base_uri = "%s://%s" % (self.transport, self.host)
self.publish_port = self.publish_socket.bind_to_random_port(base_uri)
self.publish_uri = base_uri + (':%s' % self.publish_port)
def query_send(self, message):
self.query_socket.send(message)
def on_execute__register(self, request):
source = request['header']['source']
# Add name of client to registry.
self.registry[source] = source
self.logger.debug('Added "%s" to registry', source)
# Respond with registry contents.
return self.registry
def on_execute__ping(self, request):
return 'pong'
def on_query_recv(self, msg_frames):
'''
Process multi-part message from query socket.
This method may, for example, be called asynchronously as a callback in
run loop through a `ZMQStream(...)` configuration. See [here][1] for
more details.
Args:
msg_frames (list) : Multi-part ZeroMQ message.
Returns:
None
[1]: http://learning-0mq-with-pyzmq.readthedocs.org/en/latest/pyzmq/multisocket/tornadoeventloop.html
'''
# Publish raw message frames to *publish* socket.
try:
# Decode message from first (and only expected) frame.
request = json.loads(msg_frames[0])
# Validate message against schema.
validate(request)
except jsonschema.ValidationError:
self.logger.error('unexpected request', exc_info=True)
self.reset_query_socket()
try:
self.publish_socket.send_multipart(map(str,
[request['header']['source'],
request['header']['target'],
request['header']
['msg_type'],
msg_frames[0]]))
message_type = request['header']['msg_type']
if message_type == 'connect_request':
reply = self._process__connect_request(request)
elif message_type == 'execute_request':
reply = self._process__execute_request(request)
else:
raise RuntimeError('Unrecognized message type: %s' %
message_type)
reply['header']['source'] = self.name
reply_json = json.dumps(reply)
self.query_send(reply_json)
self.publish_socket.send_multipart(map(str,
[reply['header']['source'],
reply['header']['target'],
reply['header']
['msg_type'], reply_json]))
except:
self.logger.error('Error processing request.', exc_info=True)
self.reset_query_socket()
def on_command_recv(self, msg_frames):
'''
Process multi-part message from *command* socket.
Only `execute_request` and `execute_reply` messages are expected.
Messages are expected under the following scenarios:
1. A plugin submitting an execution request or reply to another
plugin.
2. A plugin submitting an execution request or reply to the **hub**.
In case 1, the `source` and `target` in the message header **MUST**
both be present in the local registry (i.e., `self.registry`).
In case 2, the `source` in the message header **MUST** be present in
the local registry (i.e., `self.registry`) and the `target` **MUST** be
equal to `self.name`.
This method may, for example, be called asynchronously as a callback in
run loop through a `ZMQStream(...)` configuration. See [here][1] for
more details.
Args:
msg_frames (list) : Multi-part ZeroMQ message.
Returns:
None
[1]: http://learning-0mq-with-pyzmq.readthedocs.org/en/latest/pyzmq/multisocket/tornadoeventloop.html
'''
try:
source, null, message_str = msg_frames
except:
self.logger.error('Unexpected message', exc_info=True)
return
try:
# Decode message from first (and only expected) frame.
message = json.loads(message_str, encoding='utf-8')
# Validate message against schema.
validate(message)
except jsonschema.ValidationError:
self.logger.error('Unexpected message', exc_info=True)
return
except UnicodeDecodeError:
            self.logger.error('Unicode decode error', exc_info=True)
return
# Message has been validated. Verify message source matches header.
try:
if not message['header']['source'] == source:
                raise NameError('Message source (%s) does not match header '
                                'source field (%s).' % (source,
message['header']['source']))
except:
self.logger.error('Source mismatch.', exc_info=True)
return
# Determine whether target is another plugin or the **hub** and process
# message accordingly.
target = message['header']['target']
if source in self.registry and target in self.registry:
# Both *source* and *target* are present in the local registry.
# Forward message to *target* plugin.
self._process__forwarding_command_message(message)
elif (source in self.registry and target == self.name):
# Message *source* is in the local registry and *target* is
# **hub**.
self._process__local_command_message(message)
else:
error_msg = ('Unsupported source(%s)/target(%s) '
'configuration. Either source and target both '
'present in the local registry, or the source '
'**MUST** be a plugin in the local registry and '
'the target **MUST** be the **hub**.' % (source,
target))
logger.info(error_msg)
if ((message['header']['msg_type'] == 'execute_request') and
not message['content'].get('silent')):
# Send error response to source of execution request.
reply = get_execute_reply(message,
self.execute_reply_id.next(),
error=IndexError(error_msg))
self._send_command_message(reply)
def _send_command_message(self, message):
'''
Serialize message to json and send to target over command socket.
Args:
message (dict) : Message to send.
Returns:
(str) : Message serialized as json. Can be used, for example, to
broadcast message over publish socket.
'''
message_json = json.dumps(message)
msg_frames = map(str, [message['header']['target'], '', message_json])
self.command_socket.send_multipart(msg_frames)
return message_json
def _process__forwarding_command_message(self, message):
'''
Process validated message from *command* socket, which is addressed
from one plugin to another.
In addition to forwarding the message to the *target* plugin through
the *command* socket, the message *MUST* be published to the *publish*
socket.
Args:
message (dict) : Message to forward to *target*.
Returns:
None
'''
message_json = self._send_command_message(message)
if 'content' in message and not message['content'].get('silent'):
msg_frames = [message['header']['source'],
message['header']['target'],
message['header']['msg_type'], message_json]
self.publish_socket.send_multipart(map(str, msg_frames))
def _process__local_command_message(self, message):
'''
Process validated message from *command* socket, where the **hub** is
either the *source* or the *target* (not both).
In addition to sending reply to the *target* plugin through the
*command* socket, the message *MUST* be published to the *publish*
socket.
Args:
message (dict) : Message to forward to *target*.
Returns:
None
'''
message_json = json.dumps(message)
if 'content' in message and not message['content'].get('silent'):
msg_frames = [message['header']['source'],
message['header']['target'],
message['header']['msg_type'], message_json]
self.publish_socket.send_multipart(map(str, msg_frames))
message_type = message['header']['msg_type']
if message_type == 'execute_request':
reply = self._process__execute_request(message)
reply_json = self._send_command_message(reply)
if not message['content'].get('silent'):
msg_frames = [reply['header']['source'],
reply['header']['target'],
reply['header']['msg_type'], reply_json]
self.publish_socket.send_multipart(map(str, msg_frames))
elif message_type == 'execute_reply':
self._process__execute_reply(message)
else:
self.logger.error('Unrecognized message type: %s', message_type)
def _process__connect_request(self, request):
'''
Process validated `connect_request` message, where the source field of
the header is used to add the plugin to the registry.
Args:
request (dict) : `connect_request` message
Returns:
(dict) : `connect_reply` message.
'''
source = request['header']['source']
# Add name of client to registry.
self.registry[source] = source
# Send list of registered clients.
socket_info = {'command': {'uri': self.command_uri,
'port': self.command_port,
'name': self.name},
'publish': {'uri': self.publish_uri,
'port': self.publish_port}}
reply = get_connect_reply(request, content=socket_info)
return validate(reply)
def _process__execute_request(self, request):
'''
Process validated `execute_request` message, which includes the name of
the command to execute.
If a method with the name `on_execute__<command>` exists, call the
method on the `request` and send the return value wrapped in an
`execute_reply` message to the source of the request.
        If no matching method exists, or if an exception is encountered
while processing the command, send `execute_reply` message with
corresponding error information to the source of the request.
Args:
request (dict) : `execute_request` message
Returns:
(dict) : `execute_reply` message
'''
try:
func = getattr(self, 'on_execute__' +
request['content']['command'], None)
if func is None:
error = NameError('Unrecognized command: %s' %
request['content']['command'])
reply = get_execute_reply(request,
self.execute_reply_id.next(),
error=error)
else:
result = func(request)
reply = get_execute_reply(request,
self.execute_reply_id.next(),
data=result)
return validate(reply)
except (Exception, ), exception:
return get_execute_reply(request, self.execute_reply_id.next(),
error=exception)
def _process__execute_reply(self, reply):
'''
Process validated `execute_reply` message.
If a callback function was registered during the execution request call
the callback function on the reply message.
Args:
reply (dict) : `execute_reply` message
Returns:
None
'''
try:
session = reply['header']['session']
if session in self.callbacks:
# A callback was registered for the corresponding request.
# Call callback with reply.
func = self.callbacks[session]
func(reply)
else:
# No callback registered for session.
pass
except:
self.logger.error('Processing error.', exc_info=True) | zmq-plugin | /zmq-plugin-0.2.post2.zip/zmq-plugin-0.2.post2/zmq_plugin/hub.py | hub.py |
import pprint
from multiprocessing import Process
import logging
import zmq
from zmq.eventloop import ioloop, zmqstream
from tornado.ioloop import PeriodicCallback
logger = logging.getLogger(__name__)
def run_hub(task):
logging.basicConfig(level=logging.DEBUG)
task.reset()
# Register on receive callback.
task.command_stream = zmqstream.ZMQStream(task.command_socket)
task.command_stream.on_recv(task.on_command_recv)
# Register on receive callback.
task.query_stream = zmqstream.ZMQStream(task.query_socket)
task.query_stream.on_recv(task.on_query_recv)
def dump_registry():
print '\n' + (72 * '*') + '\n'
print task.registry
print '\n' + (72 * '*') + '\n'
try:
ioloop.install()
logger.info('Starting hub ioloop')
PeriodicCallback(dump_registry, 100,
io_loop=ioloop.IOLoop.instance()).start()
ioloop.IOLoop.instance().start()
except RuntimeError:
logger.warning('IOLoop already running.')
def run_plugin(task):
logging.basicConfig(level=logging.DEBUG)
task.reset()
# Register on receive callback.
task.command_stream = zmqstream.ZMQStream(task.command_socket)
task.command_stream.on_recv(task.on_command_recv)
# Register on receive callback.
task.query_stream = zmqstream.ZMQStream(task.subscribe_socket)
task.query_stream.on_recv(task.on_subscribe_recv)
try:
ioloop.install()
logger.info('Starting plugin %s ioloop' % task.name)
ioloop.IOLoop.instance().start()
except RuntimeError:
logger.warning('IOLoop already running.')
if __name__ == '__main__':
import time
from ..hub import Hub
from ..plugin import Plugin
logging.basicConfig(level=logging.DEBUG)
hub_process = Process(target=run_hub,
args=(Hub('tcp://*:12345', 'hub') ,))
hub_process.daemon = False
hub_process.start()
plugin_process = Process(target=run_plugin,
args=[Plugin('plugin_a',
'tcp://localhost:12345')])
plugin_process.daemon = False
plugin_process.start()
print '\n' + (72 * '=') + '\n'
plugin_b = Plugin('plugin_b', 'tcp://localhost:12345')
plugin_b.reset()
#print '\n' + (72 * '=') + '\n'
#for i in xrange(3):
## Send "ping" from `'plugin_b'` `'plugin_a'`
#logger.info('''Send "ping" from `'plugin_b'` `'plugin_a'`''')
#plugin_b.send_command('plugin_a', 'ping')
#logger.info('''Wait for command response to be received by `'plugin_b'`''')
#frames = plugin_b.command_recv()
#plugin_b.on_command_recv(frames)
#print '\n' + (72 * '-') + '\n'
#print '\n# Plugin B subscribed message dump #\n'
#while True:
#try:
#logger.info(pprint.pformat(plugin_b.subscribe_socket
#.recv_pyobj(zmq.NOBLOCK)))
#except zmq.Again:
#break
plugin_process.terminate()
hub_process.terminate() | zmq-plugin | /zmq-plugin-0.2.post2.zip/zmq-plugin-0.2.post2/zmq_plugin/examples/demo.py | demo.py |
from multiprocessing import Process
import logging
import sys
from . import verify_tornado
verify_tornado()
import zmq
from zmq.eventloop import ioloop, zmqstream
logger = logging.getLogger(__name__)
def run_plugin(task, log_level=None):
if log_level is not None:
logging.basicConfig(level=log_level)
task.reset()
# Register on receive callback.
task.command_stream = zmqstream.ZMQStream(task.command_socket)
task.command_stream.on_recv(task.on_command_recv)
# Register on receive callback.
task.query_stream = zmqstream.ZMQStream(task.subscribe_socket)
task.query_stream.on_recv(task.on_subscribe_recv)
try:
ioloop.install()
logger.info('Starting plugin %s ioloop' % task.name)
ioloop.IOLoop.instance().start()
except RuntimeError:
logger.warning('IOLoop already running.')
def run_plugin_process(uri, name, subscribe_options, log_level):
from ..plugin import Plugin
plugin_process = Process(target=run_plugin,
args=(Plugin(name, uri, subscribe_options),
log_level))
plugin_process.daemon = False
plugin_process.start()
def parse_args(args=None):
"""Parses arguments, returns (options, args)."""
from argparse import ArgumentParser
if args is None:
args = sys.argv
parser = ArgumentParser(description='ZeroMQ Plugin process.')
log_levels = ('critical', 'error', 'warning', 'info', 'debug', 'notset')
parser.add_argument('-l', '--log-level', type=str, choices=log_levels,
default='info')
parser.add_argument('-s', '--subscribe-opts', type=str, default=None)
parser.add_argument('hub_uri')
parser.add_argument('name', type=str)
args = parser.parse_args()
args.log_level = getattr(logging, args.log_level.upper())
if args.subscribe_opts is not None:
        # Expected format: comma-separated 'OPTION:value' pairs, e.g. 'SUBSCRIBE:'.
        subscribe_opts = dict([[v.strip() for v in kv.split(':')]
                               for kv in args.subscribe_opts.split(',')])
args.subscribe_opts = dict([(getattr(zmq, k), v)
for k, v in subscribe_opts.iteritems()])
return args
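# Example invocation (illustrative; the module path and the hub URI/plugin name
# are assumptions based on this file's location in the package):
#   python -m zmq_plugin.bin.plugin tcp://localhost:12345 plugin_a -l debug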
if __name__ == '__main__':
args = parse_args()
logging.basicConfig(level=args.log_level)
run_plugin_process(args.hub_uri, args.name, args.subscribe_opts,
args.log_level) | zmq-plugin | /zmq-plugin-0.2.post2.zip/zmq-plugin-0.2.post2/zmq_plugin/bin/plugin.py | plugin.py |
import json
import logging
import sys
import time
from zmq_plugin.plugin import Plugin
from zmq_plugin.schema import validate
import arrow
import IPython
import jsonschema
import zmq
logger = logging.getLogger(__name__)
def run_plugin(plugin, log_level=None):
if log_level is not None:
logging.basicConfig(level=log_level)
plugin.reset()
def get_message():
msg_frames = plugin.subscribe_socket.recv_multipart(zmq.NOBLOCK)
message_str = msg_frames[-1]
try:
# Decode message from first (and only expected) frame.
message = json.loads(message_str)
# Validate message against schema.
validate(message)
except jsonschema.ValidationError:
logger.error('Unexpected message', exc_info=True)
raise
else:
return message
start = arrow.now()
while True:
try:
try:
message = get_message()
except zmq.Again:
time.sleep(.1)
continue
msg_timestamp = arrow.get(message['header']['date'])
delta_time = (msg_timestamp - start).total_seconds()
time_info = msg_timestamp.strftime('%H:%M:%S')
if delta_time > .25:
time_info += (' +%-5.1f' % delta_time)
print 72 * '-'
if message['header']['msg_type'] == 'execute_reply':
print (time_info +
' [{header[target]}<-{header[source]}] '
'{content[command]}'.format(**message))
elif 'content' in message:
print (time_info +
' [{header[source]}->{header[target]}] '
'{content[command]}'.format(**message))
else:
print (time_info +
' [{header[source]}->{header[target]}] '
'<{header[msg_type]}>'.format(**message))
start = arrow.now()
except KeyboardInterrupt:
IPython.embed()
def parse_args(args=None):
"""Parses arguments, returns (options, args)."""
from argparse import ArgumentParser
if args is None:
args = sys.argv
parser = ArgumentParser(description='ZeroMQ Plugin process.')
log_levels = ('critical', 'error', 'warning', 'info', 'debug', 'notset')
parser.add_argument('-l', '--log-level', type=str, choices=log_levels,
default='info')
parser.add_argument('hub_uri')
parser.add_argument('name', type=str)
args = parser.parse_args()
args.log_level = getattr(logging, args.log_level.upper())
return args
if __name__ == '__main__':
args = parse_args()
plugin = Plugin(args.name, args.hub_uri, {zmq.SUBSCRIBE: ''})
run_plugin(plugin, args.log_level) | zmq-plugin | /zmq-plugin-0.2.post2.zip/zmq-plugin-0.2.post2/zmq_plugin/bin/monitor.py | monitor.py |
import os
import shutil
import zmq.auth
import argparse
import uuid
def generate_certificates(base_dir, users, overwrite):
''' Generate client and server CURVE certificate files'''
keys_dir = os.path.join(base_dir, 'certificates')
public_keys_dir = os.path.join(base_dir, 'public_keys')
secret_keys_dir = os.path.join(base_dir, 'private_keys')
# Create directories for certificates, remove old content if necessary
for d in [keys_dir, public_keys_dir, secret_keys_dir]:
if os.path.exists(d):
if overwrite:
shutil.rmtree(d)
os.mkdir(d)
else:
os.mkdir(d)
client_id = str(uuid.uuid4())
if users == 'all':
# create new keys in certificates dir
server_public_file, server_secret_file = zmq.auth.create_certificates(
keys_dir, "server"
)
client_public_file, client_secret_file = zmq.auth.create_certificates(
keys_dir, client_id
)
elif users == 'client':
client_public_file, client_secret_file = zmq.auth.create_certificates(
keys_dir, client_id
)
elif users == 'server':
server_public_file, server_secret_file = zmq.auth.create_certificates(
keys_dir, "server"
)
# move public keys to appropriate directory
for key_file in os.listdir(keys_dir):
if key_file.endswith(".key"):
shutil.move(
os.path.join(keys_dir, key_file), os.path.join(public_keys_dir, '.')
)
# move secret keys to appropriate directory
for key_file in os.listdir(keys_dir):
if key_file.endswith(".key_secret"):
shutil.move(
os.path.join(keys_dir, key_file), os.path.join(secret_keys_dir, '.')
)
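# Example invocation (illustrative paths and values):
#   python generate_certificates.py --path ./keys --users all --overwrite 1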
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--path', required=True, help="path where the keys are generated", type=str)
    parser.add_argument('--users', nargs='?', help="generate for users: all, client or server. Default is client", type=str, const=1, default='client')
    parser.add_argument('--overwrite', nargs='?', help="overwrite existing keys. Default is False", type=bool, const=1, default=False)
args = parser.parse_args()
path = args.path
users = args.users
overwrite = args.overwrite
print(f'creating new certificates in {args.path}')
if zmq.zmq_version_info() < (4, 0):
raise RuntimeError(
"Security is not supported in libzmq version < 4.0. libzmq version {0}".format(
zmq.zmq_version()
)
)
generate_certificates(path, users, overwrite) | zmq-service-tools | /zmq_service_tools-0.25-py3-none-any.whl/service_tools/generate_certificates.py | generate_certificates.py |
import uuid
import os
import zmq
import zmq.auth
from zmq.auth.thread import ThreadAuthenticator
import logging
import time
from configparser import ConfigParser
from .message import Message
import socket
import sys
import socketserver
class WorkerConfig(object):
@classmethod
def create_new(cls, config_path, *args, **kwargs):
"""
        Creates a new worker config file at config_path.
        :param config_path: path where to store the config file; example: 'config.ini'
        :param args:
        :param kwargs:
        :keyword id: id of the worker; uuid.UUID4
        :keyword name: name of the worker; str
        :keyword ip: ip address of the worker; str; examples: 'localhost', '127.0.0.1'
        :keyword port: port of the worker; int; example: 8005
        :keyword public_keys_dir: directory where public keys are stored; str
        :keyword secret_keys_dir: directory where secret keys are stored; str
        :keyword python_path: path of the python interpreter the worker uses; str; default: ''
        :keyword log_dir: directory for log files; str
        :keyword logging_mode: logging level ('DEBUG', 'INFO', 'WARN' or 'ERROR'); str; default: 'DEBUG'
        :return: WorkerConfig instance for the newly created config file
"""
config = ConfigParser(allow_no_value=True)
if config_path is None:
raise ValueError(f'config_path is None')
config.add_section('main')
config.set('main', 'id', str(kwargs.get('id', uuid.uuid4())))
config.set('main', 'name', kwargs.get('name', None))
config.set('main', 'ip', str(kwargs.get('ip', None)))
config.set('main', 'port', str(kwargs.get('port', None)))
config.set('main', 'public_keys_dir', str(kwargs.get('public_keys_dir', None)))
config.set('main', 'secret_keys_dir', str(kwargs.get('secret_keys_dir', None)))
config.set('main', 'python_path', str(kwargs.get('python_path', '')))
config.add_section('logging')
config.set('logging', 'log_dir', kwargs.get('log_dir', None))
config.set('logging', 'logging_mode', kwargs.get('logging_mode', 'DEBUG'))
if not os.path.isfile(config_path):
f = open(config_path, 'a')
f.close()
with open(config_path, 'w') as f:
config.write(f)
return cls(config_path)
def __init__(self, config_path, *args, **kwargs):
self.config = ConfigParser()
self.config_path = config_path
if self.config_path is None:
raise ValueError(f'config_path is None')
self._id = None
self._name = None
self._public_keys_dir = None
self._secret_keys_dir = None
self._ip = None
self._port = None
self._python_path = None
# logging
self._log_dir = None
self._logging_mode = None
if not os.path.isfile(self.config_path):
raise Exception(f'{self.config_path} does not exist')
self.read_config()
@property
def id(self):
if self._id is None:
self.read_config()
return self._id
@id.setter
def id(self, value):
self.config.set('main', 'id', str(value))
self.write_config()
self._id = value
@property
def name(self):
if self._name is None:
self.read_config()
return self._name
@name.setter
def name(self, value):
self.config.set('main', 'name', value)
self.write_config()
self._name = value
@property
def ip(self):
if self._ip is None:
self.read_config()
return self._ip
@ip.setter
def ip(self, value):
self.config.set('main', 'ip', str(value))
self.write_config()
self._ip = value
@property
def port(self):
if self._port is None:
self.read_config()
return self._port
@port.setter
def port(self, value):
self.config.set('main', 'port', str(value))
self.write_config()
self._port = value
@property
def python_path(self):
if self._python_path is None:
self.read_config()
return self._python_path
@python_path.setter
def python_path(self, value):
self.config.set('main', 'python_path', str(value))
self.write_config()
self._python_path = value
@property
def public_keys_dir(self):
if self._public_keys_dir is None:
self.read_config()
return self._public_keys_dir
@public_keys_dir.setter
def public_keys_dir(self, value):
self.config.set('main', 'public_keys_dir', value)
self.write_config()
self._public_keys_dir = value
@property
def secret_keys_dir(self):
if self._secret_keys_dir is None:
self.read_config()
return self._secret_keys_dir
@secret_keys_dir.setter
def secret_keys_dir(self, value):
self.config.set('main', 'secret_keys_dir', value)
self.write_config()
self._secret_keys_dir = value
@property
def log_dir(self):
if self._log_dir is None:
self.read_config()
return self._log_dir
@log_dir.setter
def log_dir(self, value):
self.config.set('logging', 'log_dir', value)
self.write_config()
self._log_dir = value
@property
def logging_mode(self):
if self._logging_mode is None:
self.read_config()
return self._logging_mode
@logging_mode.setter
def logging_mode(self, value):
self.config.set('logging', 'logging_mode', value)
self.write_config()
self._logging_mode = value
def read_config(self):
if not os.path.isfile(self.config_path):
raise FileNotFoundError(f'{self.config_path} does not exist')
self.config.read(self.config_path)
try:
self._public_keys_dir = self.config.get('main', 'public_keys_dir', fallback=None)
except Exception as e:
raise Exception(f'Error: public_keys_dir in {self.config_path} does not exist')
try:
self._secret_keys_dir = self.config.get('main', 'secret_keys_dir', fallback=None)
except Exception as e:
raise Exception(f'Error: secret_keys_dir in {self.config_path} does not exist')
try:
self._port = self.config.getint('main', 'port', fallback=None)
except Exception as e:
print(f'port in {self.config_path} does not exist')
try:
self._name = self.config.get('main', 'name', fallback=None)
except Exception as e:
print(f'name in {self.config_path} does not exist')
try:
self._id = uuid.UUID(self.config.get('main', 'id', fallback=None))
except Exception as e:
raise Exception(f'Error: id in {self.config_path} does not exist')
try:
self._ip = self.config.get('main', 'ip', fallback=None)
except Exception as e:
print(f'ip in {self.config_path} does not exist. Assume localhost...')
self._ip = 'localhost'
try:
self._python_path = self.config.get('main', 'python_path', fallback=None)
except Exception as e:
print(f'python_path in {self.config_path} does not exist. Assume system python')
self._python_path = 'python'
##############################################
# logging
##############################################
try:
self._log_dir = self.config.get('logging', 'log_dir', fallback=None)
except Exception as e:
print(f'log_dir in {self.config_path} does not exist')
self._log_dir = None
try:
self._logging_mode = self.config.get('logging', 'logging_mode', fallback=None)
except Exception as e:
print(f'logging_mode in {self.config_path} does not exist')
self._logging_mode = 'DEBUG'
def write_config(self):
try:
with open(self.config_path, 'w') as f:
self.config.write(f)
except Exception as e:
print(f'error writing config: {e}')
class Worker(WorkerConfig):
def __init__(self, config_path, *args, **kwargs):
WorkerConfig.__init__(self, config_path, *args, **kwargs)
self.logger = None
self.init_logger()
self.fh = None # logger file handler
self.ch = None # logger console channel
self.context = zmq.Context()
self.socket = self.context.socket(zmq.REP)
@property
def address(self):
return f'tcp://{self.ip}:{self.port}'
@property
def logging_mode(self):
return self._logging_mode
@logging_mode.setter
def logging_mode(self, value):
self._logging_mode = value
# if self.logging_mode == 'DEBUG':
# level = logging.DEBUG
# elif self.logging_mode == 'INFO':
# level = logging.INFO
# elif self.logging_mode == 'WARN':
# level = logging.WARN
# elif self.logging_mode == 'ERROR':
# level = logging.ERROR
# else:
# level = logging.INFO
#
# self.logger.setLevel(level)
# self.fh.setLevel(level)
# self.ch.setLevel(level)
self.logger.info(f'logger level set to {value}')
self.update_logging_mode()
def get_free_port(self):
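# Binding to port 0 lets the OS pick a free port; the chosen port is read
# back from server_address and stored as this worker's port.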
with socketserver.TCPServer(("localhost", 0), None) as s:
self.port = s.server_address[1]
return self.port
def init_logger(self):
self.logger = logging.getLogger(str(self.id))
log_filename = os.path.join(self.log_dir, f'worker_{self.id}.log')
self.fh = logging.FileHandler(log_filename) # create file handler which logs even debug messages
self.ch = logging.StreamHandler() # create console handler
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
self.fh.setFormatter(formatter)
self.ch.setFormatter(formatter)
# add the handlers to the logger
self.logger.addHandler(self.fh)
self.logger.addHandler(self.ch)
self.update_logging_mode()
self.logger.info(f'logger started')
def update_logging_mode(self):
if self.logging_mode == 'DEBUG':
level = logging.DEBUG
elif self.logging_mode == 'INFO':
level = logging.INFO
elif self.logging_mode == 'WARN':
level = logging.WARN
elif self.logging_mode == 'ERROR':
level = logging.ERROR
else:
level = logging.INFO
self.logger.setLevel(level)
self.fh.setLevel(level)
self.ch.setLevel(level)
self.logger.info(f'logger level is {self.logging_mode}')
def start(self):
self.logger.info(f'starting on: {self.address}')
try:
self.socket.connect(self.address)
except Exception as e:
self.logger.error(f'error while starting worker: \n{e}')
return e
self.logger.info(f'worker started')
while True:
try:
message = self.socket.recv_pyobj()
self.logger.debug(f'receiving message')
except Exception as e:
self.logger.error(f'error receiving message:\n{e}')
self.socket.send_pyobj(e)
continue
try:
self.socket.send_pyobj(self.process_request(message))
except Exception as e:
self.logger.error(f'error while processing {message}:\n{e}')
self.socket.send_pyobj(e)
def shutdown(self):
sys.exit()
def process_request(self, message):
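# Dispatch by name: the incoming message carries a method name plus args and
# kwargs, which are applied to the matching method of this worker
# (e.g. method='check' with empty args/kwargs calls self.check() -> True).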
self.logger.debug(f'hello from new worker. processing request...')
method = getattr(self, message.method)
self.logger.debug(f'method to execute: {method}')
return method(*message.args, **message.kwargs)
def check(self, *args, **kwargs):
return True
# def get_ip(self, *args, **kwargs):
# return 'try to get your ip'
# hostname = "max"
# ip_address = socket.gethostbyname(hostname)
def __del__(self):
self.logger.info(f'deleted') | zmq-service-tools | /zmq_service_tools-0.25-py3-none-any.whl/service_tools/worker.py | worker.py |
import uuid
import json
import zmq
import zmq.auth
from zmq.auth.thread import ThreadAuthenticator
import logging
from .worker import Worker
from .message import Message
from service_tools import start_worker
from configparser import ConfigParser
import os
from os import path
import socket
import shutil
import subprocess
class ServerConfig(object):
@classmethod
def create_new(cls, config_path, *args, **kwargs):
"""
Creates a new server config file at config_path.
:param config_path: path where to store the config file; example: 'config.ini'
:param args:
:param kwargs:
:keyword id: id of the server; uuid.UUID4
:keyword name: name of the server; str
:keyword ip: ip address of the server; str; examples: 'localhost', '127.0.0.1'
:keyword port: port of the server; str; examples: 8005
:keyword backend_port: port used for communication with the workers; int; examples: 8006; optional; as default a free port between 9001 and 9050 is choosen
:keyword public_keys_dir: directory where public_keys are stored; str
:keyword secret_keys_dir: directory where secret_keys are stored; str
:keyword num_workers: number of workers which are created; int
:keyword log_dir: directory of log; str
:return:
"""
config = ConfigParser(allow_no_value=True)
if config_path is None:
raise ValueError(f'config_path is None')
config.add_section('main')
config.set('main', 'id', str(kwargs.get('id', uuid.uuid4())))
config.set('main', 'name', kwargs.get('name', None))
config.set('main', 'ip', kwargs.get('ip', None))
config.set('main', 'port', str(kwargs.get('port', -1)))
config.set('main', 'secure', str(kwargs.get('secure', True)))
config.set('main', 'backend_port', str(kwargs.get('backend_port', None)))
config.set('main', 'public_keys_dir', kwargs.get('public_keys_dir', None))
config.set('main', 'secret_keys_dir', kwargs.get('secret_keys_dir', None))
config.add_section('workers')
config.set('workers', 'num_workers', str(kwargs.get('num_workers', 1)))
config.set('workers', 'auto_start', str(kwargs.get('auto_start', True)))
config.set('workers', 'worker_config_paths', json.dumps(kwargs.get('worker_config_paths', None)))
config.set('workers', 'worker_script_path', str(kwargs.get('worker_script_path', None)))
config.add_section('logging')
config.set('logging', 'log_dir', kwargs.get('log_dir', None))
config.set('logging', 'logging_mode', kwargs.get('logging_mode', 'DEBUG'))
if not path.isfile(config_path):
f = open(config_path, 'a')
f.close()
with open(config_path, 'w') as f:
config.write(f)
return cls(config_path)
def __init__(self, config_path, *args, **kwargs):
self.config = ConfigParser()
self.config_path = config_path
if self.config_path is None:
raise ValueError(f'config_path is None')
self._id = None
self._name = None
self._secure = None
self._public_keys_dir = None
self._secret_keys_dir = None
self._ip = None
try:
self._ip = get_ip_address()
except Exception as e:
print(e)
self._port = None
self._backend_port = None
# workers
self._num_workers = None
self._auto_start = None
self._worker_config_paths = None
self._worker_script_path = None
# logging
self._log_dir = None
self._logging_mode = None
if not path.isfile(self.config_path):
raise Exception(f'{self.config_path} does not exist')
self.read_config()
@property
def id(self):
if self._id is None:
self.read_config()
return self._id
@id.setter
def id(self, value):
self.config.set('main', 'id', str(value))
self.write_config()
self._id = value
@property
def secure(self):
if self._secure is None:
self.read_config()
return self._secure
@secure.setter
def secure(self, value):
self.config.set('main', 'secure', value)
self.write_config()
self._secure = value
@property
def name(self):
if self._name is None:
self.read_config()
return self._name
@name.setter
def name(self, value):
self.config.set('main', 'name', value)
self.write_config()
self._name = value
@property
def backend_port(self):
if self._backend_port is None:
self.read_config()
return self._backend_port
@backend_port.setter
def backend_port(self, value):
self.config.set('main', 'backend_port', str(value))
self.write_config()
self._backend_port = value
@property
def ip(self):
if self._ip is None:
self.read_config()
return self._ip
@ip.setter
def ip(self, value):
self.config.set('main', 'ip', str(value))
self.write_config()
self._ip = value
@property
def port(self):
if self._port is None:
self.read_config()
return self._port
@port.setter
def port(self, value):
self.config.set('main', 'port', str(value))
self.write_config()
self._port = value
@property
def public_keys_dir(self):
if self._public_keys_dir is None:
self.read_config()
return self._public_keys_dir
@public_keys_dir.setter
def public_keys_dir(self, value):
self.config.set('main', 'public_keys_dir', value)
self.write_config()
self._public_keys_dir = value
@property
def secret_keys_dir(self):
if self._secret_keys_dir is None:
self.read_config()
return self._secret_keys_dir
@secret_keys_dir.setter
def secret_keys_dir(self, value):
self.config.set('main', 'secret_keys_dir', value)
self.write_config()
self._secret_keys_dir = value
@property
def num_workers(self):
if self._num_workers is None:
self.read_config()
return self._num_workers
@num_workers.setter
def num_workers(self, value):
self.config.set('workers', 'num_workers', str(value))
self.write_config()
self._num_workers = value
@property
def worker_config_paths(self):
if self._worker_config_paths is None:
self.read_config()
return self._worker_config_paths
@worker_config_paths.setter
def worker_config_paths(self, value):
self.config.set('workers', 'worker_config_paths', json.dumps(value))
self.write_config()
self._worker_config_paths = value
@property
def worker_script_path(self):
if self._worker_script_path is None:
self.read_config()
return self._worker_script_path
@worker_script_path.setter
def worker_script_path(self, value):
self.config.set('workers', 'worker_script_path', str(value))
self.write_config()
self._worker_script_path = value
@property
def auto_start(self):
if self._auto_start is None:
self.read_config()
return self._auto_start
@auto_start.setter
def auto_start(self, value):
self.config.set('workers', 'auto_start', str(value))
self.write_config()
self._auto_start = value
@property
def log_dir(self):
if self._log_dir is None:
self.read_config()
return self._log_dir
@log_dir.setter
def log_dir(self, value):
self.config.set('logging', 'log_dir', value)
self.write_config()
self._log_dir = value
@property
def logging_mode(self):
if self._logging_mode is None:
self.read_config()
return self._logging_mode
@logging_mode.setter
def logging_mode(self, value):
self.config.set('logging', 'logging_mode', value)
self.write_config()
self._logging_mode = value
def read_config(self):
if not path.isfile(self.config_path):
raise FileNotFoundError(f'{self.config_path} does not exist')
self.config.read(self.config_path)
try:
self._secure = self.config.getboolean('main', 'secure')
except Exception as e:
raise Exception(f'Error: secure in {self.config_path} does not exist')
try:
self._public_keys_dir = self.config.get('main', 'public_keys_dir')
except Exception as e:
raise Exception(f'Error: public_keys_dir in {self.config_path} does not exist')
try:
self._secret_keys_dir = self.config.get('main', 'secret_keys_dir')
except Exception as e:
raise Exception(f'Error: secret_keys_dir in {self.config_path} does not exist')
try:
self._ip = self.config.get('main', 'ip')
except Exception as e:
raise Exception(f'Error: ip in {self.config_path} does not exist')
try:
self._port = self.config.getint('main', 'port')
except Exception as e:
raise Exception(f'Error: port in {self.config_path} does not exist')
try:
self._backend_port = self.config.getint('main', 'backend_port')
except Exception as e:
print(f'backend_port in {self.config_path} does not exist')
try:
self._name = self.config.get('main', 'name')
except Exception as e:
print(f'name in {self.config_path} does not exist')
try:
self._id = uuid.UUID(self.config.get('main', 'id'))
except Exception as e:
raise Exception(f'Error: id in {self.config_path} does not exist')
##############################################
# workers
##############################################
try:
self._num_workers = self.config.getint('workers', 'num_workers')
except Exception as e:
print(f'num_workers in {self.config_path} does not exist')
self._num_workers = 1
try:
self._auto_start = self.config.getboolean('workers', 'auto_start')
except Exception as e:
print(f'auto_start in {self.config_path} does not exist')
self._auto_start = True
try:
worker_config_paths = json.loads(self.config.get('workers', 'worker_config_paths'))
if not isinstance(worker_config_paths, list):
worker_config_paths = [worker_config_paths]
self.worker_config_paths = worker_config_paths
except Exception as e:
print(f'worker_config_paths in {self.config_path} does not exist')
self._worker_config_paths = []
try:
self._worker_script_path = self.config.get('workers', 'worker_script_path')
except Exception as e:
print(f'worker_script_path in {self.config_path} does not exist')
self._worker_script_path = None
##############################################
# logging
##############################################
try:
self._log_dir = self.config.get('logging', 'log_dir')
except Exception as e:
print(f'log_dir in {self.config_path} does not exist')
self._log_dir = None
try:
self._logging_mode = self.config.get('logging', 'logging_mode')
except Exception as e:
print(f'logging_mode in {self.config_path} does not exist')
self._logging_mode = 'DEBUG'
def write_config(self):
try:
with open(self.config_path, 'w') as f:
self.config.write(f)
except Exception as e:
print(f'error writing config: {e}')
class Server(ServerConfig):
def __init__(self, config_path, *args, **kwargs):
ServerConfig.__init__(self, config_path, *args, **kwargs)
self.fh = None # logger file handler
self.ch = None # logger console channel
self.logger = None
self.init_logger()
self.logging_mode = 'DEBUG'
ctx = zmq.Context.instance()
# Start an authenticator for this context.
if self.secure:
self.auth = ThreadAuthenticator(ctx)
self.auth.start()
# auth.allow('127.0.0.1')
# Tell authenticator to use the certificate in a directory
self.auth.configure_curve(domain='*', location=self.public_keys_dir)
server_secret_file = os.path.join(self.secret_keys_dir, "server.key_secret")
server_public, server_secret = zmq.auth.load_certificate(server_secret_file)
self.server = ctx.socket(zmq.REP)
ctx = zmq.Context.instance()
self.frontend = ctx.socket(zmq.ROUTER)
if self.secure:
self.frontend.curve_secretkey = server_secret
self.frontend.curve_publickey = server_public
self.frontend.curve_server = True # must come before bind
if (self.port is None) or (self.port <= 0):
self.port = self.frontend.bind_to_random_port('tcp://*', min_port=6001, max_port=6150, max_tries=100)
else:
self.frontend.bind(f'tcp://*:{self.port}')
self.workers = []
context = zmq.Context()
self.backend = context.socket(zmq.DEALER)
self.backend_port = self.backend.bind_to_random_port('tcp://*', min_port=9001, max_port=9050, max_tries=100)
self.logger.info(f'created backend on port {self.backend_port}')
self.logger.info(f'starting {self.num_workers} workers...')
if self.auto_start:
self.logger.info(f'Auto start active')
self.start_workers()
self.logger.info(f'all workers started')
def init_logger(self):
self.logger = logging.getLogger(str(self.id))
log_filename = os.path.join(self.log_dir, f'server_{self.id}.log')
self.fh = logging.FileHandler(log_filename) # create file handler which logs even debug messages
self.ch = logging.StreamHandler() # create console handler
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
self.fh.setFormatter(formatter)
self.ch.setFormatter(formatter)
# add the handlers to the logger
self.logger.addHandler(self.fh)
self.logger.addHandler(self.ch)
self.logger.info(f'logger started')
@property
def logging_mode(self):
return self._logging_mode
@logging_mode.setter
def logging_mode(self, value):
self.config.set('logging', 'logging_mode', value)
self.write_config()
self._logging_mode = value
if self.logging_mode == 'DEBUG':
level = logging.DEBUG
elif self.logging_mode == 'INFO':
level = logging.INFO
elif self.logging_mode == 'WARN':
level = logging.WARN
elif self.logging_mode == 'ERROR':
level = logging.ERROR
else:
level = logging.INFO
self.logger.setLevel(level)
self.fh.setLevel(level)
self.ch.setLevel(level)
self.logger.info(f'logger level set to {value}')
def add_worker(self, config_path, python_path):
# new_worker = Worker(config_path)
# self.workers.append(new_worker)
# new_worker.start()
if self.worker_script_path is None:
script_path = os.path.abspath(start_worker.__file__)
else:
script_path = self.worker_script_path
if not python_path:
python_path = 'python'
if os.path.isfile(script_path):
try:
self.logger.debug(f'starting worker:\n python path: {python_path}\n script path: {script_path}\n config path: {config_path}')
p = subprocess.Popen(f'{python_path} {script_path} --config_file={config_path}', shell=True)
self.logger.info(f'worker started: {p.poll()}')
self.workers.append(p)
except Exception as e:
self.logger.error(f'Error starting worker: {e}')
else:
self.logger.error(f'Error starting worker: {script_path} not found')
return
def start_workers(self):
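# For each worker index: if a dedicated worker config exists, reuse it;
# otherwise copy the first config, give the copy a fresh id, then point the
# config at this server's ip and backend port before launching the worker.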
for i in range(self.num_workers):
if self.worker_config_paths.__len__() == 0:
self.logger.error(f'error while starting workers. No worker config found')
return
if (self.worker_config_paths.__len__() - 1) < i:
worker_config_path = self.worker_config_paths[0]
dirname = os.path.dirname(worker_config_path)
filename = os.path.basename(worker_config_path)
base_filename = os.path.splitext(filename)[0]
extension = os.path.splitext(filename)[1]
new_worker_config_path = os.path.join(dirname, base_filename + f'__worker{i}_copy' + extension)
# copy the config and overwrite the id
shutil.copy2(worker_config_path, new_worker_config_path)
worker_config = ConfigParser()
worker_config.read(new_worker_config_path)
worker_config.set('main', 'id', str(uuid.uuid4()))
try:
with open(new_worker_config_path, 'w') as f:
worker_config.write(f)
except Exception as e:
print(f'error writing worker_config: {e}')
worker_config_path = new_worker_config_path
elif (self.worker_config_paths.__len__() - 1) >= i:
# overwrite port:
worker_config_path = self.worker_config_paths[i]
else:
self.logger.error(f'error while starting workers. No worker config found')
continue
worker_config = ConfigParser()
worker_config.read(worker_config_path)
try:
python_path = worker_config.get('main', 'python_path', fallback=None)
except Exception as e:
print(f'python_path in {worker_config_path} does not exist. Assume system python')
python_path = 'python'
worker_config.set('main', 'ip', str(self.ip))
worker_config.set('main', 'port', str(self.backend_port))
try:
with open(worker_config_path, 'w') as f:
worker_config.write(f)
except Exception as e:
print(f'error writing worker_config: {e}')
self.logger.debug(f'updated worker config: ip {str(self.ip)}; port: {str(self.backend_port)}')
self.add_worker(worker_config_path, python_path)
def start(self):
self.logger.info(f'starting server {self.id} on port: {self.port}; backend port: {self.backend_port}')
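# zmq.proxy blocks here and shuttles messages between the ROUTER frontend
# (clients) and the DEALER backend (workers) until the context terminates.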
try:
zmq.proxy(self.frontend, self.backend)
except Exception as e:
self.logger.error(f'Error while starting router proxy: {e}')
def __del__(self):
self.logger.info(f'stopping server')
self.backend_port = None
try:
for worker in self.workers:
worker.terminate()
except Exception as e:
pass
def get_ip_address():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0] | zmq-service-tools | /zmq_service_tools-0.25-py3-none-any.whl/service_tools/server.py | server.py |
import zmq
import logging
class ReqRepRouter:
def __init__(self, host, port):
logging.info("Starting request reply router")
self.frontend_url = f"tcp://{host}:{port}"
self.backend_url = f"tcp://{host}:{port+1}"
context = zmq.Context()
self.frontend = context.socket(zmq.ROUTER)
self.backend = context.socket(zmq.ROUTER)
self.req_res_stopped = False
# self.start_routing()
def start_request_reply_routing(self):
self.frontend.bind(self.frontend_url) # "tcp://127.0.0.1:5559"
self.backend.bind(self.backend_url) # "tcp://127.0.0.1:5560"
logging.info(f"Request frontend bound to {self.frontend_url}")
logging.info(f"Reply backend bound to {self.backend_url}")
poller = zmq.Poller()
poller.register(self.frontend, zmq.POLLIN)
poller.register(self.backend, zmq.POLLIN)
while not self.req_res_stopped:
socks = dict(poller.poll())
# message = frontend.recv_multipart()
# print(message)
# backend.send_multipart(message)
# identity, reply = backend.recv_multipart()
# print(f"got reply {identity}: {reply}")
if socks.get(self.frontend) == zmq.POLLIN:
message = self.frontend.recv_multipart()
logging.debug(
f"router got {message}"
) # Requesting node sends source, destination, message, return_destination
logging.debug(
f"forwarding {message[1:]}"
) # the first item is the destination for a router socket. source is stripped and rest is given to backend router.
self.backend.send_multipart(message[1:])
if socks.get(self.backend) == zmq.POLLIN:
message = self.backend.recv_multipart()
logging.debug(
f"dealer got {message}"
) # Reply node appends the return destination as destination, source is stripped and forwarded to frontend router
logging.debug(f"returning {message[1:]}")
self.frontend.send_multipart(message[1:])
if __name__ == "__main__":
R = ReqRepRouter("127.0.0.1", 6002)
R.start_request_reply_routing() | zmq-ses-communications | /zmq_ses_communications-0.1.2.tar.gz/zmq_ses_communications-0.1.2/zmq_ses_communications/server/req_res_router.py | req_res_router.py |
from .pub_sub_node import PubSubNode
from .req_res_node import ReqResNode
from .msgs.heartbeat_pb2 import HeartBeat
from .msgs.command_pb2 import CommandRequest
import logging
import os
import asyncio
class SES_Device(PubSubNode, ReqResNode):
def __init__(self, host, port, device_identity):
self.device_logger = logging.getLogger(f"SES_Device_{os.getpid()}")
self.device_identity = device_identity
PubSubNode.__init__(self, host, port)
ReqResNode.__init__(self, host, port + 2, device_identity)
# def add_subscriptions(self,subscriptions_list):
# register subscription callbacks
# register request callbacks
# async def node_main(self):
# await asyncio.gather(self.subscriber_loop())
def callback_msg_rcvd(self, topic, contents):
# Handle all the callbacks here
callback_fun = getattr(
self, "on_" + topic.decode() + "_received", None
) # Creation of the callback name from the topic: on_<topic>_received
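# e.g. a message on topic b'heartbeat' is dispatched to
# self.on_heartbeat_received(contents), if such a method exists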
# print(f"received message in Node : {topic}: {message}")
try:
callback_fun(contents)
except Exception as e:
# print("Received : ", message)
self.device_logger.critical(
f"Calling on_{topic.decode()}_received method failed: {e}"
)
# self.on_message_received(message)
# callback_fun(source, request)
def shutdown_device(self):
self.stop_req_res()
self.stop_pub_sub()
def add_publisher(self, publisher_function):
future = asyncio.run_coroutine_threadsafe(publisher_function(), self.pub_loop)
if __name__ == "__main__":
logging.basicConfig(
level=logging.DEBUG,
format="%(name)s %(threadName)s %(asctime)s [%(levelname)s] : %(message)s",
)
dev = SES_Device("127.0.0.1", 6000, "A")
dev.create_publisher()
dev2 = SES_Device("127.0.0.1", 6000, "B")
dev2.create_publisher()
dev.send_request("B", "REQUEST from A")
dev2.send_request("A", "REQUEST from B") | zmq-ses-communications | /zmq_ses_communications-0.1.2.tar.gz/zmq_ses_communications-0.1.2/zmq_ses_communications/client/Device.py | Device.py |
import asyncio
import zmq
import zmq.asyncio
from zmq.asyncio import Context
import os
import time
import sys
import random
from threading import Thread, get_ident
import logging
class Responder(Thread):
def __init__(self, host, port, identity):
Thread.__init__(self)
self.responder_url = f"tcp://{host}:{port}"
self.responder_ctx = Context.instance()
self.responder_identity = identity
self.responder_logger = logging.getLogger("Responder")
self.responder_loop = asyncio.new_event_loop()
async def announce_service(self):
self.responder_socket = self.responder_ctx.socket(zmq.DEALER)
self.responder_socket.setsockopt(
zmq.IDENTITY, self.responder_identity.encode("utf-8")
)
self.responder_logger.info(f"Responder connecting to {self.responder_url}")
self.responder_socket.connect(self.responder_url) # "tcp://127.0.0.1:5560"
self.responder_logger.info(f"Announcing presence to {self.responder_url}")
await self.responder_socket.send(b"READY")
async def response_loop(self):
await self.announce_service()
while True:
# source, service, request = self.responder_socket.recv_multipart()
await asyncio.sleep(0.001)
request, source = await self.responder_socket.recv_multipart()
self.responder_logger.debug(
f"got request : {request} from : {source} for service : {self.responder_identitiy} threadid : {get_ident()}"
)
self.on_request_received(source, request)
await self.responder_socket.send_multipart([source, b"OK"])
def run_reply_loop(self):
asyncio.set_event_loop(self.responder_loop)
self.responder_loop.create_task(self.response_loop())
self.responder_loop.run_forever()
# asyncio.run(self.response_loop())
def stop_reply_loop(self):
self.responder_socket.disconnect(self.responder_url)
self.responder_loop.stop()
while self.responder_loop.is_running():
self.responder_logger.debug(f"Still running")
self.responder_logger.info(f"Closed the sub")
if __name__ == "__main__":
name = "s1"
if len(sys.argv) > 1:
name = sys.argv[1]
R = Responder("127.0.0.1", 5560, name)
asyncio.run(R.response_loop()) | zmq-ses-communications | /zmq_ses_communications-0.1.2.tar.gz/zmq_ses_communications-0.1.2/zmq_ses_communications/client/reply.py | reply.py |
import asyncio
import time
import zmq
import zmq.asyncio
from zmq.asyncio import Context
import os
import psutil
from threading import Thread
import logging
class Subscriber(Thread):
def __init__(self, host, port):
self.subscriber_logger = logging.getLogger("Subscriber")
Thread.__init__(self)
self.subscriber_url = f"tcp://{host}:{port}"
self.subscriber_logger.info(f"Subscriber connecting to {self.subscriber_url}")
self.subscriber_ctx = Context.instance()
self.process = psutil.Process(os.getpid())
self.sub_loop = asyncio.new_event_loop()
self.subscribed_messages = []
def setup_subscriptions(self, subscriptions):
self.subscriptions = subscriptions
self.subscriber = self.subscriber_ctx.socket(zmq.SUB)
self.subscriber.connect(self.subscriber_url)
for prefix in subscriptions:
self.add_subscription(prefix)
def add_subscription(self, message_name):
self.subscriber.setsockopt(zmq.SUBSCRIBE, message_name.encode())
self.subscriber_logger.info(f"Setting subscription for msg : {message_name}")
self.subscribed_messages.append(message_name)
def remove_subscription(self, message_name):
self.subscriber.setsockopt(zmq.UNSUBSCRIBE, message_name.encode())
self.subscriber_logger.info(f"removing subscription for msg : {message_name}")
async def subscriber_loop(self, subscriptions):
self.setup_subscriptions(subscriptions)
while True:
# print("receiving msg")
await asyncio.sleep(0.001)
await self.receive_message()
async def receive_message(self):
topic, contents = await self.subscriber.recv_multipart()
# future.add_done_callback(self.callback_msg_rcvd)
#
self.subscriber_logger.debug(
f"Received topic : {topic} PUB, contents : {contents}"
)
self.callback_msg_rcvd(topic, contents)
def callback_msg_rcvd(self, topic, contents):
pass
# async def do_something(self):
# while True:
# await asyncio.sleep(0.0)
# #
# # print("Doing house keeping")
def run_sub(self, prefix):
asyncio.set_event_loop(self.sub_loop)
self.sub_loop.create_task(self.subscriber_loop(prefix))
self.sub_loop.run_forever()
def stop_sub_loop(self):
self.subscriber.disconnect(self.subscriber_url)
self.sub_loop.stop()
while self.sub_loop.is_running():
self.subscriber_logger.info(f"Still running")
self.subscriber_logger.info(f"Closed the sub")
if __name__ == "__main__":
s = Subscriber("127.0.0.1", 6001)
asyncio.run(s.subscriber_loop([""]))  # empty prefix subscribes to all topics
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='command.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\rcommand.proto\"E\n\x0e\x43ommandRequest\x12\x0f\n\x07request\x18\x01 \x01(\t\x12\x0e\n\x06\x61\x63tion\x18\x02 \x01(\t\x12\x12\n\ncommand_id\x18\x03 \x01(\x05\"S\n\x0c\x43ommandReply\x12\x0f\n\x07request\x18\x01 \x01(\t\x12\x0e\n\x06\x61\x63tion\x18\x02 \x01(\t\x12\x12\n\ncommand_id\x18\x03 \x01(\x05\x12\x0e\n\x06result\x18\x04 \x01(\tb\x06proto3')
)
_COMMANDREQUEST = _descriptor.Descriptor(
name='CommandRequest',
full_name='CommandRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request', full_name='CommandRequest.request', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='action', full_name='CommandRequest.action', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='command_id', full_name='CommandRequest.command_id', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=17,
serialized_end=86,
)
_COMMANDREPLY = _descriptor.Descriptor(
name='CommandReply',
full_name='CommandReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request', full_name='CommandReply.request', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='action', full_name='CommandReply.action', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='command_id', full_name='CommandReply.command_id', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='result', full_name='CommandReply.result', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=88,
serialized_end=171,
)
DESCRIPTOR.message_types_by_name['CommandRequest'] = _COMMANDREQUEST
DESCRIPTOR.message_types_by_name['CommandReply'] = _COMMANDREPLY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CommandRequest = _reflection.GeneratedProtocolMessageType('CommandRequest', (_message.Message,), dict(
DESCRIPTOR = _COMMANDREQUEST,
__module__ = 'command_pb2'
# @@protoc_insertion_point(class_scope:CommandRequest)
))
_sym_db.RegisterMessage(CommandRequest)
CommandReply = _reflection.GeneratedProtocolMessageType('CommandReply', (_message.Message,), dict(
DESCRIPTOR = _COMMANDREPLY,
__module__ = 'command_pb2'
# @@protoc_insertion_point(class_scope:CommandReply)
))
_sym_db.RegisterMessage(CommandReply)
# @@protoc_insertion_point(module_scope) | zmq-ses-communications | /zmq_ses_communications-0.1.2.tar.gz/zmq_ses_communications-0.1.2/zmq_ses_communications/client/msgs/command_pb2.py | command_pb2.py |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='heartbeat.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x0fheartbeat.proto\"\xc2\x01\n\tHeartBeat\x12\x13\n\x0b\x64\x65vice_name\x18\x01 \x01(\t\x12\x11\n\tdevice_id\x18\x02 \x01(\x05\x12\x17\n\x0f\x64\x65vice_lifetime\x18\x03 \x01(\x05\x12&\n\x0c\x64\x65vice_state\x18\x04 \x01(\x0e\x32\x10.HeartBeat.State\"L\n\x05State\x12\x0b\n\x07OFFLINE\x10\x00\x12\x0b\n\x07RUNNING\x10\x01\x12\t\n\x05\x45RROR\x10\x02\x12\x06\n\x02S1\x10\x03\x12\x06\n\x02S2\x10\x04\x12\x06\n\x02S3\x10\x05\x12\x06\n\x02S4\x10\x06\x62\x06proto3')
)
_HEARTBEAT_STATE = _descriptor.EnumDescriptor(
name='State',
full_name='HeartBeat.State',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='OFFLINE', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RUNNING', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='S1', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='S2', index=4, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='S3', index=5, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='S4', index=6, number=6,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=138,
serialized_end=214,
)
_sym_db.RegisterEnumDescriptor(_HEARTBEAT_STATE)
_HEARTBEAT = _descriptor.Descriptor(
name='HeartBeat',
full_name='HeartBeat',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='device_name', full_name='HeartBeat.device_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='device_id', full_name='HeartBeat.device_id', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='device_lifetime', full_name='HeartBeat.device_lifetime', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='device_state', full_name='HeartBeat.device_state', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_HEARTBEAT_STATE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=20,
serialized_end=214,
)
_HEARTBEAT.fields_by_name['device_state'].enum_type = _HEARTBEAT_STATE
_HEARTBEAT_STATE.containing_type = _HEARTBEAT
DESCRIPTOR.message_types_by_name['HeartBeat'] = _HEARTBEAT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
HeartBeat = _reflection.GeneratedProtocolMessageType('HeartBeat', (_message.Message,), dict(
DESCRIPTOR = _HEARTBEAT,
__module__ = 'heartbeat_pb2'
# @@protoc_insertion_point(class_scope:HeartBeat)
))
_sym_db.RegisterMessage(HeartBeat)
# @@protoc_insertion_point(module_scope) | zmq-ses-communications | /zmq_ses_communications-0.1.2.tar.gz/zmq_ses_communications-0.1.2/zmq_ses_communications/client/msgs/heartbeat_pb2.py | heartbeat_pb2.py |
[](https://pypi.org/project/zmq-tubes/)


# ZMQ Tubes
ZMQ Tubes is a managing system for ZMQ communication.
It can manage many ZMQ sockets by one interface.
The whole system is hierarchical, based on topics
(look at [MQTT topics](https://www.hivemq.com/blog/mqtt-essentials-part-5-mqtt-topics-best-practices/)).
## Classes
- **TubeMessage** - This class represents a request/response message.
Some types of tubes require a response in this format.
- **Tube** - This class wraps a ZMQ socket.
It represents a connection between client and server.
- **TubeMonitor** - The class can sniff the ZMQ Tube communication.
- **TubeNode** - This represents an application interface for communication via tubes.
## Asyncio / Threading
The library supports both approaches. Asyncio requires Python 3.7+.
```python
from zmq_tubes import TubeNode, Tube # Asyncio classes
from zmq_tubes.threads import TubeNode, Tube # Threads classes
```
## Usage:
### Node definitions in yml file
We can define all tubes for one TubeNode by yml file.
The following examples require installing the packages `PyYAML`, `pyzmq` and `zmq_tubes`.
#### Client service (asyncio example)
```yaml
# client.yml
tubes:
- name: Client REQ
addr: ipc:///tmp/req.pipe
tube_type: REQ
topics:
- foo/bar
- name: Client PUB
addr: ipc:///tmp/pub.pipe
tube_type: PUB
topics:
- foo/pub/#
```
```python
# client.py
import asyncio
import yaml
from zmq_tubes import TubeNode, TubeMessage
async def run():
with open('client.yml', 'r+') as fd:
schema = yaml.safe_load(fd)
node = TubeNode(schema=schema)
async with node:
print(await node.request('foo/bar', 'message 1'))
await node.publish('foo/pub/test', 'message 2')
if __name__ == "__main__":
asyncio.run(run())
```
```shell
> python client.py
topic: foo/bar, payload: response
```
#### Server service (threads example)
```yaml
# server.yml
tubes:
- name: server ROUTER
addr: ipc:///tmp/req.pipe
tube_type: ROUTER
server: True
topics:
- foo/bar
- name: server SUB
addr: ipc:///tmp/pub.pipe
tube_type: SUB
server: True
topics:
- foo/pub/#
```
```python
# server.py
import yaml
from zmq_tubes.threads import TubeNode, TubeMessage
def handler(request: TubeMessage):
print(request.payload)
if request.tube.tube_type_name == 'ROUTER':
return request.create_response('response')
def run():
with open('server.yml', 'r+') as fd:
schema = yaml.safe_load(fd)
node = TubeNode(schema=schema)
node.register_handler('foo/#', handler)
with node:
node.start().join()
if __name__ == "__main__":
run()
```
```shell
> python server.py
message 1
message 2
```
### YAML definition
The YAML file starts with a root element `tubes`, which contains a list of all our tube definitions (see the example after this list).
- `name` - string - name of the tube.
- `addr` - string - connection or bind address in format `transport://address` (see more http://api.zeromq.org/2-1:zmq-connect)
- `server` - bool - is this tube server side (bind to `addr`) or client side (connect to `addr`)
- `tube_type` - string - type of this tube (see more https://zguide.zeromq.org/docs/chapter2/#Messaging-Patterns)
- `identity` - string - (optional) we can set up a custom tube identity
- `utf8_decoding` - bool - (default = True), if this is True, the payload is automatically UTF-8 decoded.
- `sockopts` - dict - (optional) we can set up sockopts for this tube (see more http://api.zeromq.org/4-2:zmq-setsockopt)
- `monitor` - string - (optional) bind address of tube monitor (see more [Debugging / Monitoring](#debugging-/-monitoring))
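For example, a tube definition that combines several of the optional attributes could look like this (the addresses and the identity value are only illustrative):
```yaml
tubes:
- name: Client REQ
addr: ipc:///tmp/req.pipe
tube_type: REQ
identity: my_client
utf8_decoding: True
monitor: ipc:///tmp/req.monitor
topics:
- foo/bar
```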
### Request / Response
This is a simple scenario, the server processes the requests serially.
#### Server:
```python
from zmq_tubes import Tube, TubeNode, TubeMessage
async def handler(request: TubeMessage):
print(request.payload)
return 'answer'
# or return request.create_response('response')
tube = Tube(
name='Server',
addr='ipc:///tmp/req_resp.pipe',
server=True,
tube_type='REP'
)
node = TubeNode()
node.register_tube(tube, 'test/#')
node.register_handler('test/#', handler)
await node.start()
# output: 'question'
```
#### Client:
```python
from zmq_tubes import Tube, TubeNode
tube = Tube(
name='Client',
addr='ipc:///tmp/req_resp.pipe',
tube_type='REQ'
)
node = TubeNode()
node.register_tube(tube, 'test/#')
response = await node.request('test/xxx', 'question')
print(response.payload)
# output: 'answer'
```
The method `request` accepts the optional parameter `utf8_decoding`. When we set this parameter to `False` in the previous
example, the returned payload is not automatically decoded and we get `bytes`, as the short sketch below shows.
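A minimal sketch of the same request with decoding disabled:
```python
response = await node.request('test/xxx', 'question', utf8_decoding=False)
print(type(response.payload))
# output: <class 'bytes'>
```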
### Subscribe / Publisher
#### Server:
```python
from zmq_tubes import Tube, TubeNode, TubeMessage
async def handler(request: TubeMessage):
print(request.payload)
tube = Tube(
name='Server',
addr='ipc:///tmp/sub_pub.pipe',
server=True,
tube_type='SUB'
)
node = TubeNode()
node.register_tube(tube, 'test/#')
node.register_handler('test/#', handler)
await node.start()
# output: 'message'
```
#### Client:
```python
from zmq_tubes import Tube, TubeNode
tube = Tube(
name='Client',
addr='ipc:///tmp/sub_pub.pipe',
tube_type='PUB'
)
# In the case of publishing, the first message is very often
# lost. The workaround is to connect the tube manually as soon as possible.
tube.connect()
node = TubeNode()
node.register_tube(tube, 'test/#')
node.publish('test/xxx', 'message')
```
### Request / Router
The server is asynchronous, which means it is able to process
multiple requests at the same time.
#### Server:
```python
import asyncio
from zmq_tubes import Tube, TubeNode, TubeMessage
async def handler(request: TubeMessage):
print(request.payload)
if request.payload == 'wait':
await asyncio.sleep(10)
return request.create_response(request.payload)
tube = Tube(
name='Server',
addr='ipc:///tmp/req_router.pipe',
server=True,
tube_type='ROUTER'
)
node = TubeNode()
node.register_tube(tube, 'test/#')
node.register_handler('test/#', handler)
await node.start()
# output: 'wait'
# output: 'message'
```
#### Client:
```python
import asyncio
from zmq_tubes import Tube, TubeNode
tube = Tube(
name='Client',
addr='ipc:///tmp/req_router.pipe',
tube_type='REQ'
)
async def task(node, text):
print(await node.request('test/xxx', text))
node = TubeNode()
node.register_tube(tube, 'test/#')
asyncio.create_task(task(node, 'wait'))
asyncio.create_task(task(node, 'message'))
# output: 'message'
# output: 'wait'
```
### Dealer / Response
The client is asynchronous, which means it is able to send
multiple requests at the same time.
#### Server:
```python
from zmq_tubes import Tube, TubeNode, TubeMessage
async def handler(request: TubeMessage):
print(request.payload)
return 'response'
# or return request.create_response('response')
tube = Tube(
name='Server',
addr='ipc:///tmp/dealer_resp.pipe',
server=True,
tube_type='REP'
)
node = TubeNode()
node.register_tube(tube, 'test/#')
node.register_handler('test/#', handler)
await node.start()
# output: 'message'
```
#### Client:
```python
from zmq_tubes import Tube, TubeNode, TubeMessage
tube = Tube(
name='Client',
addr='ipc:///tmp/dealer_resp.pipe',
tube_type='DEALER'
)
async def handler(response: TubeMessage):
print(response.payload)
node = TubeNode()
node.register_tube(tube, 'test/#')
node.register_handler('test/#', handler)
await node.send('test/xxx', 'message')
# output: 'response'
```
### Dealer / Router
The client and server are asynchronous, which means they are able to send and process
multiple requests/responses at the same time.
#### Server:
```python
import asyncio
from zmq_tubes import Tube, TubeNode, TubeMessage
async def handler(request: TubeMessage):
print(request.payload)
if request.payload == 'wait':
await asyncio.sleep(10)
return request.create_response(request.payload)
tube = Tube(
name='Server',
addr='ipc:///tmp/dealer_router.pipe',
server=True,
tube_type='ROUTER'
)
node = TubeNode()
node.register_tube(tube, 'test/#')
node.register_handler('test/#', handler)
await node.start()
# output: 'wait'
# output: 'message'
```
#### Client:
```python
from zmq_tubes import Tube, TubeNode, TubeMessage
tube = Tube(
name='Client',
addr='ipc:///tmp/dealer_router.pipe',
tube_type='DEALER'
)
async def handler(response: TubeMessage):
print(response.payload)
node = TubeNode()
node.register_tube(tube, 'test/#')
node.register_handler('test/#', handler)
await node.send('test/xxx', 'wait')
await node.send('test/xxx', 'message')
# output: 'message'
# output: 'wait'
```
### Dealer / Dealer
The client and server are asynchronous, which means they are able to send and process
multiple requests/responses at the same time.
#### Server:
```python
from zmq_tubes import Tube, TubeNode, TubeMessage
tube = Tube(
name='Server',
addr='ipc:///tmp/dealer_dealer.pipe',
server=True,
tube_type='DEALER'
)
async def handler(response: TubeMessage):
print(response.payload)
node = TubeNode()
node.register_tube(tube, 'test/#')
node.register_handler('test/#', handler)
await node.send('test/xxx', 'message from server')
# output: 'message from client'
```
#### Client:
```python
from zmq_tubes import Tube, TubeNode, TubeMessage
tube = Tube(
name='Client',
addr='ipc:///tmp/dealer_dealer.pipe',
tube_type='DEALER'
)
async def handler(response: TubeMessage):
print(response.payload)
node = TubeNode()
node.register_tube(tube, 'test/#')
node.register_handler('test/#', handler)
await node.send('test/xxx', 'message from client')
# output: 'message from server'
```
## Debugging / Monitoring
We can assign a monitor socket to our ZMQ tubes. Through this monitor socket, we can sniff the ZMQ communication or get the tube
configuration.
```yaml
tubes:
- name: ServerRouter
addr: ipc:///tmp/router.pipe
monitor: ipc:///tmp/test.monitor
tube_type: ROUTER
server: yes
topics:
- foo/#
```
This is an example of a YAML definition. We can use the same monitor socket for several tubes in the same TubeNode.
When we add the `monitor` attribute to our tube definition, the application automatically creates a new monitor socket:
`/tmp/test.monitor`. Your application acts as the server side of this socket. Logs are sent to the socket only while the monitoring
tool is running.
### Monitoring tool
After enabling monitoring in the application, we can use the monitoring tool to sniff the communication.
```shell
# get the server tube configuration
> zmqtube-monitor get_schema ipc:///tmp/display.monitor
tubes:
- addr: ipc:///tmp/router.pipe
monitor: ipc:///tmp/test.monitor
name: ServerRouter
server: 'yes'
tube_type: ROUTER
# the log tube communication. Logs will be saved to dump.rec as well.
> zmqtube-monitor logs -d ./dump.rec ipc:///tmp/display.monitor
0.28026580810546875 ServerRouter < foo/test Request
0.0901789665222168 ServerRouter > foo/test Response
# The format of output
# <relative time> <tube name> <direction> <topic> <message>`
```
### Simulation of the client side
When we have a dump file (e.g. `dump.rec`), we can simulate the communication with our app.
The first step is to prepare the mock client schema file.
For this, we can get the tube node configuration from our application and then edit it.
```shell
> zmqtube-monitor get_schema ipc:///tmp/display.monitor > mock_schema.yaml
> vim mock_schema.yaml
...
# Now, we have to update the file mock_schema.yaml.
# We change configuration to the mock client configuration.
# The names of the tubes must be the same as are in your app.
# We can remove monitoring attribute and change server and
# tube_type attributes. In this mock file, the topics are not
# required, they are ignored.
> cat mock_schema.yaml
tubes:
- addr: ipc:///tmp/router.pipe
name: ServerRouter
tube_type: REQ
```
Now, we can start the simulation of the client communication.
```shell
> zmqtube-monitor simulate mock_schema.yaml dump.rec
```
If the response of our app differs from the one the tool expects (the response saved in the dump file), then
the monitoring tool warns us.
We can modify the speed of the simulation with the parameter `--speed`.
By default, the simulation runs at the same
speed as the original communication (`--speed=1`).
| Speed | Description |
| :-: | :- |
| 0 | no blocking (as fast as possible) |
| 0.5 | twice as fast as the original |
| 1 | original speed |
| 2 | twice as slow as the original |
### Example of programmatic declaration of the monitoring.
```python
import zmq
from zmq_tubes.threads import Tube, TubeNode, TubeMessage, TubeMonitor
def handler(request: TubeMessage):
print(request.payload)
return request.create_response('response')
resp_tube = Tube(
name='REP',
addr='ipc:///tmp/rep.pipe',
server='yes',
tube_type=zmq.REP
)
req_tube = Tube(
name='REQ',
addr='ipc:///tmp/rep.pipe',
tube_type=zmq.REQ
)
node = TubeNode()
node.register_tube(resp_tube, f"foo/#")
node.register_tube(req_tube, f"foo/#")
node.register_handler(f"foo/#", handler)
node.register_monitor(resp_tube, TubeMonitor(addr='ipc:///tmp/test.monitor'))
with node:
print(node.request('foo/xxx', 'message 2'))
```
| zmq-tubes | /zmq_tubes-1.14.0.tar.gz/zmq_tubes-1.14.0/README.md | README.md |
import argparse
import json
import logging
import sys
import time
import zmq
from zmq_tubes.threads import TubeNode, Tube
last_result = None
last_time = None
try:
import yaml
except ImportError:
sys.stderr.write("For the simulation is required install "
"pyaml package.")
sys.exit(1)
def get_socket(addr: str):
context = zmq.Context()
socket = context.socket(zmq.PAIR)
socket.connect(addr)
return socket
def logs(addr, dump_file, notime, print_stdout=True):
"""
Log ZMQ communication
:param addr: address of monitor socket
:param dump_file: file descriptor of the file for saving ZMQMessages
:param notime: bool: disable printing of relative time (default is False)
:param print_stdout: bool: print ZMQMessages to stdout
"""
socket = get_socket(addr)
socket.send(b'__enabled__', flags=zmq.NOBLOCK)
try:
while True:
if socket.poll(zmq.POLLIN):
data = socket.recv_multipart()
if data:
if data[0] == b'__connect__':
socket.send(b'__enabled__', flags=zmq.NOBLOCK)
continue
elif data[0] == b'__disconnect__':
break
if dump_file:
dump_file.write(b' '.join(data) + b'\n')
if print_stdout:
data = [m.decode('utf-8', 'backslashreplace')
for m in data]
if notime:
data.pop(0)
print(' '.join(data))
except KeyboardInterrupt:
pass
def get_schema(addr):
"""
Get ZMQTube schema from connected application
:param addr: address of monitor socket
"""
socket = get_socket(addr)
socket.send(b'__get_schema__')
for _ in range(10):
data = socket.recv_multipart()
if data and data[0] == b'__schema__':
return json.loads(data[1].decode())
def simulate_speed(rtime, speed):
global last_time
if last_time:
dt = rtime * speed - (time.time() - last_time)
if dt > 0:
time.sleep(dt)
last_time = time.time()
def simulate_send(node: TubeNode, line, speed):
global last_result
rtime, tube_name, direction, msg = \
line.decode().rstrip().split(' ', 3)
if direction != '<':
if direction == '>' and last_result and last_result != msg:
logging.warning("The request result is different: "
f"'{msg}' != '{last_result}'")
last_result = None
return
msg = msg.split(' ', 1)
topic = msg.pop(0)
data = msg.pop(0) if msg else ''
tube = node.get_tube_by_name(tube_name)
if not tube:
sys.stderr.write(f'The tube {tube_name} does not exist.\n')
return
if speed:
simulate_speed(float(rtime), speed)
if tube.tube_type == zmq.REQ:
res = tube.request(topic, data, timeout=-1)
last_result = b' '.join(res.format_message()[-2:]).decode()
else:
tube.send(topic, data)
def simulate(schema_yaml, dump_file, speed):
"""
Simulate ZMQ communication
:param schema_yaml: the file descriptor of simulator definition
:param dump_file: the file descriptor of ZMQTube dump communication
:param speed: float - speed of playback 0 - no blocking, 1 - real time
"""
schema = yaml.safe_load(schema_yaml)
node = TubeNode()
for tube_info in schema['tubes']:
if 'monitor' in tube_info:
del tube_info['monitor']
tube = Tube(**tube_info)
node.register_tube(tube, ['#'])
with node:
while True:
line = dump_file.readline()
if not line:
break
simulate_send(node, line, speed)
def main():
parser = argparse.ArgumentParser(
prog='ZMQ TubeNode monitor',
description='This tool can monitor zmq TubeNode')
parser.add_argument('-v', '--verbose', help='Verbose.', action='store_true')
subparsers = parser.add_subparsers(help='sub-command help')
# Schema
parser_schema = subparsers.add_parser('get_schema',
help='Get tubeNode schema')
parser_schema.add_argument('socket', help='Path to monitor socket.')
parser_schema.set_defaults(
func=lambda args: print(yaml.dump(get_schema(args.socket)))
)
# Logs
parser_logs = subparsers.add_parser('logs',
help='Logs tubeNode communication.')
parser_logs.add_argument('socket', help='Path to monitor socket.')
parser_logs.add_argument('--notime', action='store_true',
help='Does not show relative time')
parser_logs.add_argument('-d', '--dump', type=argparse.FileType('wb'),
help='Output dump file')
parser_logs.set_defaults(func=lambda args: logs(args.socket, args.dump,
args.notime))
# Simulate
parser_sim = subparsers.add_parser('simulate',
help='Simulate tubeNode communication.')
parser_sim.add_argument('schema', type=argparse.FileType('r'),
help='The tubeNode schema file')
parser_sim.add_argument('dump', type=argparse.FileType('rb'),
help='Dump file')
parser_sim.add_argument('-s', '--speed', type=float, default=1,
help='Speed of simulation. 0 - no wait, '
'1 - real speed (default)')
parser_sim.set_defaults(func=lambda args: simulate(args.schema, args.dump,
args.speed))
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
args.func(args)
if __name__ == "__main__":
main() | zmq-tubes | /zmq_tubes-1.14.0.tar.gz/zmq_tubes-1.14.0/zmq_tubes/monitoring.py | monitoring.py |
class TopicMatcher:
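"""
Tree-based matcher for MQTT-style topics with '+' (single level) and '#'
(multi level) wildcards.
Illustrative usage (the topic and handler values are examples only):
>>> m = TopicMatcher()
>>> m.set_topic('foo/bar', 'handler1')
>>> m.match('foo/bar')
'handler1'
>>> m.set_topic('foo/#', 'handler2')
>>> m.matches('foo/baz')
['handler2']
"""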
class TopicNode(object):
__slots__ = 'children', 'content'
def __init__(self):
self.children = {}
self.content = None
def __init__(self):
self._root = self.TopicNode()
def set_topic(self, key, value):
node = self._root
if key and key[-1] == '/':
key = key[:-1]
for sym in key.split('/'):
node = node.children.setdefault(sym, self.TopicNode())
node.content = value
def get_topic(self, key, set_default=None):
node = self._root
if key and key[-1] == '/':
key = key[:-1]
for sym in key.split('/'):
node = node.children.get(sym)
if node is None:
if set_default is not None:
self.set_topic(key, set_default)
return set_default
return node.content
def filter(self, filter_topic: str):
"""
        Return the registered (topic, value) pairs that match filter_topic.
        :param filter_topic: str
        :return: [(str, value)]
"""
def __rec(lst, node, _all=False, tt=None):
if not lst and not node.children:
return [('/'.join(tt), node.content)] if node.content else []
part = None
if _all:
res = []
for k, ch in node.children.items():
res += __rec([], ch, _all, tt + [k])
return res
elif lst and lst[0] in ['+', '#']:
part = lst[0]
lst = lst[1:]
res = []
for k, ch in node.children.items():
res += __rec(lst, ch, _all or part == '#', tt + [k])
return res
elif lst and lst[0] in node.children:
return __rec(lst[1:], node.children[lst[0]], _all,
tt + [lst[0]])
return []
if filter_topic and filter_topic[-1] == '/':
filter_topic = filter_topic[:-1]
return __rec(filter_topic.split('/'), self._root, False, [])
def matches(self, topic):
if topic and topic[-1] == '/':
topic = topic[:-1]
lst = topic.split('/')
lst_len = len(lst)
normal = not topic.startswith('$')
res = []
def __rec(node, i=0):
if i == lst_len:
if node.content:
res.append(node.content)
else:
part = lst[i]
if part in node.children:
__rec(node.children[part], i + 1)
if '+' in node.children and (normal or i > 0):
__rec(node.children['+'], i + 1)
if '#' in node.children and (normal or i > 0):
content = node.children['#'].content
if content:
res.append(content)
__rec(self._root)
return res
def match(self, topic, default=None):
res = self.matches(topic)
if res:
return res[0]
return default
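    # Illustrative usage sketch (not part of the original module): TopicMatcher
    # supports MQTT-style wildcards in registered topics, e.g.
    #
    #   m = TopicMatcher()
    #   m.set_topic('sensors/+/temp', 'T')
    #   m.set_topic('logs/#', 'L')
    #   m.match('sensors/kitchen/temp')   # -> 'T'  ('+' matches one level)
    #   m.match('logs/app/error')         # -> 'L'  ('#' matches any depth)
    #   m.match('sensors/kitchen/hum')    # -> None (no registered topic matches)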
def values(self) -> list:
_values = []
def __step(node):
if node.content and node.content not in _values:
_values.append(node.content)
for child in node.children.values():
__step(child)
__step(self._root)
        return _values
# ==== end of zmq_tubes/matcher.py (zmq-tubes 1.14.0) ====
import time
import concurrent
from threading import Thread, Lock, Event, current_thread
import zmq
from zmq import Poller, Context
from .manager import TubeMessage, Tube as AsyncTube, TubeNode as AsyncTubeNode,\
TubeMethodNotSupported, TubeMessageError, TubeMessageTimeout, \
TubeMonitor as AsyncTubeMonitor, TubeTopicNotConfigured, TubeConnectionError
class TubeThreadDeadLock(Exception): pass
class StoppableThread(Thread):
def __init__(self, *args, **kwargs):
self.stop_event = Event()
# kwargs['daemon'] = True
super().__init__(*args, **kwargs)
def is_stopped(self):
return self.stop_event.is_set()
def stop(self):
self.stop_event.set()
self.join(timeout=1)
class TubeMonitor(AsyncTubeMonitor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        # Because the singleton runs __init__ on every instantiation attempt.
if hasattr(self, 'lock') and self.lock:
return
self.context = Context.instance()
self.lock = Lock()
def connect(self):
self.raw_socket = self.context.socket(zmq.PAIR)
self.raw_socket.bind(self.addr)
self.raw_socket.__dict__['monitor'] = self
try:
with self.lock:
self.raw_socket.send(b'__connect__', flags=zmq.NOBLOCK)
except zmq.ZMQError:
# The monitor is not connected
pass
def close(self):
if self.raw_socket:
try:
with self.lock:
self.raw_socket.send(b'__disconnect__', flags=zmq.NOBLOCK)
time.sleep(.1)
except zmq.ZMQError:
# The monitor is not connected
pass
self.raw_socket.close(1)
self.raw_socket = None
self.enabled = False
def process(self):
if self.raw_socket:
self.__process_cmd(self.raw_socket.recv())
def send_message(self, msg: TubeMessage):
if self.raw_socket and self.enabled:
row_msg = self.__format_message(msg, '>')
with self.lock:
self.raw_socket.send_multipart(row_msg)
def receive_message(self, msg: TubeMessage):
if self.raw_socket and self.enabled:
row_msg = self.__format_message(msg, '<')
with self.lock:
self.raw_socket.send_multipart(row_msg)
class Tube(AsyncTube):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.lock = Lock()
self.context = Context().instance()
def send(self, *args, **kwargs):
if args:
if isinstance(args[0], TubeMessage):
return self.__send_message(*args, **kwargs)
elif isinstance(args[0], str):
return self.__send_payload(*args, **kwargs)
elif kwargs:
if 'message' in kwargs:
return self.__send_message(**kwargs)
elif 'topic' in kwargs:
return self.__send_payload(**kwargs)
raise NotImplementedError("Unknown type of topic")
def __send_payload(self, topic: str, payload=None, raw_socket=None):
"""
Send payload to topic.
:param topic - topic
:param payload - payload
        :param raw_socket - zmq Socket, used for non-persistent connections
"""
message = TubeMessage(
self,
payload=payload,
topic=topic,
raw_socket=raw_socket if raw_socket else self.raw_socket
)
self.__send_message(message)
def __send_message(self, message: TubeMessage):
"""
Send message.
:param message - TubeMessage
"""
raw_msg = message.format_message()
self.logger.debug("Send (tube: %s) to %s", self.name, raw_msg)
if not message.raw_socket or message.raw_socket.closed:
raise TubeConnectionError(
f'The tube {message.tube.name} is already closed.')
if not self.lock.acquire(timeout=10):
            raise TubeThreadDeadLock(f"The tube '{self.name}' waits more than "
                                     f"10s for access to the socket.")
try:
message.raw_socket.send_multipart(raw_msg)
try:
if self.monitor:
self.monitor.send_message(message)
except Exception as ex:
self.logger.error(
"The error with sending of an outgoing message "
"to the monitor tube.",
exc_info=ex)
except (TypeError, zmq.ZMQError) as ex:
            raise TubeMessageError(
                f"The message '{message}' could not be sent.") from ex
finally:
self.lock.release()
def request(self, *args, post_send_callback=None, **kwargs) -> TubeMessage:
"""
Send request
:param request: Optional[TubeMessage]
:param topic: Optional[str]
:param payload: Optional[dict]
:param timeout: int
:param post_send_callback: Optional[Callable]
:param utf8_decoding: bool (default True)
:return: TubeMessage
"""
if args:
if isinstance(args[0], TubeMessage):
return self.__request_message(
*args, post_send_callback=post_send_callback, **kwargs
)
elif isinstance(args[0], str):
return self.__request_payload(
*args, post_send_callback=post_send_callback, **kwargs
)
elif kwargs:
if 'message' in kwargs:
return self.__request_message(
post_send_callback=post_send_callback, **kwargs
)
elif 'topic' in kwargs:
return self.__request_payload(
post_send_callback=post_send_callback, **kwargs
)
raise NotImplementedError("Unknown type of topic")
def __request_payload(self, topic: str, payload=None, timeout=None,
post_send_callback=None, utf8_decoding=None):
request = TubeMessage(
self,
payload=payload,
topic=topic,
raw_socket=self.raw_socket,
)
return self.__request_message(request, timeout=timeout,
post_send_callback=post_send_callback,
utf8_decoding=utf8_decoding)
def __request_message(self, request: TubeMessage, timeout: int = 30,
post_send_callback=None, utf8_decoding=None):
if self.tube_type != zmq.REQ:
            raise TubeMethodNotSupported(
                f"The tube '{self.name}' (type: '{self.tube_type_name}') "
                f"cannot send requests."
)
try:
self.send(request)
if post_send_callback:
post_send_callback(request)
if request.raw_socket.poll(timeout * 1000) != 0:
response = self.receive_data(
raw_socket=request.raw_socket,
utf8_decoding=utf8_decoding
)
if response.topic != request.topic:
raise TubeMessageError(
f"The response comes to different topic "
f"({request.topic} != {response.topic}).")
return response
finally:
if not self.is_persistent:
# self.logger.debug(f"Close tube {self.name}")
if request.raw_socket and not request.raw_socket.closed:
request.raw_socket.close(1000)
if self.is_closed:
raise TubeConnectionError(f'The tube {self.name} was closed.')
raise TubeMessageTimeout(
f"No answer for the request in {timeout}s. Topic: {request.topic}")
def receive_data(self, raw_socket=None, timeout=3, utf8_decoding=None):
if not raw_socket:
raw_socket = self.raw_socket
if not self.lock.acquire(timeout=timeout):
            raise TubeThreadDeadLock(f"The tube '{self.name}' waits more than "
                                     f"{timeout}s for access to the socket.")
try:
raw_data = raw_socket.recv_multipart()
finally:
self.lock.release()
self.logger.debug(
f"Received (tube {self.name}): {raw_data}")
message = TubeMessage(tube=self, raw_socket=raw_socket)
message.parse(
raw_data,
self.utf8_decoding if utf8_decoding is None else utf8_decoding
)
try:
if self.monitor:
self.monitor.receive_message(message)
except Exception as ex:
self.logger.error("The error with sending of an incoming message "
"to the monitor tube.",
exc_info=ex)
return message
class TubeNode(AsyncTubeNode):
__TUBE_CLASS = Tube
__MONITOR_CLASS = TubeMonitor
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.main_thread = None
self.max_workers = None
def __enter__(self):
self.connect()
self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
self.close()
def connect(self):
"""
opens all persistent connections
"""
for tube in self.tubes:
tube.connect()
for monitor in self.__monitors:
monitor.connect()
def close(self):
"""
close all persistent connections
"""
for tube in self.tubes:
tube.close()
for monitor in self.__monitors:
monitor.close()
def send(self, topic: str, payload=None, tube=None):
if not tube:
tube = self.get_tube_by_topic(topic, [zmq.DEALER])
if not tube:
raise TubeTopicNotConfigured(f'The topic "{topic}" is not '
f'assigned to any Tube for '
f'dealer.')
tube.send(topic, payload)
def request(self, topic: str, payload=None, timeout=30,
post_send_callback=None, utf8_decoding=None) -> TubeMessage:
tube = self.get_tube_by_topic(topic, [zmq.REQ])
if not tube:
raise TubeTopicNotConfigured(f'The topic "{topic}" is not assigned '
f'to any Tube for request.')
res = tube.request(topic, payload, timeout=timeout,
post_send_callback=post_send_callback,
utf8_decoding=utf8_decoding)
return res
def publish(self, topic: str, payload=None):
"""
        With asyncio, the first message is very often lost.
The workaround is to connect the tube manually as soon as possible.
"""
tube = self.get_tube_by_topic(topic, [zmq.PUB])
if not tube:
raise TubeTopicNotConfigured(f'The topic "{topic}" is not assigned '
f'to any Tube for publishing.')
self.send(topic, payload, tube)
def stop(self):
if self.main_thread:
self.main_thread.stop()
def start(self):
def _callback_wrapper(_callback, _request: TubeMessage):
tube = _request.tube
response = _callback(_request)
if not isinstance(response, TubeMessage):
if tube.tube_type in [zmq.ROUTER]:
                    raise TubeMessageError(
                        f"The response of the {tube.tube_type_name} "
                        f"callback has to be an instance of the TubeMessage class.")
_payload = response
response = _request.create_response()
response.payload = _payload
else:
if tube.tube_type in [zmq.ROUTER] and\
response.request.identity != response.identity:
                    raise TubeMessageError(
                        "The TubeMessage response object was not created "
                        "from the request object.")
try:
tube.send(response)
except TubeConnectionError:
                self.logger.warning(
                    f"The client (tube '{tube.name}') closed the socket "
                    f"before the answer could be sent. Probably a timeout.")
def _one_event(request):
callbacks = self.get_callback_by_topic(request.topic, request.tube)
if not callbacks:
if self.warning_not_mach_topic:
self.logger.warning(
f"Incoming message does not match any topic, "
f"it is ignored (topic: {request.topic})"
)
return
c_process = current_thread()
if request.tube.tube_type == zmq.SUB:
for callback in callbacks:
c_process.name = 'zmq/worker/sub'
callback(request)
elif request.tube.tube_type == zmq.REP:
c_process.name = 'zmq/worker/rep'
_callback_wrapper(callbacks[-1], request)
elif request.tube.tube_type == zmq.ROUTER:
c_process.name = 'zmq/worker/router'
_callback_wrapper(callbacks[-1], request)
elif request.tube.tube_type == zmq.DEALER:
                c_process.name = 'zmq/worker/dealer'
callbacks[-1](request)
def _main_loop():
poller = Poller()
run_this_thread = False
for tube in self.tubes:
if tube.tube_type in [zmq.SUB, zmq.REP, zmq.ROUTER, zmq.DEALER]:
poller.register(tube.raw_socket, zmq.POLLIN)
run_this_thread = True
for monitor in self.__monitors:
poller.register(monitor.raw_socket, zmq.POLLIN)
run_this_thread = True
if not run_this_thread:
                self.logger.debug("The main process is disabled; "
                                  "no supported tube is registered.")
return
self.logger.info("The main process was started.")
cur_thread = current_thread()
with concurrent.futures.ThreadPoolExecutor(
max_workers=self.max_workers,
thread_name_prefix='zmq/worker/') as executor:
while not cur_thread.is_stopped():
try:
events = poller.poll(timeout=100)
except zmq.error.ZMQError:
# This happens during shutdown
continue
for event in events:
# self.logger.debug(f"New event {event}")
raw_socket = event[0]
if isinstance(raw_socket, object) and \
'monitor' in raw_socket.__dict__:
try:
monitor = raw_socket.__dict__['monitor']
executor.submit(monitor.process)
except Exception as ex:
self.logger.error(
"The monitor event process failed.",
exc_info=ex)
continue
tube: Tube = raw_socket.__dict__['tube']
request = tube.receive_data(
raw_socket=raw_socket
)
req_tubes = self._tubes.match(request.topic)
if req_tubes and tube not in req_tubes:
# This message is not for this node.
# The topic is not registered for this node.
continue
executor.submit(_one_event, request)
self.logger.info("The main process was ended.")
if not self.main_thread:
self.main_thread = StoppableThread(target=_main_loop,
name='zmq/main')
self.main_thread.start()
time.sleep(.2) # wait for main thread is ready
        return self.main_thread
# ==== end of zmq_tubes/threads.py (zmq-tubes 1.14.0) ====
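# Illustrative sketch of the thread-based TubeNode defined above (the schema
# layout and the handler are made-up examples; see zmq_tubes/manager.py for
# the supported schema keys):
#
#   node = TubeNode(schema=yaml.safe_load(schema_text))
#   node.register_handler('foo/#', lambda req: f'echo: {req.payload}')
#   with node:                     # connects tubes and starts the main thread
#       node.publish('status/up')  # needs a PUB tube registered for 'status/#'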
import time
import sys
import asyncio
import json
import logging
from collections.abc import Callable
import zmq
from zmq import SocketOption
from zmq.asyncio import Poller, Context, Socket
from zmq_tubes.matcher import TopicMatcher
class TubeException(Exception): pass # flake8: E701
class TubeTopicNotConfigured(TubeException): pass # flake8: E701
class TubeMessageError(TubeException): pass # flake8: E701
class TubeMessageTimeout(TubeException): pass # flake8: E701
class TubeMethodNotSupported(TubeException): pass # flake8: E701
class TubeConnectionError(TubeException): pass # flake8: E701
LESS38 = sys.version_info < (3, 8)
SOCKET_OPTION_VALUE_TO_NAME = {
member.value: name for name, member in SocketOption.__members__.items()
}
TUBE_TYPE_MAPPING = {
'SUB': zmq.SUB,
'PUB': zmq.PUB,
'REQ': zmq.REQ,
'REP': zmq.REP,
'ROUTER': zmq.ROUTER,
'DEALER': zmq.DEALER,
'PAIR': zmq.PAIR
}
def flatten(llist):
if isinstance(llist, list):
return sum(llist, [])
return llist
class TubeMessage:
@staticmethod
def _format_string(data):
if isinstance(data, str):
return data.encode('utf8')
elif isinstance(data, (bytes, bytearray)):
return data
elif isinstance(data, (int, float)):
return str(data).encode('ascii')
elif data is None:
return b''
else:
raise TypeError(
                'data must be a string, bytes, bytearray, int, float or None.')
def __init__(self, tube, **kwargs):
self.tube: Tube = tube
self.topic = kwargs.get('topic')
self.raw_socket = kwargs.get('raw_socket')
self.identity = kwargs.get('identity')
self.request: TubeMessage = kwargs.get('request')
self.payload = kwargs.get('payload', '')
def __repr__(self):
res = ''
if self.identity:
            res = f"identity: {self.identity}, "
return f"{res}topic: {self.topic}, payload: {self.payload}"
@property
def payload(self) -> str:
return self._payload
@payload.setter
def payload(self, value):
if isinstance(value, list) or isinstance(value, dict):
value = json.dumps(value)
self._payload = value
def from_json(self):
return json.loads(self.payload)
def create_response(self, payload=None) -> 'TubeMessage':
return TubeMessage(
self.tube,
topic=self.topic,
raw_socket=self.raw_socket,
identity=self.identity,
request=self,
payload=payload
)
def parse(self, data, utf8_decoding=True):
if self.tube.tube_type == zmq.ROUTER:
if len(data) != 4:
raise TubeMessageError(
f"The received message (tube '{self.tube.name}') "
f"is in unknown format. '{data}'")
self.identity = data.pop(0)
data.pop(0)
elif self.tube.tube_type == zmq.DEALER:
if len(data) != 3:
raise TubeMessageError(
f"The received message (tube '{self.tube.name}') "
f"is in unknown format. '{data}'")
data.pop(0)
if len(data) != 2:
raise TubeMessageError(
f"The received message (tube '{self.tube.name}') "
f"is in unknown format. {data}")
self.topic, self.payload = data
self.topic = self.topic.decode('utf-8')
if utf8_decoding:
self.payload = self.payload.decode('utf-8')
def format_message(self):
response = []
if self.tube.tube_type == zmq.ROUTER:
response += [self.identity, b'']
if self.tube.tube_type == zmq.DEALER:
response.append(b'')
response += [self.topic, self.payload]
return [self._format_string(it) for it in response]
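# Frame layout sketch, derived from TubeMessage.parse() and format_message()
# above (the multipart frames on the wire for each tube type):
#   REQ / REP / PUB / SUB : [topic, payload]
#   DEALER                : [b'', topic, payload]
#   ROUTER                : [identity, b'', topic, payload]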
class Tube:
def __init__(self, **kwargs):
"""
Constructor Tube
        :param addr: str - address of the tube
        :param name: str - name of the tube
        :param server: bool - is this tube endpoint the server side (default False)
        :param tube_type: str or int - type of the tube
"""
self.logger = logging.getLogger(self.__class__.__name__)
self._socket: Socket = None
self.context = Context().instance()
self.tube_info = kwargs
self.is_closed = False
self._sockopts = {}
self.sockopts = kwargs.get('sockopts', {})
self.addr = kwargs.get('addr')
self.name = kwargs.get('name')
self._server = \
str(kwargs.get('server', '')).lower() in ('yes', 'true', '1')
self.tube_type = kwargs.get('tube_type')
self.identity = kwargs.get('identity')
self.monitor = kwargs.get('monitor')
self.utf8_decoding = kwargs.get('utf8_decoding', True)
@staticmethod
def get_tube_type_name(tube_type):
if isinstance(tube_type, int):
for key, val in TUBE_TYPE_MAPPING.items():
if tube_type == val:
return key
return tube_type
@property
def addr(self) -> str:
"""
returns the address
"""
return self._addr
@addr.setter
def addr(self, val: str):
"""
set the address (format: 'protocol://interface:port')
"""
if not val:
raise TubeException("The parameter 'addr' is required.")
self._addr = val
@property
def name(self) -> str:
"""
returns name of this tube or a tube address
"""
return self._name if self._name else self._addr
@name.setter
def name(self, val: str):
"""
set the name of this tube
"""
self._name = val
@property
def tube_type(self) -> int:
"""
returns the tube type
"""
return self._tube_type
@property
def tube_type_name(self) -> str:
return self.get_tube_type_name(self._tube_type)
@tube_type.setter
def tube_type(self, val):
"""
set the tube type
@param val : str|int
"""
if not isinstance(val, int):
self._tube_type = TUBE_TYPE_MAPPING.get(val)
if not self._tube_type:
raise TubeException(f"The tube '{self.name}' has got "
f"an unsupported tube_type.")
else:
if val not in TUBE_TYPE_MAPPING.values():
raise TubeException(f"The tube '{self.name}' has got "
f"an unsupported tube_type.")
self._tube_type = val
if self._tube_type == zmq.SUB:
self.add_sock_opt(zmq.SUBSCRIBE, '')
@property
def is_server(self) -> bool:
"""
Is the tube a server side?
"""
return self._server
@property
def is_persistent(self) -> bool:
"""
Is the tube persistent?
"""
return self.tube_type in [zmq.PUB, zmq.SUB, zmq.REP, zmq.ROUTER,
zmq.DEALER]
@property
def is_connected(self):
return self._socket is not None
@property
def raw_socket(self) -> Socket:
"""
        returns a native ZMQ Socket. For persistent tubes this still returns
        the same ZMQ socket.
"""
if self.is_persistent:
if not self._socket:
self._socket = self._create_socket()
return self._socket
else:
return self._create_socket()
def add_sock_opt(self, key, val):
if isinstance(key, str):
key = zmq.__dict__[key]
if isinstance(val, str):
val = val.encode('utf8')
self._sockopts[key] = val
@property
def sockopts(self):
return self._sockopts.copy()
@sockopts.setter
def sockopts(self, opts: dict):
self._sockopts = {}
for key, val in opts.items():
self.add_sock_opt(key, val)
@property
def identity(self):
return self._sockopts.get(zmq.IDENTITY, b'').decode('utf8')
@identity.setter
def identity(self, val):
if val:
self.logger.debug(
f"Set identity '{val}' for tube '{self.name}'."
)
self.add_sock_opt(zmq.IDENTITY, val)
def _create_socket(self) -> Socket:
raw_socket = self.context.socket(self._tube_type)
if self.is_server:
self.logger.debug(
f"The tube '{self.name}' (ZMQ.{self.tube_type_name}) "
f"binds to the port {self.addr}")
raw_socket.bind(self.addr)
else:
self.logger.debug(
f"The tube '{self.name}' (ZMQ.{self.tube_type_name}) "
f"connects to the server {self.addr}")
raw_socket.connect(self.addr)
raw_socket.__dict__['tube'] = self
for opt, val in self._sockopts.items():
raw_socket.setsockopt(opt, val)
return raw_socket
def connect(self):
"""
        For persistent tubes, this opens the connection (connect/bind) to the address.
"""
if self.is_persistent and self._socket is None:
self.raw_socket
self.is_closed = False
return self
def close(self):
"""
        For persistent tubes, this closes the connection.
"""
if self.is_persistent and self._socket:
self.raw_socket.close()
self.is_closed = True
async def send(self, *args, **kwargs):
if args:
if isinstance(args[0], TubeMessage):
return await self.__send_message(*args, **kwargs)
elif isinstance(args[0], str):
return await self.__send_payload(*args, **kwargs)
elif kwargs:
if 'message' in kwargs:
return await self.__send_message(**kwargs)
elif 'topic' in kwargs:
return await self.__send_payload(**kwargs)
raise NotImplementedError("Unknown type of topic")
async def __send_payload(self, topic: str, payload=None, raw_socket=None):
"""
Send payload to topic.
:param topic - topic
:param payload - payload
        :param raw_socket - zmq Socket, used for non-persistent connections
"""
message = TubeMessage(
self,
payload=payload,
topic=topic,
raw_socket=raw_socket if raw_socket else self.raw_socket
)
await self.__send_message(message)
async def __send_message(self, message: TubeMessage):
"""
Send message.
:param message - TubeMessage
"""
raw_msg = message.format_message()
self.logger.debug("Send (tube: %s) to %s", self.name, raw_msg)
if not message.raw_socket or message.raw_socket.closed:
raise TubeConnectionError(
f'The tube {message.tube.name} is already closed.')
try:
await message.raw_socket.send_multipart(raw_msg)
try:
if self.monitor:
await self.monitor.send_message(message)
except Exception as ex:
self.logger.error(
"The error with sending of an outgoing message "
"to the monitor tube.",
exc_info=ex)
except (TypeError, zmq.ZMQError) as ex:
            raise TubeMessageError(
                f"The message '{message}' could not be sent.") from ex
async def request(self, *args, post_send_callback=None,
**kwargs) -> TubeMessage:
"""
Send request
:param request: Optional[TubeMessage]
:param topic: Optional[str]
:param payload: Optional[dict]
:param timeout: int
:param post_send_callback: Optional[Callable]
:param utf8_decoding: bool (default True)
        :return: TubeMessage
"""
if args:
if isinstance(args[0], TubeMessage):
return await self.__request_message(
*args, post_send_callback=post_send_callback, **kwargs
)
elif isinstance(args[0], str):
return await self.__request_payload(
*args, post_send_callback=post_send_callback, **kwargs
)
elif kwargs:
if 'message' in kwargs:
return await self.__request_message(
post_send_callback=post_send_callback,
**kwargs
)
elif 'topic' in kwargs:
return await self.__request_payload(
post_send_callback=post_send_callback,
**kwargs
)
raise NotImplementedError("Unknown type of topic")
async def __request_payload(self, topic: str, payload=None, timeout=None,
post_send_callback=None, utf8_decoding=None):
request = TubeMessage(
self,
payload=payload,
topic=topic,
raw_socket=self.raw_socket
)
return await self.__request_message(
request, timeout=timeout, post_send_callback=post_send_callback,
utf8_decoding=utf8_decoding
)
async def __request_message(self, request: TubeMessage, timeout: int = 30,
post_send_callback=None, utf8_decoding=None):
if self.tube_type != zmq.REQ:
            raise TubeMethodNotSupported(
                f"The tube '{self.name}' (type: '{self.tube_type_name}') "
                f"cannot send requests."
)
try:
await self.send(request)
if post_send_callback:
if asyncio.iscoroutinefunction(post_send_callback):
await post_send_callback(request)
else:
post_send_callback(request)
if await request.raw_socket.poll(timeout * 1000) != 0:
response = await self.receive_data(
raw_socket=request.raw_socket,
utf8_decoding=utf8_decoding
)
if response.topic != request.topic:
raise TubeMessageError(
f"The response comes to different topic "
f"({request.topic} != {response.topic}).")
return response
finally:
if not self.is_persistent:
# self.logger.debug(f"Close tube {self.name}")
if request.raw_socket and not request.raw_socket.closed:
request.raw_socket.close()
if self.is_closed:
raise TubeConnectionError(f'The tube {self.name} was closed.')
raise TubeMessageTimeout(
f"No answer for the request in {timeout}s. Topic: {request.topic}")
async def receive_data(self, raw_socket=None, utf8_decoding=None):
if not raw_socket:
raw_socket = self.raw_socket
raw_data = await raw_socket.recv_multipart()
self.logger.debug(
f"Received (tube {self.name}): {raw_data}")
message = TubeMessage(tube=self, raw_socket=raw_socket)
message.parse(
raw_data,
self.utf8_decoding if utf8_decoding is None else utf8_decoding
)
try:
if self.monitor:
await self.monitor.receive_message(message)
except Exception as ex:
self.logger.error("The error with sending of an incoming message "
"to the monitor tube.",
exc_info=ex)
return message
class TubeMonitor:
CACHE = {}
def __new__(cls, *args, **kwargs):
addr = args[0] if args else kwargs.get('addr')
if addr not in cls.CACHE:
cls.CACHE[addr] = super(TubeMonitor, cls).__new__(cls)
return cls.CACHE[addr]
def __init__(self, addr: str):
        # Because the singleton runs __init__ on every instantiation attempt.
if hasattr(self, 'addr') and self.addr:
return
self.addr = addr
self.context = Context.instance()
self.raw_socket = None
self.enabled = False
self.node = None
self.__tubes = set()
self._time = time.time()
self.logger = logging.getLogger(self.__class__.__name__)
def register_tube(self, tube: Tube):
self.__tubes.add(tube)
tube.monitor = self
async def connect(self):
self.raw_socket = self.context.socket(zmq.PAIR)
self.raw_socket.bind(self.addr)
self.raw_socket.__dict__['monitor'] = self
try:
await self.raw_socket.send(b'__connect__', flags=zmq.NOBLOCK)
except zmq.ZMQError:
# The monitor is not connected
pass
async def close(self):
if self.raw_socket:
try:
await self.raw_socket.send(b'__disconnect__', flags=zmq.NOBLOCK)
await asyncio.sleep(.1)
except zmq.ZMQError:
# The monitor is not connected
pass
self.raw_socket.close()
self.raw_socket = None
self.enabled = False
def __format_tubes_info(self, tube):
res = {
'name': tube.name,
'addr': tube.addr,
'tube_type': tube.tube_type_name,
}
if tube.is_server:
res['server'] = 'yes'
if tube.monitor:
res['monitor'] = tube.monitor.addr
sockopts = tube.sockopts
if sockopts:
res['sockopts'] = {SOCKET_OPTION_VALUE_TO_NAME[k]: v.decode()
for k, v in sockopts.items()}
return res
def __process_cmd(self, raw_data):
self.logger.debug(f"Incoming monitoring command {raw_data}")
if raw_data == b'__enabled__':
self.enabled = True
self._time = time.time()
elif raw_data == b'__disabled__':
self.enabled = False
elif raw_data == b'__get_schema__':
schema = [self.__format_tubes_info(t) for t in self.__tubes]
schema = {'tubes': schema}
self.raw_socket.send_multipart([b'__schema__',
json.dumps(schema).encode()])
async def process(self):
if self.raw_socket:
self.__process_cmd(await self.raw_socket.recv())
def __format_message(self, msg: TubeMessage, direct: str):
now = time.time()
delta_time = str(now - self._time).encode()
self._time = now
return [delta_time, msg.tube.name.encode(), direct.encode()] + \
msg.format_message()[-2:]
async def send_message(self, msg: TubeMessage):
if self.raw_socket and self.enabled:
row_msg = self.__format_message(msg, '>')
await self.raw_socket.send_multipart(row_msg)
async def receive_message(self, msg: TubeMessage):
if self.raw_socket and self.enabled:
row_msg = self.__format_message(msg, '<')
await self.raw_socket.send_multipart(row_msg)
class TubeNode:
__TUBE_CLASS = Tube
__MONITOR_CLASS = TubeMonitor
def __init__(self, *, schema=None, warning_not_mach_topic=True):
self.logger = logging.getLogger(self.__class__.__name__)
self._tubes = TopicMatcher()
self._callbacks = TopicMatcher()
self.__monitors = set()
if schema:
self.parse_schema(schema)
self._stop_main_loop = False
self.warning_not_mach_topic = warning_not_mach_topic
async def __aenter__(self):
await self.connect()
args = {} if LESS38 else {'name': 'zmq/main'}
asyncio.create_task(self.start(), **args)
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
self.stop()
await self.close()
@property
def tubes(self) -> [Tube]:
"""
returns a list of all registered tubes
"""
return flatten(self._tubes.values())
async def connect(self):
"""
opens all persistent connections
"""
for tube in self.tubes:
tube.connect()
for monitor in self.__monitors:
await monitor.connect()
async def close(self):
"""
close all persistent connections
"""
for tube in self.tubes:
tube.close()
for monitor in self.__monitors:
await monitor.close()
def parse_schema(self, schema):
"""
parses tubes from configuration
"""
if 'tubes' in schema:
for tube_info in schema['tubes']:
monitor = None
if 'monitor' in tube_info:
monitor = self.__MONITOR_CLASS(tube_info['monitor'])
del tube_info['monitor']
tube = self.__TUBE_CLASS(**tube_info)
self.register_tube(tube, tube_info.get('topics', []))
if monitor:
self.register_monitor(tube, monitor)
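    # Illustrative schema sketch (keys inferred from Tube and parse_schema
    # above; the names and addresses below are made-up examples):
    #
    #   tubes:
    #     - name: ServerRouter
    #       addr: ipc:///tmp/router.pipe
    #       server: yes
    #       tube_type: ROUTER
    #       topics: ['foo/#']
    #       monitor: ipc:///tmp/monitor.pipe
    #     - name: Client
    #       addr: ipc:///tmp/router.pipe
    #       tube_type: REQ
    #       topics: ['foo/#']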
def register_monitor(self, tube: Tube, monitor: TubeMonitor):
"""
Register monitor to tube
:param tube: Tube
        :param monitor: TubeMonitor
"""
monitor.register_tube(tube)
monitor.node = self
self.__monitors.add(monitor)
self.logger.info(f"The tube '{tube.name}' was registered to "
f"the monitor: {monitor.addr}")
def get_tube_by_topic(self, topic: str, types=None) -> Tube:
"""
returns the Tube which is assigned to topic.
Optional: we can specify a type of tube.
"""
res = self._tubes.match(topic)
if res and types:
res = [t for t in res if t.tube_type in types]
if not res:
return None
if isinstance(res, list):
res = res[-1]
return res
def filter_tube_by_topic(self, topic: str, types=None) -> [(str, Tube)]:
tubes = self._tubes.filter(topic)
res = {}
for top, tts in tubes:
for tt in tts:
if not types or tt.tube_type in types:
res[top] = tt
return res
def get_tube_by_name(self, name: str) -> Tube:
"""
returns the Tube with the name
"""
tubes = flatten(self._tubes.values())
for tube in tubes:
if tube.name == name:
return tube
return None
def register_tube(self, tube: Tube, topics: [str]):
"""
registers list of topics to the Tube
"""
if isinstance(topics, str):
topics = [topics]
for topic in topics:
tubes = self._tubes.get_topic(topic) or []
tubes.append(tube)
self.logger.debug(f"The tube '{tube.name}' was registered to "
f"the topic: {topic}")
self._tubes.set_topic(topic, tubes)
def get_callback_by_topic(self, topic: str, tube=None) -> Callable:
"""
        Returns the callbacks registered for the topic.
        If any of the callbacks are assigned to the given tube, only those
        are returned. Otherwise, all unassigned callbacks are returned.
"""
callbacks = []
callbacks_for_tube = []
for clb in self._callbacks.match(topic) or []:
if 'tube' not in clb.__dict__:
callbacks.append(clb)
elif clb.__dict__['tube'] == tube:
callbacks_for_tube.append(clb)
return callbacks_for_tube if callbacks_for_tube else callbacks
async def send(self, topic: str, payload=None, tube=None):
if not tube:
tube = self.get_tube_by_topic(topic, [zmq.DEALER])
if not tube:
raise TubeTopicNotConfigured(f'The topic "{topic}" is not '
f'assigned to any Tube for '
f'dealer.')
await tube.send(topic, payload)
async def request(self, topic: str, payload=None, timeout=30,
post_send_callback=None, utf8_decoding=None
) -> TubeMessage:
tube = self.get_tube_by_topic(topic, [zmq.REQ])
if not tube:
raise TubeTopicNotConfigured(f'The topic "{topic}" is not assigned '
f'to any Tube for request.')
res = await tube.request(topic, payload, timeout=timeout,
post_send_callback=post_send_callback,
utf8_decoding=utf8_decoding)
return res
async def publish(self, topic: str, payload=None):
"""
        With asyncio, the first message is very often lost.
The workaround is to connect the tube manually as soon as possible.
"""
tube = self.get_tube_by_topic(topic, [zmq.PUB])
if not tube:
raise TubeTopicNotConfigured(f'The topic "{topic}" is not assigned '
f'to any Tube for publishing.')
await self.send(topic, payload, tube)
def subscribe(self, topic: str, fce: Callable):
topic_tubes = self.filter_tube_by_topic(topic, [zmq.SUB])
if not topic_tubes:
raise TubeTopicNotConfigured(f'The topic "{topic}" is not assigned '
f'to any Tube for subscribe.')
for tube_topic, tube in topic_tubes.items():
self.register_handler(tube_topic, fce, tube=tube)
def register_handler(self, topic: str, fce: Callable, tube: Tube = None):
"""
        Multiple handlers can be registered for SUB, and all of them will be
        executed. For REP, ROUTER and DEALER, only the last registered
        handler is executed.
If we want to use DEALER as server and client on the same node,
we have to specify which tube will be used for this handler.
:param topic: str
:param fce: Callable
:param tube: Tube - only for the case DEALER x DEALER on the same node.
"""
if tube:
fce.__dict__['tube'] = tube
self._callbacks.get_topic(topic, set_default=[]).append(fce)
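    # Illustrative sketch (assumed tube and handler names): when two DEALER
    # tubes on the same node share a topic, bind each handler to its tube:
    #
    #   node.register_handler('status/#', on_status,
    #                         tube=node.get_tube_by_name('dealer-a'))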
def stop(self):
self._stop_main_loop = True
async def start(self):
async def _callback_wrapper(_callback, _request: TubeMessage):
response = await _callback(_request)
if not isinstance(response, TubeMessage):
if _request.tube.tube_type in [zmq.ROUTER]:
                    raise TubeMessageError(
                        f"The response of the {_request.tube.tube_type_name} "
                        f"callback has to be an instance of the TubeMessage class.")
_payload = response
response = _request.create_response()
response.payload = _payload
else:
if _request.tube.tube_type in [zmq.ROUTER] and\
response.request.identity != response.identity:
                    raise TubeMessageError(
                        "The TubeMessage response object was not created "
                        "from the request object.")
await _request.tube.send(response)
poller = Poller()
loop = asyncio.get_event_loop()
run_this_thread = False
for tube in self.tubes:
if tube.tube_type in [zmq.SUB, zmq.REP, zmq.ROUTER, zmq.DEALER]:
poller.register(tube.raw_socket, zmq.POLLIN)
run_this_thread = True
for monitor in self.__monitors:
poller.register(monitor.raw_socket, zmq.POLLIN)
run_this_thread = True
if not run_this_thread:
            self.logger.debug("The main loop is disabled; "
                              "no supported tube is registered.")
return
self.logger.info("The main loop was started.")
while not self._stop_main_loop:
try:
events = await poller.poll(timeout=100)
except zmq.error.ZMQError:
# This happens during shutdown
continue
# print(events)
for event in events:
raw_socket = event[0]
if isinstance(raw_socket, object) and \
'monitor' in raw_socket.__dict__:
monitor = raw_socket.__dict__['monitor']
await monitor.process()
continue
tube: Tube = raw_socket.__dict__['tube']
request = await tube.receive_data(raw_socket=raw_socket)
req_tubes = self._tubes.match(request.topic)
if req_tubes and tube not in req_tubes:
# This message is not for this node.
# The topic is not registered for this node.
continue
callbacks = self.get_callback_by_topic(request.topic, tube)
if not callbacks:
if self.warning_not_mach_topic:
self.logger.warning(
f"Incoming message does not match any topic, "
f"it is ignored (topic: {request.topic})"
)
continue
if tube.tube_type == zmq.SUB:
args = {} if LESS38 else {'name': 'zmq/sub'}
for callback in callbacks:
loop.create_task(callback(request), **args)
elif tube.tube_type == zmq.REP:
args = {} if LESS38 else {'name': 'zmq/rep'}
loop.create_task(
_callback_wrapper(callbacks[-1], request),
**args)
elif tube.tube_type == zmq.ROUTER:
args = {} if LESS38 else {'name': 'zmq/router'}
loop.create_task(
_callback_wrapper(callbacks[-1], request),
**args
)
elif tube.tube_type == zmq.DEALER:
args = {} if LESS38 else {'name': 'zmq/dealer'}
loop.create_task(callbacks[-1](request), **args)
        self.logger.info("The main loop was ended.")
# ==== end of zmq_tubes/manager.py (zmq-tubes 1.14.0) ====
__author__ = ('Douglas Creager <[email protected]>',
'Michal Nazarewicz <[email protected]>')
__license__ = 'This file is placed into the public domain.'
__maintainer__ = 'Michal Nazarewicz'
__email__ = '[email protected]'
__all__ = ('getVersion',)
import re
import subprocess
import sys
RELEASE_VERSION_FILE = 'RELEASE-VERSION'
# http://www.python.org/dev/peps/pep-0386/
_PEP386_SHORT_VERSION_RE = r'\d+(?:\.\d+)+(?:(?:[abc]|rc)\d+(?:\.\d+)*)?'
_PEP386_VERSION_RE = r'^%s(?:\.post\d+)?(?:\.dev\d+)?$' % (
_PEP386_SHORT_VERSION_RE)
_GIT_DESCRIPTION_RE = r'^v(?P<ver>%s)-(?P<commits>\d+)-g(?P<sha>[\da-f]+)$' % (
_PEP386_SHORT_VERSION_RE)
def readGitVersion():
try:
proc = subprocess.Popen(('git', 'describe', '--long',
'--match', 'v[0-9]*.*'),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
data, _ = proc.communicate()
if proc.returncode:
return None
        ver = data.splitlines()[0].strip().decode()
proc = subprocess.Popen(('git', 'rev-parse', '--abbrev-ref', 'HEAD'),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
branch, _ = proc.communicate()
if proc.returncode:
return None
except:
return None
if not ver:
return None
m = re.search(_GIT_DESCRIPTION_RE, ver)
if not m:
sys.stderr.write('version: git description (%s) is invalid, '
'ignoring\n' % ver)
return None
commits = int(m.group('commits'))
if not commits:
version = m.group('ver')
else:
version = '%s.post%d' % (
m.group('ver'), commits)
        if branch.strip() != b'master':
version += '.dev%d' % int(m.group('sha'), 16)
return version
def readReleaseVersion():
try:
fd = open(RELEASE_VERSION_FILE)
try:
ver = fd.readline().strip()
finally:
fd.close()
if not re.search(_PEP386_VERSION_RE, ver):
sys.stderr.write('version: release version (%s) is invalid, '
'will use it anyway\n' % ver)
return ver
except:
return None
def writeReleaseVersion(version):
fd = open(RELEASE_VERSION_FILE, 'w')
fd.write('%s\n' % version)
fd.close()
def getVersion():
release_version = readReleaseVersion()
version = readGitVersion() or release_version
if not version:
raise ValueError('Cannot find the version number')
if version != release_version:
writeReleaseVersion(version)
return version
if __name__ == '__main__':
    print(getVersion())
# ==== end of version.py (zmq_helpers 0.1) ====
import sys
import argparse
import logging
import pickle
import random
import time
from threading import Thread, current_thread, enumerate as t_enumerate
from zmqbus import Bus, Connection, halt, Message
from zmqbus.device import (Device, Pulse, Scheduler, ScheduledMessage,
Clock, PerfMeter, Dispatcher)
from zmqbus.debug import Sniffer, ControlSniffer, PulseTracker
try:
import chromalog
chromalog.basicConfig(
level=logging.DEBUG,
format='%(asctime)s %(threadName)s: %(message)s')
except ModuleNotFoundError:
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s %(levelname).1s %(threadName)s: %(message)s')
logger = logging.getLogger(__name__)
class DemoDispatcher(Dispatcher):
def __init__(self):
super().__init__('DemoDispatcher')
self._busy_workers = set()
def _busy_worker_logger(self, conn: Connection, msg: Message):
logger.info('%r has gone busy for %.1f seconds',
msg.sender, msg.payload)
self._busy_workers.add(msg.sender)
def _free_worker_logger(self, conn: Connection, msg: Message):
logger.info('%r has finished its %.1f seconds job',
msg.sender, msg.payload)
self._busy_workers.discard(msg.sender)
def _perf_meter_logger(self, conn: Connection, msg: Message):
logger.info('Average latency %.2f ms measured by %r from '
'%r after %d messages',
msg.payload.avg_ms, msg.sender, msg.payload.clock,
msg.payload.samples)
def _init_callbacks(self):
self.add_callback('PerfMeter', self._perf_meter_logger)
self.add_callback('worker.busy', self._busy_worker_logger)
self.add_callback('worker.free', self._free_worker_logger)
def init(self, conn):
super().init(conn)
self._busy_workers.clear()
def done(self, conn):
super().done(conn)
logger.info('Finished')
if self._busy_workers:
logger.warning('The following workers are still busy: %s',
', '.join([repr(w) for w in self._busy_workers]))
def worker_main(address, authkey, me, delay_secs, workers):
prng = random.Random()
conn = Connection(address, authkey=authkey, name=f'Worker{me}')
conn.subscribe('workers')
def do_something(lengthy):
if lengthy:
secs = delay_secs + (prng.random() * delay_secs)
conn.send('worker.busy', payload=secs)
logger.info('Going very busy for %.1f seconds', secs)
else:
secs = 0.5 + prng.random()
            # Note: we don't call conn.sleep(), because we want to
            # simulate a busy task that does not check connection traffic.
time.sleep(secs)
if lengthy:
conn.send('worker.free', payload=secs)
            logger.info('Back to work after being busy for %.1f seconds',
secs)
prob = 0.1 / workers
logger.info('Starting with %.2f probability of something happening', prob)
try:
while conn.is_alive():
try:
msg = conn.recv(delay_secs * 0.5)
except TimeoutError:
msg = None
if msg:
if msg.to is None:
to = 'all workers'
elif msg.to == conn.name:
to = 'me'
else:
to = repr(msg.to)
logger.info('Got %r from %r sent to %s',
msg.payload, msg.sender, to)
if workers > 1 and prng.random() < prob:
logger.info('Sending message to all workers')
conn.send('workers', "Let's work")
do_something(lengthy=prng.random() < prob)
if workers > 1 and prng.random() < prob:
while True:
peer = prng.randint(0, workers - 1)
if peer != me:
break
logger.info('Saying hello to Worker%d', peer)
conn.send('workers', 'hello', to=f'Worker{peer}')
conn.sleep(1 + (prng.random() * 2))
except BrokenPipeError:
pass
finally:
conn.shutdown()
logger.info('Finished')
def get_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
group = parser.add_argument_group('bus options')
group.add_argument('-a', '--address',
help='the bus address')
group.add_argument('-l', '--load',
metavar='FILE',
help='load pickled (address, authkey) from %(metavar)s')
group.add_argument('--ping-interval',
type=int,
default=10,
help='bus ping interval in seconds')
group.add_argument('--ping-timeout',
type=int,
default=10,
help='bus ping timeout in seconds')
group = parser.add_argument_group('pulse threads options')
group.add_argument('-P', '--pulses',
type=int,
default=3,
help='number of pulse threads')
group.add_argument('--pulse-wait',
type=int,
default=20,
help='wait time between pulses')
group.add_argument('--pulse-jitter',
type=float,
default=0.20,
help='pulse jitter')
group = parser.add_argument_group('worker threads options')
group.add_argument('-w', '--workers',
type=int,
default=3,
help='number of worker threads')
group = parser.add_argument_group('scheduler threads options')
group.add_argument('-S', '--schedulers',
type=int,
default=3,
help='number of messages to schedule')
group.add_argument('--scheduler-messages',
type=int,
default=20,
help='number of messages to schedule')
group = parser.add_argument_group('demo options')
group.add_argument('-s', '--speed',
default='auto',
choices=('auto', 'slower', 'slow',
'normal', 'fast', 'faster'),
help='demo speed')
group.add_argument('--no-clock',
action='store_true',
help='do not start a clock thread')
return parser.parse_args()
def get_random_workers(workers):
return random.sample([f'Worker{i}' for i in range(workers)],
random.randint(1, workers))
def wait_all_threads(timeout):
while True:
active = set(t for t in t_enumerate() if t is not current_thread())
for t in active.copy():
t.join(timeout=timeout)
if not t.is_alive():
active.remove(t)
if not active:
break
logger.warning('The following threads are still alive: %s',
', '.join([repr(a.name) for a in active]))
def main():
args = get_args()
speed_mult: float
if args.speed == 'slower':
speed_mult = 10
elif args.speed == 'slow':
speed_mult = 4
elif args.speed == 'normal':
speed_mult = 1
elif args.speed == 'fast':
speed_mult = 0.2
elif args.speed == 'faster':
speed_mult = 0.05
else:
assert args.speed == 'auto'
speed_mult = (args.workers / 3) * (args.pulses / 3)
threads = []
if args.load and args.address:
sys.exit('Cannot use --load and --address at the same time')
if args.load:
with open(args.load, 'rb') as fd:
(address, authkey) = pickle.load(fd)
else:
# Create the bus in the main thread so we can get its address.
bus = Bus(args.address or 'tcp://127.0.0.1:*',
ping_interval_secs=args.ping_interval,
ping_timeout_secs=args.ping_timeout)
threads.append(Thread(name='Bus', target=bus.run_forever))
address = bus.address
authkey = bus.authkey
dev: Device
dev = DemoDispatcher()
threads.append(Thread(name=dev.name, target=dev, args=(address, authkey)))
if not args.no_clock:
dev = Clock(name='Clock')
threads.append(Thread(name=dev.name, target=dev,
args=(address, authkey)))
dev = PerfMeter(name='PerfMeter', sender='Clock', )
threads.append(Thread(name=dev.name, target=dev,
args=(address, authkey)))
dev = Sniffer(name='Sniffer')
threads.append(Thread(name=dev.name, target=dev,
args=(address, authkey)))
dev = ControlSniffer(name='ControlSniffer')
threads.append(Thread(name=dev.name, target=dev,
args=(address, authkey)))
if args.pulses:
dev = PulseTracker(name='PulseTracker', topic='Pulse')
threads.append(Thread(name=dev.name, target=dev,
args=(address, authkey)))
for i in range(args.pulses):
dev = Pulse(name=f'Pulse{i}',
wait_secs=args.pulse_wait * speed_mult,
jitter=args.pulse_jitter)
threads.append(Thread(name=dev.name, target=dev,
args=(address, authkey)))
for i in range(args.schedulers):
messages = []
# We want to stop after roughly 2 minutes.
delay = 120 / args.scheduler_messages
# Probability to send a message to some worker.
try:
prob = 0.5 / (args.workers - 1)
except ZeroDivisionError:
prob = 0
for j in range(args.scheduler_messages):
messages.append(
ScheduledMessage(secs=(delay
* (0.5 + random.random())),
topic='scheduler',
payload=f'Scheduled message #{j}'))
if random.random() < prob:
messages.append(
ScheduledMessage(secs=(delay * random.random()),
topic='workers',
payload='Hi there',
to=get_random_workers(args.workers)))
if i == 0:
# First schedule will have one extra message, and halt the
# bus.
messages.append(
ScheduledMessage(secs=delay,
topic='schedule',
payload='Halting the bus'))
after = lambda conn: halt(conn.address, authkey) # noqa: E731
else:
after = None
dev = Scheduler(
name=f'Scheduler{i}',
            before=lambda conn, i=i: conn.send('scheduler',
                                               f'Before scheduler{i}'),
messages=messages.copy(),
after=after
)
threads.append(Thread(name=f'Scheduler{i}',
target=dev, args=(address, authkey)))
for i in range(args.workers):
threads.append(Thread(name=f'Worker{i}',
target=worker_main,
args=(address,
authkey,
i,
args.ping_timeout,
args.workers)))
for t in threads:
t.start()
if args.speed != 'faster':
time.sleep(1 * speed_mult)
conn = Connection(address, name='demo', authkey=authkey)
try:
while conn.is_alive():
conn.poll(None)
except BrokenPipeError:
pass
except KeyboardInterrupt:
halt(address, authkey, timeout_ms=args.ping_timeout * 2 * 1_000)
finally:
wait_all_threads(args.ping_timeout * 2)
logger.info('Demo finished')
if __name__ == '__main__':
    sys.exit(main())
# ==== end of demo.py (zmqbus 0.1.0b0) ====
A bus implementation for Python using ZeroMQ
============================================
_zmqbus_ is a package for Python that allows communication between
collaborative processes in a bus fashion on the same machine. The bus
allows connected parties to publish and subscribe to messages, with the
ability to direct messages to specific connections. It also supports
request/response messaging for remote procedure calls (RPC), where
connections can send requests to and receive responses from well-known
addresses registered in the bus.
**IMPORTANT**: _zmqbus_ is still beta software. Use in production is
strongly discouraged.
Requirements
------------
- Python 3.7 or above
- pyzmq
Installation
------------
You can install _zmqbus_ using pip:
pip install zmqbus
Usage
-----
See [demo.py](demo.py) for usage examples.
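Below is a rough sketch of the core API as exercised by `demo.py` (the
connection names and the topic are made up, and the exact call signatures
may differ between releases; treat `demo.py` as the authoritative reference):

    from threading import Thread
    from zmqbus import Bus, Connection, halt

    # Start a bus in a background thread.
    bus = Bus('tcp://127.0.0.1:*')
    Thread(target=bus.run_forever, daemon=True).start()

    # Connect two parties to the bus.
    pub = Connection(bus.address, authkey=bus.authkey, name='publisher')
    sub = Connection(bus.address, authkey=bus.authkey, name='subscriber')
    sub.subscribe('greetings')

    pub.send('greetings', 'hello, bus')  # publish to a topic
    msg = sub.recv(5)                    # raises TimeoutError after 5 seconds
    print(msg.sender, msg.payload)

    halt(bus.address, bus.authkey)       # ask the bus to shut down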
License
-------
See the [LICENSE.txt file](LICENSE.txt) for details.
Bug? Critics? Suggestions?
--------------------------
Go to https://github.com/flaviovs/zmqbus
<!-- ==== end of README.md (zmqbus 0.1.0b0) ==== -->
import os
import sys
import time
import fnmatch
import tempfile
import tarfile
from distutils import log
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
try:
import subprocess
def _python_cmd(*args):
args = (sys.executable,) + args
return subprocess.call(args) == 0
except ImportError:
# will be used for python 2.3
def _python_cmd(*args):
args = (sys.executable,) + args
# quoting arguments if windows
if sys.platform == 'win32':
def quote(arg):
if ' ' in arg:
return '"%s"' % arg
return arg
args = [quote(arg) for arg in args]
return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
DEFAULT_VERSION = "0.6.24"
DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
SETUPTOOLS_FAKED_VERSION = "0.6c11"
SETUPTOOLS_PKG_INFO = """\
Metadata-Version: 1.0
Name: setuptools
Version: %s
Summary: xxxx
Home-page: xxx
Author: xxx
Author-email: xxx
License: xxx
Description: xxx
""" % SETUPTOOLS_FAKED_VERSION
def _install(tarball):
# extracting the tarball
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
tar = tarfile.open(tarball)
_extractall(tar)
tar.close()
# going in the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
# installing
log.warn('Installing Distribute')
if not _python_cmd('setup.py', 'install'):
log.warn('Something went wrong during the installation.')
log.warn('See the error message above.')
finally:
os.chdir(old_wd)
def _build_egg(egg, tarball, to_dir):
# extracting the tarball
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
tar = tarfile.open(tarball)
_extractall(tar)
tar.close()
# going in the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
# building an egg
log.warn('Building a Distribute egg in %s', to_dir)
_python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
finally:
os.chdir(old_wd)
# returning the result
log.warn(egg)
if not os.path.exists(egg):
raise IOError('Could not build the egg.')
def _do_download(version, download_base, to_dir, download_delay):
egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
% (version, sys.version_info[0], sys.version_info[1]))
if not os.path.exists(egg):
tarball = download_setuptools(version, download_base,
to_dir, download_delay)
_build_egg(egg, tarball, to_dir)
sys.path.insert(0, egg)
import setuptools
setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, download_delay=15, no_fake=True):
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
was_imported = 'pkg_resources' in sys.modules or \
'setuptools' in sys.modules
try:
try:
import pkg_resources
if not hasattr(pkg_resources, '_distribute'):
if not no_fake:
_fake_setuptools()
raise ImportError
except ImportError:
return _do_download(version, download_base, to_dir, download_delay)
try:
pkg_resources.require("distribute>="+version)
return
except pkg_resources.VersionConflict:
e = sys.exc_info()[1]
if was_imported:
sys.stderr.write(
"The required version of distribute (>=%s) is not available,\n"
"and can't be installed while this script is running. Please\n"
"install a more recent version first, using\n"
"'easy_install -U distribute'."
"\n\n(Currently using %r)\n" % (version, e.args[0]))
sys.exit(2)
else:
del pkg_resources, sys.modules['pkg_resources'] # reload ok
return _do_download(version, download_base, to_dir,
download_delay)
except pkg_resources.DistributionNotFound:
return _do_download(version, download_base, to_dir,
download_delay)
finally:
if not no_fake:
_create_fake_setuptools_pkg_info(to_dir)
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, delay=15):
"""Download distribute from a specified location and return its filename
`version` should be a valid distribute version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download
attempt.
"""
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
tgz_name = "distribute-%s.tar.gz" % version
url = download_base + tgz_name
saveto = os.path.join(to_dir, tgz_name)
src = dst = None
if not os.path.exists(saveto): # Avoid repeated downloads
try:
log.warn("Downloading %s", url)
src = urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = src.read()
dst = open(saveto, "wb")
dst.write(data)
finally:
if src:
src.close()
if dst:
dst.close()
return os.path.realpath(saveto)
def _no_sandbox(function):
def __no_sandbox(*args, **kw):
try:
from setuptools.sandbox import DirectorySandbox
if not hasattr(DirectorySandbox, '_old'):
def violation(*args):
pass
DirectorySandbox._old = DirectorySandbox._violation
DirectorySandbox._violation = violation
patched = True
else:
patched = False
except ImportError:
patched = False
try:
return function(*args, **kw)
finally:
if patched:
DirectorySandbox._violation = DirectorySandbox._old
del DirectorySandbox._old
return __no_sandbox
def _patch_file(path, content):
"""Will backup the file then patch it"""
existing_content = open(path).read()
if existing_content == content:
# already patched
log.warn('Already patched.')
return False
log.warn('Patching...')
_rename_path(path)
f = open(path, 'w')
try:
f.write(content)
finally:
f.close()
return True
_patch_file = _no_sandbox(_patch_file)
def _same_content(path, content):
return open(path).read() == content
def _rename_path(path):
new_name = path + '.OLD.%s' % time.time()
log.warn('Renaming %s into %s', path, new_name)
os.rename(path, new_name)
return new_name
def _remove_flat_installation(placeholder):
if not os.path.isdir(placeholder):
        log.warn('Unknown installation at %s', placeholder)
return False
found = False
for file in os.listdir(placeholder):
if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
found = True
break
if not found:
log.warn('Could not locate setuptools*.egg-info')
return
log.warn('Removing elements out of the way...')
pkg_info = os.path.join(placeholder, file)
if os.path.isdir(pkg_info):
patched = _patch_egg_dir(pkg_info)
else:
patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
if not patched:
log.warn('%s already patched.', pkg_info)
return False
# now let's move the files out of the way
for element in ('setuptools', 'pkg_resources.py', 'site.py'):
element = os.path.join(placeholder, element)
if os.path.exists(element):
_rename_path(element)
else:
log.warn('Could not find the %s element of the '
'Setuptools distribution', element)
return True
_remove_flat_installation = _no_sandbox(_remove_flat_installation)
def _after_install(dist):
log.warn('After install bootstrap.')
placeholder = dist.get_command_obj('install').install_purelib
_create_fake_setuptools_pkg_info(placeholder)
def _create_fake_setuptools_pkg_info(placeholder):
if not placeholder or not os.path.exists(placeholder):
log.warn('Could not find the install location')
return
pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
setuptools_file = 'setuptools-%s-py%s.egg-info' % \
(SETUPTOOLS_FAKED_VERSION, pyver)
pkg_info = os.path.join(placeholder, setuptools_file)
if os.path.exists(pkg_info):
log.warn('%s already exists', pkg_info)
return
log.warn('Creating %s', pkg_info)
f = open(pkg_info, 'w')
try:
f.write(SETUPTOOLS_PKG_INFO)
finally:
f.close()
pth_file = os.path.join(placeholder, 'setuptools.pth')
log.warn('Creating %s', pth_file)
f = open(pth_file, 'w')
try:
f.write(os.path.join(os.curdir, setuptools_file))
finally:
f.close()
_create_fake_setuptools_pkg_info = _no_sandbox(_create_fake_setuptools_pkg_info)
def _patch_egg_dir(path):
# let's check if it's already patched
pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
if os.path.exists(pkg_info):
if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
log.warn('%s already patched.', pkg_info)
return False
_rename_path(path)
os.mkdir(path)
os.mkdir(os.path.join(path, 'EGG-INFO'))
pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
f = open(pkg_info, 'w')
try:
f.write(SETUPTOOLS_PKG_INFO)
finally:
f.close()
return True
_patch_egg_dir = _no_sandbox(_patch_egg_dir)
def _before_install():
log.warn('Before install bootstrap.')
_fake_setuptools()
def _under_prefix(location):
if 'install' not in sys.argv:
return True
args = sys.argv[sys.argv.index('install')+1:]
for index, arg in enumerate(args):
for option in ('--root', '--prefix'):
if arg.startswith('%s=' % option):
                top_dir = arg.split('=', 1)[-1]
return location.startswith(top_dir)
elif arg == option:
if len(args) > index:
top_dir = args[index+1]
return location.startswith(top_dir)
if arg == '--user' and USER_SITE is not None:
return location.startswith(USER_SITE)
return True
def _fake_setuptools():
log.warn('Scanning installed packages')
try:
import pkg_resources
except ImportError:
# we're cool
log.warn('Setuptools or Distribute does not seem to be installed.')
return
ws = pkg_resources.working_set
try:
setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools',
replacement=False))
except TypeError:
# old distribute API
setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools'))
if setuptools_dist is None:
log.warn('No setuptools distribution found')
return
# detecting if it was already faked
setuptools_location = setuptools_dist.location
log.warn('Setuptools installation detected at %s', setuptools_location)
    # if --root or --prefix was provided, and if
# setuptools is not located in them, we don't patch it
if not _under_prefix(setuptools_location):
log.warn('Not patching, --root or --prefix is installing Distribute'
' in another location')
return
    # let's see if it's an egg
if not setuptools_location.endswith('.egg'):
log.warn('Non-egg installation')
res = _remove_flat_installation(setuptools_location)
if not res:
return
else:
log.warn('Egg installation')
pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
if (os.path.exists(pkg_info) and
_same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
log.warn('Already patched.')
return
log.warn('Patching...')
# let's create a fake egg replacing setuptools one
res = _patch_egg_dir(setuptools_location)
if not res:
return
    log.warn('Patching done.')
_relaunch()
def _relaunch():
log.warn('Relaunching...')
# we have to relaunch the process
# pip marker to avoid a relaunch bug
if sys.argv[:3] == ['-c', 'install', '--single-version-externally-managed']:
sys.argv[0] = 'setup.py'
args = [sys.executable] + sys.argv
sys.exit(subprocess.call(args))
def _extractall(self, path=".", members=None):
"""Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers().
"""
import copy
import operator
from tarfile import ExtractError
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 448 # decimal for oct 0700
self.extract(tarinfo, path)
# Reverse sort directories.
if sys.version_info < (2, 4):
def sorter(dir1, dir2):
return cmp(dir1.name, dir2.name)
directories.sort(sorter)
directories.reverse()
else:
directories.sort(key=operator.attrgetter('name'), reverse=True)
# Set correct owner, mtime and filemode on directories.
for tarinfo in directories:
dirpath = os.path.join(path, tarinfo.name)
try:
self.chown(tarinfo, dirpath)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError:
e = sys.exc_info()[1]
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def main(argv, version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall"""
tarball = download_setuptools()
_install(tarball)
if __name__ == '__main__':
main(sys.argv[1:]) | zmqc | /zmqc-0.1.0.tar.gz/zmqc-0.1.0/distribute_setup.py | distribute_setup.py |
## Usage:
# zmqc [-0] [-r | -w] (-b | -c) SOCK_TYPE [-o SOCK_OPT=VALUE...] address [address ...]
## Examples:
# zmqc -rc SUB 'tcp://127.0.0.1:5000'
#
# Subscribe to 'tcp://127.0.0.1:5000', reading messages from it and printing
# them to the console. This will subscribe to all messages by default.
#
# ls | zmqc -wb PUSH 'tcp://*:4000'
#
# Send the name of every file in the current directory as a message from a
# PUSH socket bound to port 4000 on all interfaces. Don't forget to quote the
# address to avoid glob expansion.
#
# zmqc -rc PULL 'tcp://127.0.0.1:5202' | tee $TTY | zmqc -wc PUSH 'tcp://127.0.0.1:5404'
#
# Read messages coming from a PUSH socket bound to port 5202 (note that we're
# connecting with a PULL socket), echo them to the active console, and
# forward them to a PULL socket bound to port 5404 (so we're connecting with
# a PUSH).
#
# zmqc -n 10 -0rb PULL 'tcp://*:4123' | xargs -0 grep 'pattern'
#
# Bind to a PULL socket on port 4123, receive 10 messages from the socket
# (with each message representing a filename), and grep the files for
# `'pattern'`. The `-0` option means messages will be NULL-delimited rather
# than separated by newlines, so that filenames with spaces in them are not
# considered two separate arguments by xargs.
#
# echo "hello" | zmqc -c REQ 'tcp://127.0.0.1:4000'
#
# Send the string "hello" through a REQ socket connected to localhost port
# 4000, print whatever you get back and finish. In this way, REQ sockets can
# be used for a rudimentary form of RPC in shell scripts.
#
# coproc zmqc -b REP 'tcp://*:4000'
# tr -u '[a-z]' '[A-Z]' <&p >&p &
# echo "hello" | zmqc -c REQ 'tcp://127.0.0.1:4000'
#
# First, start a ZeroMQ REP socket listening on port 4000. The 'coproc' shell
# command runs this as a shell coprocess, which allows us to run the next
# line, tr. This will read its input from the REP socket's output, translate
# all lowercase characters to uppercase, and send them back to the REP
# socket's input. This, again, is run in the background. Finally, connect a
# REQ socket to that REP socket and send the string "hello" through it: you
# should just see the string "HELLO" printed on stdout.
## License:
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# For more information, please refer to <http://unlicense.org/>
import argparse
import array
import errno
import itertools
import re
import sys
import zmq
__version__ = '0.0.1'
class ParserError(Exception):
"""An exception which occurred when parsing command-line arguments."""
pass
parser = argparse.ArgumentParser(
prog='zmqc', version=__version__,
usage=
"%(prog)s [-h] [-v] [-0] [-r | -w] (-b | -c)\n "
"SOCK_TYPE [-o SOCK_OPT=VALUE...]\n "
"address [address ...]",
description="zmqc is a small but powerful command-line interface to "
"ZeroMQ. It allows you to create a socket of a given type, bind or "
"connect it to multiple addresses, set options on it, and receive or send "
"messages over it using standard I/O, in the shell or in scripts.",
epilog="This is free and unencumbered software released into the public "
"domain. For more information, please refer to <http://unlicense.org>.",
)
parser.add_argument('-0',
dest='delimiter', action='store_const',
const='\x00', default='\n',
help="Separate messages on input/output should be "
"delimited by NULL characters (instead of newlines). Use "
"this if your messages may contain newlines, and you want "
"to avoid ambiguous message borders.")
parser.add_argument('-n', metavar='NUM',
dest='number', type=int, default=None,
help="Receive/send only NUM messages. By default, zmqc "
"lives forever in 'read' mode, or until the end of input "
"in 'write' mode.")
mode_group = parser.add_argument_group(
title='Mode',
description="Whether to read from or write to the socket. For PUB/SUB "
"sockets, this option is invalid since the behavior will always be write "
"and read respectively. For REQ/REP sockets, zmqc will alternate between "
"reading and writing as part of the request/response cycle.")
mode = mode_group.add_mutually_exclusive_group(required=False)
mode.add_argument('-r', '--read',
dest='mode', action='store_const', const='r',
help="Read messages from the socket onto stdout.")
mode.add_argument('-w', '--write',
dest='mode', action='store_const', const='w',
help="Write messages from stdin to the socket.")
behavior_group = parser.add_argument_group(title='Behavior')
behavior = behavior_group.add_mutually_exclusive_group(required=True)
behavior.add_argument('-b', '--bind',
dest='behavior', action='store_const', const='bind',
help="Bind to the specified address(es).")
behavior.add_argument('-c', '--connect',
dest='behavior', action='store_const', const='connect',
help="Connect to the specified address(es).")
sock_params = parser.add_argument_group(title='Socket parameters')
sock_type = sock_params.add_argument('sock_type', metavar='SOCK_TYPE',
choices=('PUSH', 'PULL', 'PUB', 'SUB', 'REQ', 'REP', 'PAIR'), type=str.upper,
help="Which type of socket to create. Must be one of 'PUSH', 'PULL', "
"'PUB', 'SUB', 'REQ', 'REP' or 'PAIR'. See `man zmq_socket` for an "
"explanation of the different types. 'DEALER' and 'ROUTER' sockets are "
"currently unsupported.")
sock_opts = sock_params.add_argument('-o', '--option',
metavar='SOCK_OPT=VALUE', dest='sock_opts', action='append', default=[],
help="Socket option names and values to set on the created socket. "
"Consult `man zmq_setsockopt` for a comprehensive list of options. Note "
"that you can safely omit the 'ZMQ_' prefix from the option name. If the "
"created socket is of type 'SUB', and no 'SUBSCRIBE' options are given, "
"the socket will automatically be subscribed to everything.")
addresses = sock_params.add_argument('addresses', nargs='+', metavar='address',
help="One or more addresses to bind/connect to. Must be in full ZMQ "
"format (e.g. 'tcp://<host>:<port>')")
def read_until_delimiter(stream, delimiter):
"""
Read from a stream until a given delimiter or EOF, or raise EOFError.
>>> io = StringIO("abcXdefgXfoo")
>>> read_until_delimiter(io, "X")
"abc"
>>> read_until_delimiter(io, "X")
"defg"
>>> read_until_delimiter(io, "X")
"foo"
>>> read_until_delimiter(io, "X")
Traceback (most recent call last):
...
EOFError
"""
output = array.array('c')
c = stream.read(1)
while c and c != delimiter:
output.append(c)
c = stream.read(1)
if not (c or output):
raise EOFError
return output.tostring()
def get_sockopts(sock_opts):
"""
Turn a list of 'OPT=VALUE' into a list of (opt_code, value).
Work on byte string options:
>>> get_sockopts(['SUBSCRIBE=', 'SUBSCRIBE=abc'])
[(6, ''), (6, 'abc')]
Automatically convert integer options to integers:
>>> zmqc.get_sockopts(['LINGER=0', 'LINGER=-1', 'LINGER=50'])
[(17, 0), (17, -1), (17, 50)]
Spew on invalid input:
>>> zmqc.get_sockopts(['LINGER=foo'])
Traceback (most recent call last):
...
zmqc.ParserError: Invalid value for option LINGER: 'foo'
>>> zmqc.get_sockopts(['NONEXISTENTOPTION=blah'])
Traceback (most recent call last):
...
zmqc.ParserError: Unrecognised socket option: 'NONEXISTENTOPTION'
"""
option_coerce = {
int: set(zmq.core.constants.int_sockopts).union(
zmq.core.constants.int64_sockopts),
str: set(zmq.core.constants.bytes_sockopts)
}
options = []
for option in sock_opts:
match = re.match(r'^([A-Z_]+)\=(.*)$', option)
if not match:
raise ParserError("Invalid option spec: %r" % match)
opt_name = match.group(1)
if opt_name.startswith('ZMQ_'):
opt_name = opt_name[4:]
try:
opt_code = getattr(zmq.core.constants, opt_name.upper())
except AttributeError:
raise ParserError("Unrecognised socket option: %r" % (
match.group(1),))
opt_value = match.group(2)
for converter, opt_codes in option_coerce.iteritems():
if opt_code in opt_codes:
try:
opt_value = converter(opt_value)
except (TypeError, ValueError):
raise ParserError("Invalid value for option %s: %r" % (
opt_name, opt_value))
break
options.append((opt_code, opt_value))
return options
def main():
args = parser.parse_args()
# Do some initial validation which is more complex than what can be
# specified in the argument parser alone.
if args.sock_type == 'SUB' and args.mode == 'w':
parser.error("Cannot write to a SUB socket")
elif args.sock_type == 'PUB' and args.mode == 'r':
parser.error("Cannot read from a PUB socket")
elif args.mode is not None and args.sock_type in ('REQ', 'REP'):
parser.error("Cannot choose a read/write mode with a %s socket" %
args.sock_type)
elif args.mode is None and args.sock_type not in ('REQ', 'REP'):
parser.error("one of the arguments -r/--read -w/--write is required")
# We also have to work around the fact that 'required' mutually exclusive
# groups are not enforced when you put them in an argument group other
# than the top-level parser.
if args.behavior is None:
parser.error("one of the arguments -b/--bind -c/--connect is required")
context = zmq.Context.instance()
sock = context.socket(getattr(zmq, args.sock_type))
# Set any specified socket options.
try:
sock_opts = get_sockopts(args.sock_opts)
except ParserError, exc:
parser.error(str(exc))
else:
for opt_code, opt_value in sock_opts:
sock.setsockopt(opt_code, opt_value)
# If we have a 'SUB' socket that's not explicitly subscribed to
# anything, subscribe it to everything.
if (sock.socket_type == zmq.SUB and
not any(opt_code == zmq.SUBSCRIBE
for (opt_code, _) in sock_opts)):
sock.setsockopt(zmq.SUBSCRIBE, '')
# Bind or connect to the provided addresses.
for address in args.addresses:
getattr(sock, args.behavior)(address)
# Live forever if no `-n` argument was given, otherwise die after a fixed
# number of messages.
if args.number is None:
iterator = itertools.repeat(None)
else:
iterator = itertools.repeat(None, args.number)
try:
if args.sock_type == 'REQ':
req_loop(iterator, sock, args.delimiter, sys.stdin, sys.stdout)
elif args.sock_type == 'REP':
rep_loop(iterator, sock, args.delimiter, sys.stdin, sys.stdout)
elif args.mode == 'r':
read_loop(iterator, sock, args.delimiter, sys.stdout)
elif args.mode == 'w':
write_loop(iterator, sock, args.delimiter, sys.stdin)
except StopIteration:
# StopIteration is a sentinel for end of input, iterator exhaustion
# (that is, we've processed the maximum number of messages) or Ctrl-C.
# All need to be handled in the same way.
return
finally:
sock.close()
def req_loop(iterator, sock, delimiter, input, output):
"""Write/read interaction for a REQ socket."""
for _ in iterator:
write(sock, delimiter, input)
read(sock, delimiter, output)
def rep_loop(iterator, sock, delimiter, input, output):
"""Read/write interaction for a REP socket."""
for _ in iterator:
read(sock, delimiter, output)
write(sock, delimiter, input)
def read_loop(iterator, sock, delimiter, output):
"""Continously get messages from the socket and print them on output."""
for _ in iterator:
read(sock, delimiter, output)
def write_loop(iterator, sock, delimiter, input):
"""Continously get messages from input and send them through a socket."""
for _ in iterator:
write(sock, delimiter, input)
def read(sock, delimiter, output):
"""Read one message from a socket onto an output stream."""
try:
message = sock.recv()
output.write(message + delimiter)
output.flush()
except KeyboardInterrupt:
raise StopIteration
except IOError, exc:
if exc.errno == errno.EPIPE:
raise StopIteration
raise
def write(sock, delimiter, input):
"""Write one message from an input stream into a socket."""
try:
message = read_until_delimiter(input, delimiter)
sock.send(message)
except (KeyboardInterrupt, EOFError):
raise StopIteration
if __name__ == '__main__':
main() | zmqc | /zmqc-0.1.0.tar.gz/zmqc-0.1.0/lib/zmqc.py | zmqc.py |
# zmqcli
zmqcli is a small but powerful command-line interface to [ØMQ][zmq] written in Python 3.
It allows you to create a socket of a given type, bind or connect it to multiple
addresses, set options on it, and receive or send messages over it using
standard I/O, in the shell or in scripts.
It's useful for debugging and
experimenting with a wide range of network topologies.
[zmq]: http://www.zeromq.org/
## Installation
pip install zmqcli
## Usage
zmqcli [-h] [-v] [-0] [-r | -w] (-b | -c) SOCK_TYPE [-o SOCK_OPT=VALUE...] address [address ...]
Executing the command as a module is also supported:
python -m zmqcli [-h] [-v] [-0] [-r | -w] (-b | -c) SOCK_TYPE [-o SOCK_OPT=VALUE...] address [address ...]
### Mode
<dl>
<p>
Whether to read from or write to the socket. For PUB/SUB sockets, this
option is invalid since the behavior will always be write and read
respectively. For REQ/REP sockets, zmqcli will alternate between reading and
writing as part of the request/response cycle.
</p>
<dt>-r, --read</dt>
<dd>Read messages from the socket onto stdout.</dd>
<dt>-w, --write</dt>
<dd>Write messages from stdin to the socket.</dd>
</dl>
### Behavior
<dl>
<dt>-b, --bind</dt>
<dd>Bind to the specified address(es).</dd>
<dt>-c, --connect</dt>
<dd>Connect to the specified address(es).</dd>
</dl>
### Socket Parameters
<dl>
<dt>SOCK_TYPE</dt>
<dd>
Which type of socket to create. Must be one of `PUSH`, `PULL`, `PUB`,
`SUB`, `REQ`, `REP` or `PAIR`. See `man zmq_socket` for an explanation of
the different types. `DEALER` and `ROUTER` sockets are currently
unsupported.
</dd>
<dt>-o SOCK_OPT=VALUE, --option SOCK_OPT=VALUE</dt>
<dd>
Socket option names and values to set on the created socket. Consult `man
zmq_setsockopt` for a comprehensive list of options. Note that you can
safely omit the `ZMQ_` prefix from the option name. If the created socket
is of type `SUB`, and no `SUBSCRIBE` options are given, the socket will
automatically be subscribed to everything.
</dd>
<dt>address</dt>
<dd>
One or more addresses to bind/connect to. Must be in full ZMQ format (e.g.
`tcp://<host>:<port>`)
</dd>
</dl>
## Examples
zmqcli -rc SUB 'tcp://127.0.0.1:5000'
Subscribe to `tcp://127.0.0.1:5000`, reading messages from it and printing them
to the console. This will subscribe to all messages by default (you don't need
to set an empty `SUBSCRIBE` option). Alternatively:
zmqcli -rc SUB -o SUBSCRIBE='com.organization.' 'tcp://127.0.0.1:5000'
This will subscribe to all messages starting with `com.organization.`.
* * * *
ls | zmqcli -wb PUSH 'tcp://*:4000'
Send the name of every file in the current directory as a message from a PUSH
socket bound to port 4000 on all interfaces. Don't forget to quote the address
to avoid glob expansion.
* * * *
zmqcli -rc PULL 'tcp://127.0.0.1:5202' | tee $TTY | zmqcli -wc PUSH 'tcp://127.0.0.1:5404'
Read messages coming from a PUSH socket bound to port 5202 (note that we're
connecting with a PULL socket), echo them to the active console, and forward
them to a PULL socket bound to port 5404 (so we're connecting with a PUSH).
* * * *
zmqcli -n 10 -0rb PULL 'tcp://*:4123' | xargs -0 grep 'pattern'
Bind to a PULL socket on port 4123, receive 10 messages from the socket
(with each message representing a filename), and grep the files for
`'pattern'`. The `-0` option means messages will be NULL-delimited rather
than separated by newlines, so that filenames with spaces in them are not
considered two separate arguments by xargs.
* * * *
echo "hello" | zmqcli -c REQ 'tcp://127.0.0.1:4000'
Send the string `hello` through a REQ socket connected to localhost on port
4000, print whatever you get back, and finish. In this way, REQ sockets can
be used for a rudimentary form of RPC in shell scripts.
* * * *
coproc zmqcli -b REP 'tcp://*:4000'
tr -u '[a-z]' '[A-Z]' <&p >&p &
echo "hello" | zmqcli -c REQ 'tcp://127.0.0.1:4000'
First, start a REP socket listening on port 4000. The `coproc` shell command
runs this as a shell coprocess, which allows us to run the next line, tr. This
will read its input from the REP socket's output, translate all lowercase
characters to uppercase, and send them back to the REP socket's input. This,
again, is run in the background. Finally, connect a REQ socket to that REP
socket and send the string `hello` through it: you should just see the string
`HELLO` printed on stdout.
* * * *
Pingy exchange example
Sending ping lines from ping command -> PUSH -> PULL -> PUB -> SUB -> stdout:
Source of ping records:
ping google.com | zmqcli -w -c PUSH tcp://127.0.0.1:5001
Broker:
zmqcli -r -b PULL tcp://*:5001 | zmqcli -w -b PUB tcp://*:5002
Consumer:
python -m zmqcli -r -c SUB tcp://127.0.0.1:5002
The consumer can be run in multiple instances, each reporting the same records.
## Credits
Based on the work of [Zachary Voase](https://github.com/zacharyvoase), the author of the original [zmqc](https://github.com/zacharyvoase/zmqc) tool.
## (Un)license
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
software, either in source code form or as a compiled binary, for any purpose,
commercial or non-commercial, and by any means.
In jurisdictions that recognize copyright laws, the author or authors of this
software dedicate any and all copyright interest in the software to the public
domain. We make this dedication for the benefit of the public at large and to
the detriment of our heirs and successors. We intend this dedication to be an
overt act of relinquishment in perpetuity of all present and future rights to
this software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>
| zmqcli | /zmqcli-0.9.7.tar.gz/zmqcli-0.9.7/README.md | README.md |
# zmqcs
`zmqcs` is a Python package that implements a client-server infrastructure based on the ZeroMQ library. To do so it fixes a few properties:
- There are 2 types of concurrent communications:
    - Request - response: Used to send commands from the client to the server. The server always answers when the command has finished. It is up to the developer to send the answer back before the command finishes if the command takes a long time to execute.
    - Pub - Sub: The server can send data messages to the client. The client has to subscribe to the topics it wants to receive data for and define a callback to be executed every time data is received for that topic.
A full detailed example of how to use the library can be found at https://github.com/IFAEControl/zmqCSExample
All the messages are JSON formatted. | zmqcs | /zmqcs-0.1.1.tar.gz/zmqcs-0.1.1/README.md | README.md
import argparse
import sys
def main():
parser = setupParser()
conf = parser.parse_args()
socket = openSocket(conf)
printLoop(socket)
def setupParser():
parser = argparse.ArgumentParser(
prog = "zmqdump",
description = "dump zmq messages on a socket"
)
parser.add_argument(
"socket_type",
help= "type of zmq socket.",
type = str,
choices = ["SUB","PUB","PUSH","PULL","REQ","REP"]
)
parser.add_argument(
"endpoint",
help="endpoint to listen on messages (tcp://127.0.0.1)",
type = str
)
parser.add_argument(
"-d", "--delay",
help = "initial delay before sendig out messages",
dest = "delay", type = int, default = 0
)
parser.add_argument(
"-hwm",
help="High water mark.",
dest="hwm", type=int, default = 1000
)
parser.add_argument(
"-b", "--bind",
help="bind socket instead of connect",
dest="bind", default = False,
action = "store_true"
)
return parser
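# NOTE: main() above references openSocket() and printLoop(), which are missing
# from this file. The following are minimal sketches, assuming pyzmq is installed;
# the exact semantics of the original functions are unknown, so treat these as
# illustrative stand-ins built only from the arguments defined in setupParser().
def openSocket(conf):
    """Create a zmq socket from the parsed arguments and bind or connect it."""
    import time
    import zmq
    context = zmq.Context.instance()
    socket = context.socket(getattr(zmq, conf.socket_type))
    socket.set_hwm(conf.hwm)
    if conf.bind:
        socket.bind(conf.endpoint)
    else:
        socket.connect(conf.endpoint)
    if conf.socket_type == "SUB":
        # subscribe to everything by default
        socket.setsockopt(zmq.SUBSCRIBE, b"")
    if conf.delay:
        time.sleep(conf.delay)
    return socket

def printLoop(socket):
    """Receive messages forever and print them to stdout."""
    while True:
        print(socket.recv().decode("utf-8", errors="replace"))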
## TESTS ##
def test():
testSetupParser()
def testSetupParser():
parser = setupParser()
parser.print_help()
def pArgStr(s):
print "> " + s
ns = parser.parse_args(s.split())
print ns
return ns
ns = pArgStr("SUB tcp://127.0.0.1:8000")
assert (ns.socket_type == "SUB")
assert (ns.endpoint == "tcp://127.0.0.1:8000")
assert (ns.hwm == 1000)
assert (ns.bind == False)
assert (ns.delay == 0)
ns = pArgStr("PUB tcp://127.0.0.1:8000 -d 200 -b")
assert (ns.socket_type == "PUB")
assert (ns.endpoint == "tcp://127.0.0.1:8000")
assert (ns.hwm == 1000)
assert (ns.bind == True)
assert (ns.delay == 200)
ns = pArgStr("PULL tcp://127.0.0.1:8000")
assert (ns.socket_type == "PULL")
assert (ns.endpoint == "tcp://127.0.0.1:8000")
assert (ns.hwm == 1000)
assert (ns.bind == False)
assert (ns.delay == 0)
ns = pArgStr("PULL tcp://127.0.0.1:8000 -hwm 1")
assert (ns.socket_type == "PULL")
assert (ns.endpoint == "tcp://127.0.0.1:8000")
assert (ns.hwm == 1)
assert (ns.bind == False)
assert (ns.delay == 0)
ns = pArgStr("--bind PUSH -hwm 1 tcp://127.0.0.1:8000 -d 100 ")
assert (ns.socket_type == "PUSH")
assert (ns.endpoint == "tcp://127.0.0.1:8000")
assert (ns.hwm == 1)
assert (ns.bind == True)
assert (ns.delay == 100)
if __name__ == "__main__":
test()
main() | zmqdump | /zmqdump-0.1.tar.gz/zmqdump-0.1/zmqdump.py | zmqdump.py |
# zmqflp
Improvements to the Freelance protocol-based zeromq server/client (Python)
The client and server talk using msgpack, so the api accepts dictionaries as input.
## To create a zmqflp server:
```python
# create the server object (it runs in an asyncio zmq context)
self.server = zmqflp_server.ZMQFLPServer(self.config.identity, self.config.zmq_port)
# use the following code to process messages received by the server and send them back
async def process_messages(self):
(serialized_request, orig_headers) = await self.server.receive()
if serialized_request == 'EXIT':
await self.server.send(orig_headers, 'exiting')
return False
elif serialized_request != "PING":
try:
request = serialized_request
response = self.process_request(request)
await self.server.send(orig_headers, response)
return True
except Exception as e:
logging.exception(e)
return False
return True
```
## To create a client without using a context manager:
```python
# create the client object (this does NOT run in an asyncio context)
self.client = zmqflp_client.ZMQFLPClient(self.config.list_of_servers)
# to send and receive with the client
msg_to_send = {'message': 'hello!', 'other-key': 'stuff goes here'}
status = self.client.send_and_receive(msg_to_send)
```
## To create a client using a context manager (for example, to run on AWS Lambda):
```python
# create the client object (this does NOT run in an asyncio context)
with zmqflp_client.ZMQFLPClient(self.config.list_of_servers) as client:
# to send and receive with the client
msg_to_send = {'message': 'hello!', 'other-key': 'stuff goes here'}
status = client.send_and_receive(msg_to_send)
``` | zmqflp | /zmqflp-0.3.0.tar.gz/zmqflp-0.3.0/README.md | README.md |
import numpy
import zmq
import functools
import uuid
def array_to_msg(nparray):
"""
Convert a numpy ndarray to its multipart zeromq message representation.
The return list is composed of:
0. The string representation of the array element type, i.e. 'float32'
1. The binary string representation of the shape of the array converted to a numpy array with dtype int32
2. The binary string representation of the array
    This information can be used by the receiver code to uniquely recreate
    the original array.
@param nparray: A numpy ndarray
@type nparray: numpy.ndarray
@rtype: list
@return: [dtype, shape, array]
"""
_shape = numpy.array(nparray.shape, dtype=numpy.int32).tostring()
return [nparray.dtype.name, _shape, nparray.tostring()]
def msg_to_info(msg):
    """
    Split a multipart message created by L{array_to_msg} into its components,
    decoding the shape back into a numpy int32 array:
    [dtype_name, shape, binary_data].
    """
    _shape = numpy.fromstring(msg[1], dtype=numpy.int32)
    return [msg[0], _shape, msg[2]]
def msg_to_array(msg):
"""
Parse a list argument as returned by L{array_to_msg} function of this
module, and returns the numpy array contained in the message body.
@param msg: a list as returned by L{array_to_msg} function
@rtype: numpy.ndarray
@return: The numpy array contained in the message
"""
[_dtype, _shape, _bin_msg] = msg_to_info(msg)
return numpy.fromstring(_bin_msg, dtype=_dtype).reshape(tuple(_shape))
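# Example (sketch): receiving a multipart message produced by a function
# decorated with numpy_array_sender below, and rebuilding the array. The
# endpoint and socket type are illustrative assumptions; the message layout
# ([uuid, name] + array_to_msg(array)) is the one used by this module.
#
#   import zmq
#   sock = zmq.Context.instance().socket(zmq.PULL)
#   sock.bind("tcp://127.0.0.1:8765")
#   _uuid, name, dtype, shape, data = sock.recv_multipart()
#   array = msg_to_array([dtype, shape, data])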
def numpy_array_sender(name, endpoint, socket_type=zmq.PUSH):
"""
Decorator Factory
The decorated function will have to return a numpy array, while the
decorator will create a zmq socket of the specified socket type connected
to the specified endpoint.
Each time the function is called the numpy array will be sent over the
instantiated transport after being converted to a multipart message using
L{array_to_msg} function. The multipart message is prepended with a UUID
and the given name as the first two elements.
    #TODO: Would it be good to add the possibility of transmitting arbitrary
metadata? --- Marco Bartolini 27/04/2012
Usage example::
import zmq
import zmqnumpy
import numpy
@zmqnumpy.numpy_array_sender(\"mysender\", \"tcp://127.0.0.1:8765\")
def random_array_generator(min, max, width):
return numpy.random.randint(min, max, width)
@type name: string
@param name: the label of the data stream
@type endpoint: string
@param endpoint: a zmq endpoint made as \"protocol://host:port\"
@param socket_type: a zmq socket type such as zmq.PUSH or zmq.PUB
"""
_context = zmq.Context.instance()
_socket = _context.socket(socket_type)
_socket.connect(endpoint)
_uuid = uuid.uuid4().bytes
def wrapper(fn):
@functools.wraps(fn)
def wrapped(*args, **kwargs):
_data = fn(*args, **kwargs)
_socket.send_multipart([_uuid, name] + array_to_msg(_data))
return wrapped
return wrapper | zmqnumpy | /zmqnumpy-0.1.tar.gz/zmqnumpy-0.1/zmqnumpy.py | zmqnumpy.py |
<div align="center">
<img width="600" src="./logo.png" alt="ZmsAi" />
</div>
---
  
[](https://saythanks.io/to/architdwivedi.off%40gmail.com) [](https://github.com/psf/black)
A command line utility for topic discovery and doc-linking within the Zettelkasten using AI approaches.
## Installation
Install `zmsai` by executing the following command-
```shell
$ pip3 install zmsai
```
## Test Run
Test run using dummy docs (see `./custom`)
```shell
$ zmsai test
```
## Usage
To learn `n` topics in your Zettelkasten at `/path/to/your/zettelkasten/`-
```shell
$ zmsai run -t n -p "/path/to/your/zettelkasten/"
```
This will create a metadata file `meta.zms` storing all the distributions exhibited by the documents in your Zettelkasten.
```shell
[Running the model] it may take a while. Hang tight!
[Data stored] ... kb
```
You can delete your metadata file by executing-
```shell
$ zmsai delete
```
These learnt distributions can be printed using `zmsai display`. You can pass an additional argument `-w`, the number of most frequently occurring words that you want to print from the distributions involving words.
To display doc-topic distribution-
```shell
$ zmsai display -d dt
```
To display topic-word distribution-
```shell
$ zmsai display -w n -d tw
```
To display doc-word distribution-
```shell
$ zmsai display -w n -d dw
```
To display vocabulary-
```shell
$ zmsai display -w n -d voc
```
To display all distributions at once-
```shell
$ zmsai display -w n -d all
```
or simply
```shell
$ zmsai display
```
This will take default value of 5 for `nwords` argument.
## Troubleshooting
If you get `ModuleNotFoundError: No module named 'sklearn'` error with `display`, try installing `scikit-learn` manually.
```shell
$ sudo pip3 install -U scikit-learn
```
Alternatively, if you're on ubuntu, try executing the following command-
```shell
$ zmsai fix-ubuntu
```
Feel free to raise an issue if you feel stuck.
## Manual
```shell
usage: zmsai [-h] [--path [PATH]] [--topics [TOPICS]] [--nwords [NWORDS]] [--distro [DISTRO]] [task]
positional arguments:
task Provide task to perform [default : 'run'] [values : 'run', 'delete', 'display', 'man', 'test', 'fix-ubuntu']
optional arguments:
-h, --help show this help message and exit
--path [PATH], -p [PATH]
Provide directory of text files. [with : 'run'] [default : './custom']
--topics [TOPICS], -t [TOPICS]
How many topics do you expect? [with : 'run'] [default : 'number of docs']
--nwords [NWORDS], -w [NWORDS]
How many words per topic/doc do you want to display? [with : 'display'] [default : 5]
--distro [DISTRO], -d [DISTRO]
What distributions do you want to display? [with : 'display'] [default : all] [values : 'dt', 'tw', 'dw', 'voc', 'all']
```
## Dependency Graph
```shell
attrs==20.2.0
- pytest==6.1.1 [requires: attrs>=17.4.0]
iniconfig==1.1.1
- pytest==6.1.1 [requires: iniconfig]
joblib==0.17.0
- scikit-learn==0.23.2 [requires: joblib>=0.11]
- sklearn==0.0 [requires: scikit-learn]
numpy==1.19.2
- scikit-learn==0.23.2 [requires: numpy>=1.13.3]
- sklearn==0.0 [requires: scikit-learn]
- scipy==1.5.3 [requires: numpy>=1.14.5]
- scikit-learn==0.23.2 [requires: scipy>=0.19.1]
- sklearn==0.0 [requires: scikit-learn]
pip==20.1.1
pluggy==0.13.1
- pytest==6.1.1 [requires: pluggy>=0.12,<1.0]
py==1.9.0
- pytest==6.1.1 [requires: py>=1.8.2]
pyparsing==2.4.7
- packaging==20.4 [requires: pyparsing>=2.0.2]
- pytest==6.1.1 [requires: packaging]
setuptools==46.4.0
six==1.15.0
- packaging==20.4 [requires: six]
- pytest==6.1.1 [requires: packaging]
threadpoolctl==2.1.0
- scikit-learn==0.23.2 [requires: threadpoolctl>=2.0.0]
- sklearn==0.0 [requires: scikit-learn]
toml==0.10.1
- pytest==6.1.1 [requires: toml]
wheel==0.34.2
```
## Contribution
Contributions are welcome.
## License
[GNU General Public License v3 (GPLv3)](https://www.gnu.org/licenses/gpl-3.0) | zmsai | /zmsai-0.1.5.tar.gz/zmsai-0.1.5/README.md | README.md |
__author__ = 'Zhang Fan'
import copy
import threading
_message_saver = {}
_main_lock = threading.Lock()
_msg_tag_lock_saver = {}
def create_receiver(msg_tag: str, receiver_func, priority=999):
'''
    Create a receiver.
    :param msg_tag: the message tag this receiver listens to
    :param receiver_func: callback invoked when a message is broadcast
    :param priority: priority (lower numbers receive the broadcast first; receivers with the same priority are ordered randomly)
'''
assert hasattr(receiver_func, '__call__')
with _main_lock:
if msg_tag not in _message_saver:
_message_saver[msg_tag] = {}
_msg_tag_lock_saver[msg_tag] = threading.Lock()
receiver_saver = _message_saver[msg_tag]
msg_tag_lock = _msg_tag_lock_saver[msg_tag]
with msg_tag_lock:
receiver_saver[receiver_func] = priority
def destroy_receiver(msg_tag: str, receiver_func):
'''
    Destroy a receiver.
    :param msg_tag: message tag
    :param receiver_func: the receiver callback to remove
'''
with _main_lock:
if msg_tag not in _message_saver:
return
receiver_saver = _message_saver[msg_tag]
msg_tag_lock = _msg_tag_lock_saver[msg_tag]
with msg_tag_lock:
if receiver_func in receiver_saver:
del receiver_saver[receiver_func]
def destroy_receiver_of_msg_tag(msg_tag: str):
'''
    Destroy all receivers registered for the given message tag.
    :param msg_tag: message tag
'''
with _main_lock:
if msg_tag not in _message_saver:
return
del _message_saver[msg_tag]
del _msg_tag_lock_saver[msg_tag]
def destroy_all_receiver():
'''
    Destroy all receivers.
'''
with _main_lock:
_message_saver.clear()
_msg_tag_lock_saver.clear()
def _receiver_saver_priority_sorted(item):
return item[1]
def send(msg_tag: str, *args, stop_send_flag=None, **kwargs):
'''
    Broadcast data to the receivers of a message tag.
    :param msg_tag: message tag
    :param stop_send_flag: stop flag; if any receiver returns this value (compared with `is`), sending stops and True is returned
    :return: a list with the return values of all receivers (unordered); an empty list if there are no receivers
'''
with _main_lock:
if msg_tag not in _message_saver:
return []
receiver_saver = _message_saver[msg_tag]
msg_tag_lock = _msg_tag_lock_saver[msg_tag]
with msg_tag_lock:
receiver_saver = copy.copy(receiver_saver)
receiver_saver = sorted(receiver_saver.items(), key=_receiver_saver_priority_sorted)
result_list = []
for receiver_func, priority in receiver_saver:
result = receiver_func(*args, **kwargs)
if result is stop_send_flag and stop_send_flag is not None:
return True
result_list.append(result)
return result_list
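# Example (sketch): registering a receiver for a message tag and broadcasting
# to it. The tag name, arguments and return values are illustrative only.
#
#   def on_login(username):
#       return 'hello %s' % username
#
#   create_receiver('user.login', on_login, priority=1)
#   send('user.login', 'alice')            # -> ['hello alice']
#   destroy_receiver('user.login', on_login)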
def sender(msg_tag: str, stop_send_flag=None):
'''
    Decorator (sender): calls to the decorated function are forwarded to send().
    :param msg_tag: message tag
    :param stop_send_flag: stop flag; if any receiver returns this value (compared with `is`), sending stops and True is returned
'''
def decorator(sender_func):
def new_func(*args, **kwargs):
return send(msg_tag, *args, stop_send_flag=stop_send_flag, **kwargs)
return new_func
return decorator
def receiver(msg_tag: str, priority=999):
'''
    Decorator (receiver): registers the decorated function as a receiver.
    :param msg_tag: message tag
    :param priority: priority (lower numbers receive the broadcast first; receivers with the same number are ordered randomly)
'''
def decorator(receiver_func):
create_receiver(msg_tag, receiver_func, priority=priority)
return receiver_func
return decorator | zmsgcentre | /zmsgcentre-2.0.1-py3-none-any.whl/zmsgcentre.py | zmsgcentre.py |
import uvicorn as uvicorn
from fastapi import FastAPI
from pydantic import BaseModel
import const
# Here we just set N to 50; it could be 500, 5000 or whatever,
# since geometric_series_sum can be evaluated quickly either way.
const.N = 50
app = FastAPI()
# - Find and fix the errors in the following program
# - Refactor the program to make it more modular (using a Pythonic style, e.g. numpy)
# - Implement an additional function geometric_series_sum() which takes an input parameter a and r
# to calculate the sum of the first N elements of: a + a*r + a*r^2 + a*r^3 + ...
# - Test the application (what approach would you use for test?)
class Msg(BaseModel):
msg: str
class GenerateData(BaseModel):
a: int
r: int
@app.post("/isGeometricSeries")
async def is_geometric_series_post(inp: Msg):
    result = False
    try:
        values_int_list = [int(i) for i in inp.msg.split(",")]
        result = check_if_geometric_series(values_int_list)
    except (ValueError, IndexError):
        # Non-integer input or too few values: the sequence is not geometric.
        pass
return {"The input sequence is geometric": result}
@app.post("/geometricSeriesSum")
async def geometric_series_sum(inp: GenerateData):
try:
if inp.r != 0:
sum = geometric_sum(inp.a,inp.r)
return {"The sum of geometric series is":sum}
else:
return {"The r cannot be zero":inp.r}
except Exception as err:
return {"The server internal error":err}
# The formula for the sum of the first N terms of a geometric series is:
# sum = a*(1-r^N)/(1-r) when r != 1, or
# sum = N*a when r = 1
def geometric_sum(a:int,r:int) -> int:
return const.N * a if r==1 else a * (1-exponent(r,const.N))/(1-r)
# base: the power base, e.g. 2 in 2^3.
# exp: the exponent of the base, e.g. 3 in 2^3.
# The algorithm is binary exponentiation, a quick way to compute the value.
def exponent(base, exp) -> int:
    if exp == 0:
        return 1
    if exp == 2:
        return base * base
    if exp % 2 == 1:
        return base * exponent(base, exp - 1)
    half = exponent(base, exp // 2)  # integer division keeps exp an int
    return half * half
def check_if_geometric_series(series: list) -> bool:
"""
Example:
check_if_geometric_series([3, 6, 12, 24])
True
check_if_geometric_series([1, 2, 3, 4])
False
"""
try:
common_ratio = series[1] / series[0]
for index in range(len(series) - 1):
if series[index + 1] / series[index] != common_ratio:
return False
except ZeroDivisionError:
return False
else:
return True
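# Example test sketch (addressing the "what approach would you use for test?"
# note above), assuming pytest and fastapi's TestClient are available:
#
#   from fastapi.testclient import TestClient
#   client = TestClient(app)
#
#   def test_is_geometric_series():
#       resp = client.post("/isGeometricSeries", json={"msg": "3,6,12,24"})
#       assert resp.json() == {"The input sequence is geometric": True}
#
#   def test_geometric_series_sum_with_r_equal_one():
#       resp = client.post("/geometricSeriesSum", json={"a": 2, "r": 1})
#       assert resp.json() == {"The sum of geometric series is": 100}  # N * a with N = 50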
if __name__ == "__main__":
uvicorn.run(app, host="127.0.0.1", port=8001) | zmt-geometric | /zmt_geometric-1.0.1.tar.gz/zmt_geometric-1.0.1/zmt_geometric/main.py | main.py |
# zmxtools
[](https://pypi.org/project/zmxtools/)
[](https://github.com/wemake-services/wemake-python-styleguide)
[](https://www.python.org/downloads)
[](https://opensource.org/licenses/AGPL-3.0)
[](https://github.com/tttom/ZmxTools/tree/master/python)
[](https://pypi.org/project/zmxtools/tree/master/python)
[](https://pypi.org/project/zmxtools/#files)
[](https://pypi.org/project/zmxtools/)
[](https://github.com/tttom/ZmxTools)
[](https://github.com/tttom/ZmxTools)
[](https://libraries.io/pypi/zmxtools)
[](https://readthedocs.org/projects/zmxtools)
A toolkit to read Zemax files.
Currently this is limited to unpacking ZAR archives; it does not yet parse the files contained within the archive, e.g. ZMX or AGF
glass files. For further processing, please check the [list of related software](#related-software) below.
## Features
- Unpack a Zemax OpticStudio Archive ZAR file using the `unzar` command.
- Repack a ZAR file as a standard zip file using the `unzar -z` command.
- Use as a pure Python 3 library.
- Fully typed with annotations and checked with mypy, [PEP561 compatible](https://www.python.org/dev/peps/pep-0561/)
## Installation
### Prerequisites
- Python 3.7 (tested on Python 3.8)
- pip, the Python package manager
To install `zmxtools`, just run the following command in a command shell:
```bash
pip install zmxtools
```
## Usage
This package can be used directly from a terminal shell or from your own Python code.
Example files can be found on manufacturer's sites such as [Thorlabs Inc](https://www.thorlabs.com).
### Command line shell
The command `unzar` is added to the path upon installation. It permits the extraction of the zar-file to a sub-directory
as well as its conversion to a standard zip-file. For example, extracting to the sub-directory `mylens` is done using
```console
unzar mylens.zar
```
Repacking the same zar-archive as a standard zip-archive `mylens.zip` is done with:
```console
unzar mylens.zar -z
```
Multiple input files and an alternative output directory can be specified:
```console
unzar -i *.zar -o some/where/else/
```
Find out more information and alternative options using:
```console
unzar -h
```
### As a Python library
Extraction and repacking can be done programmatically as follows:
```python
from zmxtools import zar
zar.extract('mylens.zar')
zar.repack('mylens.zar')
zar.read('mylens.zar')
```
Python `pathlib.Path` objects can be used instead of strings.
## Online
The latest version of the
- source code can be found on
[github: https://github.com/tttom/zmxtools](https://github.com/tttom/zmxtools)
- API Documentation on https://zmxtools.readthedocs.io/
## License
This code is distributed under the
[agpl3: GNU Affero General Public License](https://www.gnu.org/licenses/agpl-3.0.en.html)
## Credits
- [Wouter Vermaelen](https://github.com/m9710797) for decoding the ZAR header and finding LZW compressed contents.
- [Bertrand Bordage](https://github.com/BertrandBordage) for sharing this [gist](https://gist.github.com/BertrandBordage/611a915e034c47aa5d38911fc0bc7df9).
- This project was generated with [`wemake-python-package`](https://github.com/wemake-services/wemake-python-package). Current template version is: [cfbc9ea21c725ba5b14c33c1f52d886cfde94416](https://github.com/wemake-services/wemake-python-package/tree/cfbc9ea21c725ba5b14c33c1f52d886cfde94416). See what is [updated](https://github.com/wemake-services/wemake-python-package/compare/cfbc9ea21c725ba5b14c33c1f52d886cfde94416...master) since then.
## Related Software
- [Optical ToolKit](https://github.com/draustin/otk) reads Zemax .zmx files.
- [RayTracing](https://github.com/DCC-Lab/RayTracing) reads Zemax .zmx files.
- [Zemax Glass](https://github.com/nzhagen/zemaxglass) reads Zemax .agf files.
- [RayOptics](https://github.com/mjhoptics/ray-optics) reads Zemax .zmx and CODE-V .seq files.
- [RayOpt](https://github.com/quartiq/rayopt) reads Zemax .zmx as well as OSLO files.
- [OpticsPy](https://github.com/Sterncat/opticspy) does not read Zemax .zmx files but reads CODE-V .seq files and
glass information from data downloaded from https://www.refractiveindex.info/.
- [OpticalGlass](https://github.com/mjhoptics/opticalglass) reads glass manufacturer Excel sheets.
| zmxtools | /zmxtools-0.1.3.tar.gz/zmxtools-0.1.3/README.md | README.md |
# Zn equivalence classes operation table generator
Simple package to obtain Zn's equivalence classes sum or product operation tables.
The build_table method takes an positive integer (n) and 'sum' or 'prod' as first and second arguments.
It will return a nxn matrix with the result of operating every item with each other.
## Installation:
``
pip install zn_operation_table
``
## Example
``
build_table(3, 'sum')
``
Will return:
``
[[0,1,2], [1,2,0], [2,0,1]]
``
## build_table function
build_table(n, operation, headers, inversibles)
n: positive integer.
operation: 'sum' for class sum and 'prod' for class product.
headers: for row and column headers.
inversibles: to use the given set's inversibles for the given operation.
| zn-operation-table | /zn_operation_table-1.1.5.tar.gz/zn_operation_table-1.1.5/README.md | README.md |
# zn
Zinc
## Development Environment
### Setup
Follow these steps to create a development environment for Zinc:
cd ~/projects
git clone [email protected]:blinkdog/zn.git
cd zn
python3.7 -m venv ./env
source env/bin/activate
pip install --upgrade pip
pip install -r requirements.txt
### Maintenance
If you install a new package using `pip install` then update the
`requirements.txt` file with the following command:
pip freeze --all >requirements.txt
### Working
The helper script `snake` defines some common project tasks:
Try one of the following tasks:
snake clean # Remove build cruft
snake coverage # Perform coverage analysis
snake dist # Create a distribution tarball and wheel
snake lint # Run static analysis tools
snake publish # Publish the module to Test PyPI
snake rebuild # Test and lint the module
snake test # Test the module
The task `rebuild` doesn't really build (no need to compile Python),
but it does run the unit tests and lint the project.
#### Version Bumping
If you need to increase the version number of the project, don't
forget to edit the following:
CHANGELOG.md
setup.py
| zn | /zn-0.0.1.tar.gz/zn-0.0.1/README.md | README.md |
# Znail

Znail is a network emulator inteded to run on a Raspberry Pi.
Equipped with two network interfaces, the Raspberry Pi acts as a network bridge.
Znail can then control network traffic passing through the bridge.
With a system under test connected to the network through this bridge,
Znail can help you answer question about how that system behaves under various network conditions.
# Features
* Emulate packet delay
* Emulate packet loss
* Emulate packet duplication
* Emulate packet reordering
* Emulate packet corruption
* Control packet rate
* Capture network packets
* Emulate a disconnect (by powering down one of its network interfaces)
* Override answers to DNS queries (by redirecting DNS traffic to its internal DNS server)
* Redirect IP traffic from one host to another
* Not apply any of the above for certain hosts using a whitelist
Znail can be managed in one of two ways, using its web interface or its REST API.
# Getting Started
The easiest way to get started with Znail is to [download](https://github.com/znailnetem/znail/releases/latest) a Rasbian image with Znail pre-installed.
The image can then be [installed](https://www.raspberrypi.org/documentation/installation/installing-images/README.md) on a Raspberry Pi.
# Generating a Custom Image
A virtual machine is used to build the image.
`Vagrant` and the `vagrant-scp` plugin is required.
To set up the environment on recent Ubuntu systems, run the following commands:
sudo apt update
sudo apt install vagrant
vagrant plugin install vagrant-scp
To generate an image:
make image
The resulting image can be found in the `dist/image` directory.
# Development
The Python environment requires that the `pip` tool is installed.
To set up the development environment on recent Ubuntu systems, run the following commands:
sudo apt update
sudo apt install python3-pip python3-venv
To build and activate the virtual Python environment:
source ./activate
To automatically format the code:
make format
To run tests and static code analysis:
make check
More information about what targets the build system provides:
make help
# Special Thanks
Special thanks to Alice Persson for contributing the Znail logotype.
# License
Distributed under the terms of the Apache License 2.0.
| znail | /znail-0.5.0.tar.gz/znail-0.5.0/README.md | README.md |
znbdownload
=====================================================
Upload media files to S3 and add support for private files.
Features
------------------------------------------------------------------------------
-
Installing and Uninstalling Packages
------------------------------------------------------------------------------
Installing in editable mode from local directory.
.. code-block:: bash
$ pip install -e /path/to/znbdownload/
You can remove the -e to install the package in the corresponding Python path, for example: /env/lib/python3.7/site-packages/znbdownload.
List installed packages and uninstall.
.. code-block:: bash
$ pip list
$ pip uninstall znbdownload
Installing from git using https.
.. code-block:: bash
$ pip install git+https://github.com/requests/requests.git#egg=requests
$ pip install git+https://github.com/alexisbellido/znbdownload.git#egg=znbdownload
This package could be added to a pip requirements.txt file from its git repository or source directory.
.. code-block:: bash
git+https://github.com/alexisbellido/znbdownload.git#egg=znbdownload
-e /path-to/znbdownload/
or from PyPi, in this case passing a specific version.
.. code-block:: bash
znbdownload==0.2
ZnbDownload will require, and install if necessary, Django, boto3 and django-storages.
Updating Django Settings
---------------------------------------------------------------------------------------
Add the following to INSTALLED_APPS
.. code-block:: bash
'znbdownload.apps.ZnbDownloadConfig'
Make sure these two are also installed.
.. code-block:: bash
'storages'
'django.contrib.staticfiles'
Amazon S3
-----------------------------------------------
Some notes to use S3 for storing Django files.
Cross-origin resource sharing (CORS) defines a way for client web applications that are loaded in one domain to interact with resources in a different domain.
More on `S3 access permissions <https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html>`_.
Option 1 (preferred): Resource-based policy.
A bucket configured to allow public read access and full control by an IAM user that will be used from Django.
Create an IAM user. Write down the ARN and user credentials (AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY).
Don't worry about adding a user policy as you will be using a bucket policy to refer to this user by its arn.
Create an S3 bucket at url-of-s3-bucket.
Assign it the following CORS configuration in the permissions tab.
.. code-block:: bash
<CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<CORSRule>
<AllowedOrigin>*</AllowedOrigin>
<AllowedMethod>GET</AllowedMethod>
<MaxAgeSeconds>3000</MaxAgeSeconds>
<AllowedHeader>Authorization</AllowedHeader>
</CORSRule>
</CORSConfiguration>
Go to permissions, public access settings for the bucket and set these options to false or you won't be able to use * as Principal in the bucket policy:
.. code-block:: bash
Block new public ACLs and uploading public objects (Recommended)
Remove public access granted through public ACLs (Recommended)
Block new public bucket policies (Recommended)
Block public and cross-account access if bucket has public policies (Recommended)
and the following bucket policy (use the corresponding arn for the bucket and for the IAM user that will have full control).
.. code-block:: bash
{
"Version": "2012-10-17",
"Id": "name-of-bucket",
"Statement": [
{
"Sid": "PublicReadForGetBucketObjects",
"Effect": "Allow",
"Principal": "*",
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::name-of-bucket/*"
},
{
"Sid": "FullControlForBucketObjects",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::364908532015:user/name-of-user"
},
"Action": "s3:*",
"Resource": [
"arn:aws:s3:::name-of-bucket",
"arn:aws:s3:::name-of-bucket/*"
]
}
]
}
Option 2: user policy.
A user configured to control a specific bucket.
Create an S3 bucket at url-of-s3-bucket.
Assign it the following CORS configuration in the permissions tab.
.. code-block:: bash
<?xml version="1.0" encoding="UTF-8"?>
<CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<CORSRule>
<AllowedOrigin>*</AllowedOrigin>
<AllowedMethod>GET</AllowedMethod>
<MaxAgeSeconds>3000</MaxAgeSeconds>
<AllowedHeader>Authorization</AllowedHeader>
</CORSRule>
</CORSConfiguration>
Create a user in IAM and assign it to this policy.
.. code-block:: bash
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "Stmt1394043345000",
"Effect": "Allow",
"Action": [
"s3:*"
],
"Resource": [
"arn:aws:s3:::url-of-s3-bucket/*"
]
}
]
}
Then create the user credentials (AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY) to connect from Django.
| znbdownload | /znbdownload-0.1.tar.gz/znbdownload-0.1/README.rst | README.rst |
Development Notes
==================================================================================
**Important**: Do not share this Docker image with your private key information.
Using a temporary, local Docker container with an ssh private key and some Python 3 packages for initial tests.
Change to the root directory of this repository, where the Dockerfile and setup.py files are, and build the image.
.. code-block:: bash
$ docker build -t znbdownload .
Optional: Use a username (example here), a version number and $(date) to tag the image.
.. code-block:: bash
$ docker build --build-arg SSH_PRIVATE_KEY="$(cat ~/.ssh/id_rsa)" -t example/znbdownload:0.1-$(date +%Y%m%d) .
While still in the same directory, run the container and make sure you don't map over /root in the container because that's where ssh key from the host is stored. Replace image:tag with what you used above, for example, znbdownload:latest or example/znbdownload:0.1-20190306.
.. code-block:: bash
$ docker run -it --rm --mount type=bind,source=$PWD,target=/root/project image:tag docker-entrypoint.sh /bin/bash
This will map /root/project inside the container to the host directory where setup.py is, the root of the repository, and set the Python environment so that pip can do its job.
List the installed packages.
.. code-block:: bash
$ pip list
Install into the environment's Python path.
.. code-block:: bash
$ pip install /root/project/
or install in editable mode so that nothing is copied and you can make changes in the source code.
.. code-block:: bash
$ pip install -e /root/project/
To uninstall the package.
.. code-block:: bash
$ pip uninstall znbdownload
Configuration and Django settings.py
------------------------------------------------------------------------------
Review partial settings files production.py and locals3.py in docs directory.
Distribute as a setuptools-based Package
------------------------------------------------------------------------------
This can be run from a host or a container. My tests have been on a container.
.. code-block:: bash
$ pip install setuptools wheel twine
Run this from the same directory where setup.py is located.
.. code-block:: bash
$ python setup.py sdist bdist_wheel
Upload to Test PyPi at `<https://test.pypi.org>`_.
.. code-block:: bash

    $ twine upload --repository-url https://test.pypi.org/legacy/ dist/*
The package is now available at `<https://test.pypi.org/project/znbdownload/>`_ and can be installed with pip.
.. code-block:: bash
$ pip install -i https://test.pypi.org/simple/ znbdownload
Upload to the real PyPi at `<https://pypi.org>`_.
.. code-block:: bash
$ twine upload dist/*
The package is now available at `<https://pypi.org/project/znbdownload/>`_ and can be installed with pip.
.. code-block:: bash
$ pip install znbdownload
Additional Resources
------------------------------------------------------------------------------
* `packaging projects <https://packaging.python.org/tutorials/packaging-projects>`_.
* `sample project on GitHub <https://github.com/pypa/sampleproject>`_.
* `setuptools <https://setuptools.readthedocs.io/en/latest/setuptools.html>`_.
* `pip install <https://pip.pypa.io/en/stable/reference/pip_install>`_ documentation.
* `include additional files with distribution <https://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files>`_.
| znbdownload | /znbdownload-0.1.tar.gz/znbdownload-0.1/docs/DEVELOPMENT.rst | DEVELOPMENT.rst |
znbstatic
=====================================================
Custom Django storage backend.
Features
------------------------------------------------------------------------------
- Storage of assets managed by collectstatic on Amazon Web Services S3.
- Versioning using a variable from Django's settings (https://example.com/static/css/styles.css?v=1.2)
Installing and Uninstalling Packages
------------------------------------------------------------------------------
Installing in editable mode from local directory.
.. code-block:: bash
$ pip install -e /path/to/znbstatic/
You can remove the -e to install the package in the corresponding Python path, for example: /env/lib/python3.7/site-packages/znbstatic.
List installed packages and uninstall.
.. code-block:: bash
$ pip list
$ pip uninstall znbstatic
Installing from git using https.
.. code-block:: bash
$ pip install git+https://github.com/requests/requests.git#egg=requests
$ pip install git+https://github.com/alexisbellido/znbstatic.git#egg=znbstatic
This package could be added to a pip requirements.txt file from its git repository or source directory.
.. code-block:: bash
git+https://github.com/alexisbellido/znbstatic.git#egg=znbstatic
-e /path-to/znbstatic/
or from PyPi, in this case passing a specific version.
.. code-block:: bash
znbstatic==0.2
Znbstatic will require, and install if necessary, Django, boto3 and django-storages.
Updating Django Settings
---------------------------------------------------------------------------------------
Add the following to INSTALLED_APPS
.. code-block:: bash
'znbstatic.apps.ZnbStaticConfig'
Make sure these two are also installed.
.. code-block:: bash
'storages'
'django.contrib.staticfiles'
Add the znbstatic.context_processors.static_urls context processor to the corresponding template engine.
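For reference, a minimal sketch of a TEMPLATES entry with this context processor added (the surrounding values are ordinary Django defaults, not requirements of znbstatic):
.. code-block:: python
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'znbstatic.context_processors.static_urls',
            ],
        },
    },
]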
Update or insert the following attributes.
.. code-block:: python
# if hosting the static files locally.
# STATICFILES_STORAGE = 'znbstatic.storage.VersionedStaticFilesStorage'
# STATIC_URL = '/static/'
# use the following if using Amazon S3
STATICFILES_STORAGE = 'znbstatic.storage.VersionedS3StaticFilesStorage'
AWS_ACCESS_KEY_ID = 'your-access-key-id'
AWS_SECRET_ACCESS_KEY = 'your-secret-access-key'
AWS_STORAGE_STATIC_BUCKET_NAME = 'static.example.com'
# where is this used?
AWS_S3_HOST = 's3.amazonaws.com'
S3_USE_SIGV4 = True
AWS_QUERYSTRING_AUTH = False
AWS_DEFAULT_ACL = 'public-read'
STATIC_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_STATIC_BUCKET_NAME
ZNBSTATIC_VERSION = '0.1'
Amazon S3
-----------------------------------------------
Some notes to use S3 for storing Django files.
Cross-origin resource sharing (CORS) defines a way for client web applications that are loaded in one domain to interact with resources in a different domain.
More on `S3 access permissions <https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html>`_.
Option 1 (preferred): Resource-based policy.
A bucket configured to allow public read access and full control by an IAM user that will be used from Django.
Create an IAM user. Write down the ARN and user credentials (AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY).
Don't worry about adding a user policy as you will be using a bucket policy to refer to this user by its ARN.
Create an S3 bucket at url-of-s3-bucket.
Assign it the following CORS configuration in the permissions tab.
.. code-block:: bash
<CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<CORSRule>
<AllowedOrigin>*</AllowedOrigin>
<AllowedMethod>GET</AllowedMethod>
<MaxAgeSeconds>3000</MaxAgeSeconds>
<AllowedHeader>Authorization</AllowedHeader>
</CORSRule>
</CORSConfiguration>
Go to permissions, public access settings for the bucket and set these options to false or you won't be able to use * as Principal in the bucket policy:
.. code-block:: bash
Block new public ACLs and uploading public objects (Recommended)
Remove public access granted through public ACLs (Recommended)
Block new public bucket policies (Recommended)
Block public and cross-account access if bucket has public policies (Recommended)
Then add the following bucket policy (use the corresponding ARN for the bucket and for the IAM user that will have full control).
.. code-block:: bash
{
"Version": "2012-10-17",
"Id": "name-of-bucket",
"Statement": [
{
"Sid": "PublicReadForGetBucketObjects",
"Effect": "Allow",
"Principal": "*",
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::name-of-bucket/*"
},
{
"Sid": "FullControlForBucketObjects",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::364908532015:user/name-of-user"
},
"Action": "s3:*",
"Resource": [
"arn:aws:s3:::name-of-bucket",
"arn:aws:s3:::name-of-bucket/*"
]
}
]
}
Option 2: user policy.
A user configured to control a specific bucket.
Create an S3 bucket at url-of-s3-bucket.
Assign it the following CORS configuration in the permissions tab.
.. code-block:: bash
<?xml version="1.0" encoding="UTF-8"?>
<CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<CORSRule>
<AllowedOrigin>*</AllowedOrigin>
<AllowedMethod>GET</AllowedMethod>
<MaxAgeSeconds>3000</MaxAgeSeconds>
<AllowedHeader>Authorization</AllowedHeader>
</CORSRule>
</CORSConfiguration>
Create a user in IAM and assign it to this policy.
.. code-block:: bash
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "Stmt1394043345000",
"Effect": "Allow",
"Action": [
"s3:*"
],
"Resource": [
"arn:aws:s3:::url-of-s3-bucket/*"
]
}
]
}
Then create the user credentials (AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY) to connect from Django.
| znbstatic | /znbstatic-0.5.tar.gz/znbstatic-0.5/README.rst | README.rst |
Development Notes
==================================================================================
**Important**: Do not share this Docker image with your private key information.
Using a temporary, local Docker container with an ssh private key and some Python 3 packages for initial tests.
Change to the root directory of this repository, where the Dockerfile and setup.py files are, and build the image.
.. code-block:: bash
$ docker build -t znbstatic .
Optional: Use a username (example here), a version number and $(date) to tag the image.
.. code-block:: bash
$ docker build --build-arg SSH_PRIVATE_KEY="$(cat ~/.ssh/id_rsa)" -t example/znbstatic:0.1-$(date +%Y%m%d) .
While still in the same directory, run the container and make sure you don't map over /root in the container because that's where the ssh key from the host is stored. Replace image:tag with what you used above, for example, znbstatic:latest or example/znbstatic:0.1-20190306.
.. code-block:: bash
$ docker run -it --rm --mount type=bind,source=$PWD,target=/root/project image:tag docker-entrypoint.sh /bin/bash
This will map /root/project inside the container to the host directory where setup.py is, the root of the repository, and set the Python environment so that pip can do its job.
List the installed packages.
.. code-block:: bash
$ pip list
Install into the environment's Python path.
.. code-block:: bash
$ pip install /root/project/
or install in editable mode so that nothing is copied and you can make changes in the source code.
.. code-block:: bash
$ pip install -e /root/project/
To uninstall the package.
.. code-block:: bash
$ pip uninstall znbstatic
Configuration and Django settings.py
------------------------------------------------------------------------------
Review partial settings files production.py and locals3.py in docs directory.
Distribute as a setuptools-based Package
------------------------------------------------------------------------------
This can be run from a host or a container. My tests have been on a container.
.. code-block:: bash
$ pip install setuptools wheel twine
Run this from the same directory where setup.py is located.
.. code-block:: bash
$ python setup.py sdist bdist_wheel
Upload to Test PyPI at `<https://test.pypi.org>`_.
.. code-block:: bash
$ twine upload --repository-url https://test.pypi.org/legacy/ dist/*
The package is now available at `<https://test.pypi.org/project/znbstatic/>`_ and can be installed with pip.
.. code-block:: bash
$ pip install -i https://test.pypi.org/simple/ znbstatic
Upload to the real PyPI at `<https://pypi.org>`_.
.. code-block:: bash
$ twine upload dist/*
The package is now available at `<https://pypi.org/project/znbstatic/>`_ and can be installed with pip.
.. code-block:: bash
$ pip install znbstatic
Additional Resources
------------------------------------------------------------------------------
* `packaging projects <https://packaging.python.org/tutorials/packaging-projects>`_.
* `sample project on GitHub <https://github.com/pypa/sampleproject>`_.
* `setuptools <https://setuptools.readthedocs.io/en/latest/setuptools.html>`_.
* `pip install <https://pip.pypa.io/en/stable/reference/pip_install>`_ documentation.
* `include additional files with distribution <https://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files>`_.
| znbstatic | /znbstatic-0.5.tar.gz/znbstatic-0.5/docs/DEVELOPMENT.rst | DEVELOPMENT.rst |
[](https://github.com/zincware)
[](https://badge.fury.io/py/zndraw)
[](https://doi.org/10.5281/zenodo.8304530)

# ZnDraw
Install via `pip install zndraw`. If you have `pywebview` installed (`pip install pywebview`),
ZnDraw will open in a dedicated window.
## CLI
You can use ZnDraw with the CLI `zndraw atoms.xyz`. Alternatively, you can use
zndraw from within a Jupyter Notebook
```python
from zndraw import ZnDraw
import ase
zndraw = ZnDraw()
zndraw
# this will show you the molecule
# new cell
zndraw.socket.sleep(2) # give it some time to fully connect
zndraw[0] = ase.Atoms(
"H2O", positions=[[0.75, -0.75, 0], [0.75, 0.75, 0], [0, 0, 0]]
)
```
or a normal Python script using `zndraw = ZnDraw(jupyter=False)` to open a
browser window.
ZnDraw is designed to work with your Python scripts. To interface you can
inherit from `zndraw.examples.UpdateScene` or follow this base class:
```python
import abc
from pydantic import BaseModel
class UpdateScene(BaseModel, abc.ABC):
@abc.abstractmethod
def run(self, atom_ids: list[int], atoms: ase.Atoms, **kwargs) -> list[ase.Atoms]:
pass
```
The `run` method expects as inputs
- atom_ids: list\[int\], the ids of the currently selected atoms
- atoms: ase.Atoms, the configuration as an `ase.Atoms` object from which the atom_ids were
selected.
- kwargs: dict, optional additional information from the scene
and as an output:
- list\[ase.Atoms\], a list of ase Atoms objects to display.
You can define the parameters using `pydantic.Field` which will be displayed in
the UI.
```python
class MyUpdateCls(UpdateScene):
steps: int = Field(100, le=1000, ge=1)
x: float = Field(0.5, le=5, ge=0)
symbol: str = Field("same")
```
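For illustration only, here is one way the `run` method of the class above could be implemented; the rattle-based update is a stand-in for your own logic, not part of the ZnDraw API:
```python
import ase
from pydantic import Field
from zndraw.examples import UpdateScene


class MyUpdateCls(UpdateScene):
    steps: int = Field(100, le=1000, ge=1)
    x: float = Field(0.5, le=5, ge=0)
    symbol: str = Field("same")

    def run(self, atom_ids: list[int], atoms: ase.Atoms, **kwargs) -> list[ase.Atoms]:
        # build `steps` new frames by randomly displacing the atoms of the current frame
        frames = []
        current = atoms.copy()
        for _ in range(self.steps):
            current = current.copy()
            current.rattle(stdev=self.x)
            frames.append(current)
        return frames
```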
To add your method, click on the `+` on the right side of the window. You should
be able to add your method from the working directory via `module.MyUpdateCls`
as long as it can be imported via `from module import MyUpdateCls`.



# Development
ZnDraw is developed using https://python-poetry.org/. Furthermore, the
javascript packages have to be installed using https://www.npmjs.com/.
```bash
cd zndraw/static/
npm install
```
| zndraw | /zndraw-0.2.0a16.tar.gz/zndraw-0.2.0a16/README.md | README.md |
[](https://github.com/zincware)
[](https://coveralls.io/github/zincware/ZnFlow?branch=main)
[](https://badge.fury.io/py/znflow)
[](https://mybinder.org/v2/gh/zincware/ZnFlow/HEAD)
# ZnFlow
The `ZnFlow` package provides a basic structure for building computational
graphs based on functions or classes. It is designed as a lightweight
abstraction layer to
- learn graph computing.
- build your own packages on top of it.
## Installation
```shell
pip install znflow
```
## Usage
### Connecting Functions
With ZnFlow you can connect functions to each other by using the `@nodify`
decorator. Inside the `znflow.DiGraph` the decorator will return a
`FunctionFuture` object that can be used to connect the function to other nodes.
The `FunctionFuture` object will also be used to retrieve the result of the
function. Outside the `znflow.DiGraph` the function behaves as a normal
function.
```python
import znflow
@znflow.nodify
def compute_mean(x, y):
return (x + y) / 2
print(compute_mean(2, 8))
# >>> 5
with znflow.DiGraph() as graph:
mean = compute_mean(2, 8)
graph.run()
print(mean.result)
# >>> 5
with znflow.DiGraph() as graph:
n1 = compute_mean(2, 8)
n2 = compute_mean(13, 7)
n3 = compute_mean(n1, n2)
graph.run()
print(n3.result)
# >>> 7.5
```
### Connecting Classes
It is also possible to connect classes. They can be connected either directly or
via class attributes. This is possible by returning `znflow.Connections` inside
the `znflow.DiGraph` context manager. Outside the `znflow.DiGraph` the class
behaves as a normal class.
In the following example we use a dataclass, but it works with all Python
classes that inherit from `znflow.Node`.
```python
import znflow
import dataclasses
@znflow.nodify
def compute_mean(x, y):
return (x + y) / 2
@dataclasses.dataclass
class ComputeMean(znflow.Node):
x: float
y: float
results: float = None
def run(self):
self.results = (self.x + self.y) / 2
with znflow.DiGraph() as graph:
n1 = ComputeMean(2, 8)
n2 = compute_mean(13, 7)
# connecting classes and functions to a Node
n3 = ComputeMean(n1.results, n2)
graph.run()
print(n3.results)
# >>> 7.5
```
## Dask Support
ZnFlow comes with support for [Dask](https://www.dask.org/) to run your graph:
- in parallel.
- through e.g. SLURM (see https://jobqueue.dask.org/en/latest/api.html).
- with a nice GUI to track progress.
All you need to do is install ZnFlow with Dask `pip install znflow[dask]`. We
can then extend the example from above. This will run `n1` and `n2` in parallel.
You can investigate the graph on the Dask dashboard (typically
http://127.0.0.1:8787/graph or via the client object in Jupyter.)
```python
import znflow
import dataclasses
from dask.distributed import Client
@znflow.nodify
def compute_mean(x, y):
return (x + y) / 2
@dataclasses.dataclass
class ComputeMean(znflow.Node):
x: float
y: float
results: float = None
def run(self):
self.results = (self.x + self.y) / 2
with znflow.DiGraph() as graph:
n1 = ComputeMean(2, 8)
n2 = compute_mean(13, 7)
# connecting classes and functions to a Node
n3 = ComputeMean(n1.results, n2)
client = Client()
deployment = znflow.deployment.Deployment(graph=graph, client=client)
deployment.submit_graph()
n3 = deployment.get_results(n3)
print(n3)
# >>> ComputeMean(x=5.0, y=10.0, results=7.5)
```
We need to get the updated instance from the Dask worker via
`Deployment.get_results`. Due to the way Dask works, an inplace update is not
possible. To retrieve the full graph, you can use
`Deployment.get_results(graph.nodes)` instead.
### Working with lists
ZnFlow supports some special features for working with lists. In the following
example we want to `combine` two lists.
```python
import znflow
@znflow.nodify
def arange(size: int) -> list:
return list(range(size))
print(arange(2) + arange(3))
# >>> [0, 1, 0, 1, 2]
with znflow.DiGraph() as graph:
lst = arange(2) + arange(3)
graph.run()
print(lst.result)
# >>> [0, 1, 0, 1, 2]
```
This functionality is restricted to lists. There are some further features that
allow combining `data: list[list]` by either using
`data: list = znflow.combine(data)` which has an optional `attribute=None`
argument to be used in the case of classes or you can simply use
`data: list = sum(data, [])`.
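As a rough sketch, reusing `arange` from the example above, the two variants described here look like this inside the graph:
```python
import znflow

@znflow.nodify
def arange(size: int) -> list:
    return list(range(size))

with znflow.DiGraph() as graph:
    data = [arange(2), arange(3)]   # a plain list of list-valued futures
    flat_a = znflow.combine(data)   # combine the nested lists into one
    flat_b = sum(data, [])          # the plain-Python equivalent mentioned above
graph.run()
```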
### Attributes Access
Inside the `with znflow.DiGraph()` context manager, accessing class attributes
yields `znflow.Connector` objects. Sometimes, it may be required to obtain the
actual attribute value instead of a `znflow.Connector` object. It is not
recommended to run class methods inside the `with znflow.DiGraph()` context
manager since it should be exclusively used for building the graph and not for
actual computation.
In the case of properties or other descriptor-based attributes, it might be
necessary to access the actual attribute value. This can be achieved using the
`znflow.get_attribute` method, which supports all features from `getattr` and
can be imported as such:
```python
from znflow import get_attribute as getattr
```
Here's an example of how to use `znflow.get_attribute`:
```python
import znflow
class POW2(znflow.Node):
"""Compute the square of x."""
x_factor: float = 0.5
results: float = None
_x: float = None
@property
def x(self):
return self._x
@x.setter
def x(self, value):
# using "self._x = value * self.x_factor" inside "znflow.DiGraph()" would run
# "value * Connector(self, "x_factor")" which is not possible (TypeError)
# therefore we use znflow.get_attribute.
self._x = value * znflow.get_attribute(self, "x_factor")
def run(self):
self.results = self.x**2
with znflow.DiGraph() as graph:
n1 = POW2()
n1.x = 4.0
graph.run()
assert n1.results == 4.0
```
Instead, you can also use the `znflow.disable_graph` decorator / context manager
to disable the graph for a specific block of code or the `znflow.Property` as a
drop-in replacement for `property`.
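As a sketch, the setter from the example above could rely on `znflow.disable_graph` instead of `znflow.get_attribute` (assuming the context-manager form described here):
```python
import znflow

class POW2(znflow.Node):
    x_factor: float = 0.5
    results: float = None
    _x: float = None

    @property
    def x(self):
        return self._x

    @x.setter
    def x(self, value):
        # temporarily leave graph mode so attribute access returns plain values
        with znflow.disable_graph():
            self._x = value * self.x_factor

    def run(self):
        self.results = self.x**2
```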
# Supported Frameworks
ZnFlow includes tests to ensure compatibility with:
- "Plain classes"
- `dataclasses`
- `ZnInit`
- `attrs`
It is currently **not** compatible with pydantic. I don't know what pydantic
does internally and wasn't able to find a workaround.
| znflow | /znflow-0.1.12.tar.gz/znflow-0.1.12/README.md | README.md |
[](https://github.com/zincware)
[](https://coveralls.io/github/zincware/ZnH5MD?branch=main)
[](https://badge.fury.io/py/znh5md)
[](https://mybinder.org/v2/gh/zincware/ZnH5MD/HEAD)
# ZnH5MD - High Performance Interface for H5MD Trajectories
ZnH5MD allows easy access to simulation results from H5MD trajectories.
## Example
In the following example we investigate an H5MD dump from LAMMPS with 1000 atoms and 201 configurations:
```python
import znh5md
traj = znh5md.DaskH5MD("file.h5", time_chunk_size=500, species_chunk_size=100)
print(traj.file.time_dependent_groups)
# ['edges', 'force', 'image', 'position', 'species', 'velocity']
print(traj.force)
# DaskDataSet(value=dask.array<array, shape=(201, 1000, 3), ...)
print(traj.velocity.slice_by_species(species=1))
# DaskDataSet(value=dask.array<reshape, shape=(201, 500, 3), ...)
print(traj.position.value)
# dask.array<array, shape=(201, 1000, 3), dtype=float64, chunksize=(100, 500, 3), ...>
# You can iterate through the data
for item in traj.position.batch(size=27, axis=0):
for x in item.batch(size=17, axis=1):
print(x.value.compute())
```
## ASE Atoms
You can use ZnH5MD to store ASE Atoms objects in the H5MD format.
> ZnH5MD does not support all features of ASE Atoms objects. It is important to note that unsupported parts are silently ignored and no error is raised.
> The ASEH5MD interface will not provide any time and step information.
> If you have a list of Atoms with different PBC values, you can use `znh5md.io.AtomsReader(atoms, use_pbc_group=True)`. This will create a `pbc` group in `box/` that also contains `step` and `time`. This is not an official H5MD specification so it can cause issues with other tools. If you don't specify this, the pbc of the first atoms in the list will be applied.
```python
import znh5md
import ase
atoms: list[ase.Atoms]
db = znh5md.io.DataWriter(filename="db.h5")
db.initialize_database_groups()
db.add(znh5md.io.AtomsReader(atoms))
data = znh5md.ASEH5MD("db.h5")
data.get_atoms_list() == atoms
```
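If your list of Atoms mixes different periodic boundary conditions, only the `add` call in the example above changes, as described in the note:
```python
db.add(znh5md.io.AtomsReader(atoms, use_pbc_group=True))
```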
## CLI
ZnH5MD provides a small set of CLI tools:
- `znh5md view <file.h5>` to view the File using `ase.visualize`
- `znh5md export <file.h5> <file.xyz>` to export the file to `.xyz` or any other supported file format
- `znh5md convert <file.xyz> <file.h5>` to save a `file.xyz` as `file.h5` in the H5MD standard.
| znh5md | /znh5md-0.1.6a0.tar.gz/znh5md-0.1.6a0/README.md | README.md |
[](https://coveralls.io/github/zincware/ZnInit?branch=main)

[](https://badge.fury.io/py/zninit)
[](https://github.com/psf/black/)
[](https://mybinder.org/v2/gh/zincware/ZnInit/HEAD)
[](https://github.com/zincware)
# ZnInit - Automatic Generation of ``__init__`` based on Descriptors
This package provides a base class for ``dataclass`` like structures with the addition of using [Descriptors](https://docs.python.org/3/howto/descriptor.html).
The main functionality is the automatic generation of a keyword-only ``__init__`` based on selected descriptors.
The descriptors can e.g. overwrite ``__set__`` or ``__get__`` or have custom metadata associated with them.
The ``ZnInit`` package is used by [ZnTrack](https://github.com/zincware/ZnTrack) to enable lazy loading data from files as well as distinguishing between different types of descriptors such as `zn.params` or `zn.outputs`. An example can be found in the `examples` directory.
# Example
The most simple use case is a replication of a dataclass like structure.
```python
from zninit import ZnInit, Descriptor
class Human(ZnInit):
name: str = Descriptor()
language: str = Descriptor("EN")
# This will generate the following init:
def __init__(self, *, name, language="EN"):
self.name = name
self.language = language
fabian = Human(name="Fabian")
# or
fabian = Human(name="Fabian", language="DE")
```
The benefit of using ``ZnInit`` comes with using descriptors. You can subclass the `zninit.Descriptor` class and only add certain kwargs to the `__init__` by listing them in `_init_descriptors_: list`. Furthermore, a `_post_init_` method is available to run code immediately after initializing the class.
````python
from zninit import ZnInit, Descriptor
class Input(Descriptor):
"""A Parameter"""
class Metric(Descriptor):
"""An Output"""
class Human(ZnInit):
_init_descriptors_ = [Input] # only add Input descriptors to the __init__
name: str = Input()
language: str = Input("DE")
date: str = Metric() # will not appear in the __init__
def _post_init_(self):
self.date = "2022-09-16"
julian = Human(name="Julian")
print(julian) # Human(language='DE', name='Julian')
print(julian.date) # 2022-09-16
print(Input.get_dict(julian)) # {"name": "Julian", "language": "DE"}
````
One benefit of ``ZnInit`` is that it also allows for inheritance.
````python
from zninit import ZnInit, Descriptor
class Animal(ZnInit):
age: int = Descriptor()
class Cat(Animal):
name: str = Descriptor()
billy = Cat(age=4, name="Billy")
````
| zninit | /zninit-0.1.10.tar.gz/zninit-0.1.10/README.md | README.md |
[](https://coveralls.io/github/zincware/ZnIPy?branch=main)
# ZnIPy - Easy imports from Jupyter Notebooks
See [Importing Jupyter Notebooks as Modules](https://jupyter-notebook.readthedocs.io/en/stable/examples/Notebook/Importing%20Notebooks.html) for more information.
```python
from znipy import NotebookLoader
module = NotebookLoader().load_module("JupyterNotebook.ipynb")
hello_world = module.HelloWorld()
```
or with direct imports
```python
import znipy
znipy.register()
from JupyterNotebook import HelloWorld
hello_world = HelloWorld()
```
| znipy | /znipy-0.1.1.tar.gz/znipy-0.1.1/README.md | README.md |
[](https://coveralls.io/github/zincware/ZnJSON?branch=main)
[](https://github.com/psf/black/)
[](https://coveralls.io/github/zincware/ZnJSON?branch=main)
[](https://badge.fury.io/py/znjson)
# ZnJSON
Package to Encode/Decode some common file formats to json
Available via ``pip install znjson``
In comparison to `pickle` this allows having readable json files combined with some
serialized data.
# Example
````python
import numpy as np
import json
import znjson
data = json.dumps(
obj={"data_np": np.arange(2), "data": [x for x in range(10)]},
cls=znjson.ZnEncoder,
indent=4
)
_ = json.loads(data, cls=znjson.ZnDecoder)
````
The resulting ``*.json`` file is partially readable and looks like this:
````json
{
"data_np": {
"_type": "np.ndarray_small",
"value": [
0,
1
]
},
"data": [
0,
1,
2,
3,
4
]
}
````
# Custom Converter
ZnJSON allows you to easily add custom converters.
Let's write a serializer for ``datetime.datetime``.
````python
from znjson import ConverterBase
from datetime import datetime
class DatetimeConverter(ConverterBase):
"""Encode/Decode datetime objects
Attributes
----------
level: int
Priority of this converter over others.
A higher level will be used first, if there
are multiple converters available
representation: str
        A unique identifier for this converter.
instance:
Used to select the correct converter.
This should fulfill isinstance(other, self.instance)
or __eq__ should be overwritten.
"""
level = 100
representation = "datetime"
instance = datetime
def encode(self, obj: datetime) -> str:
"""Convert the datetime object to str / isoformat"""
return obj.isoformat()
def decode(self, value: str) -> datetime:
"""Create datetime object from str / isoformat"""
return datetime.fromisoformat(value)
````
This allows us to use this new serializer:
````python
znjson.config.register(DatetimeConverter) # we need to register the new converter first
json_string = json.dumps(dt, cls=znjson.ZnEncoder, indent=4)
json.loads(json_string, cls=znjson.ZnDecoder)
````
and will result in
````json
{
"_type": "datetime",
"value": "2022-03-11T09:47:35.280331"
}
````
If you don't want to register your converter to be used everywhere, simply use:
```python
json_string = json.dumps(dt, cls=znjson.ZnEncoder.from_converters(DatetimeConverter))
``` | znjson | /znjson-0.2.2.tar.gz/znjson-0.2.2/README.md | README.md |
# znlib
This package provides you with a CLI to list your installed zincware libraries.
When installing via `pip install znlib[zntrack]` your output should look something like:
```
>>> znlib
Available zincware packages:
✓ znlib (0.1.0)
✓ zntrack (0.4.3)
✗ mdsuite
✓ znjson (0.2.1)
✓ zninit (0.1.1)
✓ dot4dict (0.1.1)
✗ znipy
✗ supercharge
✗ znvis
✗ symdet
```
Furthermore, `znlib` provides you with some example [ZnTrack](https://github.com/zincware/ZnTrack) Nodes.
```python
from znlib.examples import MonteCarloPiEstimator
mcpi = MonteCarloPiEstimator(n_points=1000).write_graph(run=True)
print(mcpi.load().estimate)
>>> 3.128
```
The idea of the `znlib` package is to provide a collection of [ZnTrack](https://github.com/zincware/ZnTrack) Nodes from all different fields of research.
Every contribution is very welcome.
For new Nodes:
1. Fork this repository.
2. Create a file under the directory `znlib/examples` (a minimal sketch is shown below)
3. Make a Pull request.
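A new Node contribution could be as small as the following sketch (file name, class, and logic are purely illustrative, written against the ZnTrack `zn.params`/`zn.outs` Node API):
```python
# znlib/examples/coin_flip.py (hypothetical file)
from random import random

import zntrack


class CoinFlip(zntrack.Node):
    """Estimate the probability of heads from repeated coin flips."""

    n_flips: int = zntrack.zn.params()
    heads_ratio: float = zntrack.zn.outs()

    def run(self):
        heads = sum(random() < 0.5 for _ in range(self.n_flips))
        self.heads_ratio = heads / self.n_flips
```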
| znlib | /znlib-0.1.0.tar.gz/znlib-0.1.0/README.md | README.md |
# Znop
Library that solves discrete math operations of the group Zn, provides both as calculator program or third party library.
> The group Zn consists of the elements {0, 1, 2, . . . , n−1} with addition mod n as the operation. You can also multiply elements of Zn, but you do not obtain a group: The element 0 does not have a multiplicative inverse, for instance.
> However, if you confine your attention to the units in Zn — the elements which have multiplicative inverses — you do get a group under multiplication mod n. It is denoted Un, and is called the group of units in Zn.
## Program Usage
Describe how to install the calculator and its commands.
> ***Note***: This program will ***always create a `znop_db.json` file if it doesn't exist*** in the directory you execute the program, this file is aimed to save your last ~30 commands and the Zn group (default n=10) set on your program.
### Install from source
0. Make sure to have python > v3.6 installed.
1. `$ git clone https://github.com/paaksing/Znop.git`
2. `$ cd Znop`
3. `$ python setup.py install`
4. `$ znop`.
### Install using pip
0. Make sure to have python > v3.6 installed and `pip` installed.
1. `$ pip install znop`.
2. `$ znop`.
### Install as executable
1. Find the latest executable in this repository's [Releases](https://github.com/paaksing/Znop/releases).
2. Download it to local machine
3. Execute it.
### Commands
All payloads passed to the commands should strictly match this regex: `[a-zA-Z0-9\+\-\*\(\)\^]`
| Command | Description |
| --- | --- |
| set n=`<setnumber>` | Set the set number of Z |
| reduce `<expression>` | Reduce a Zn expression or equation |
| solve `<equation>` | Solve an one-dimensional Zn equation |
| help | Usage of this program |
| quit | Quit this program |
### Example
```bash
(n=10) reduce (3x*9)+78-4x
3x+8
(n=10) set n=6
OK
(n=6) solve x^2+3x+2=0
x ∈ {1, 2, 4, 5}
(n=6) quit
```
## Library Usage
Describe usage and API of this library.
### Requirements and installation
- Python 3.6 (due to requirements of f-strings)
- Install using `pip install znop`
## API Documentation
This library's documented API consists of two modules: `core` and `exceptions`. All objects in this library can be "copied" or "reinstantiated" by doing `eval(repr(obj))` where obj is an `znop` object. `str()` will return the string representation of the object and `repr()` will return the string representation of the object in python syntax.
Import the objects from their respective modules, e.g. `from znop.core import ZnEquation`. A combined usage sketch follows the `ZnEquation` reference below.
### znop.core.ZnTerm
Represents a term in the group Zn
- `__init__(n: int, raw: str)`: Create an instance of ZnTerm, arguments: n (set number), raw (raw string of term, e.g. `'2x'`).
- `__add__, __sub__, __mul__, __neg__, __eq__`: This object supports the `+`, `-` and `*` operations between ZnTerms and always returns a new ZnTerm; multiplication can additionally take a ZnExpression, in which case the distributive law is applied. Negation (`-`) also returns a new ZnTerm, and equality comparison (`==`) between ZnTerms is supported.
- `eval(values: Dict[str, int])`: Evaluate the variables in the term, receives a mapping of variable name to value e.g. `{'x': 6}`, and return a new ZnTerm.
### znop.core.ZnExpression
- `__init__(n: int, raw: str)`: Create an instance of ZnExpression, arguments: n (set number), raw (raw string of expression, e.g. `'2x+x-3'`). This expression is automatically reduced to its simplest form.
- `__mul__, __eq__`: This object supports `*` between ZnExpressions and ZnTerms by applying the distributive law. It also supports equality comparison (`==`) between ZnExpressions.
- `reduce()`: Reduce the expression to the simplest form, this function is automatically called on instantiation.
- `eval(values: Dict[str, int])`: Evaluate the variables in the expression, receives a mapping of variable name to value e.g. `{'x': 6}`, and return a new ZnExpression.
### znop.core.ZnEquation
- `__init__(n: int, raw: str)`: Create an instance of ZnEquation, arguments: n (set number), raw (raw string of equation, e.g. `'2x^2+3=0'`). This equation is automatically reduced to its simplest form.
- `reduce()`: Reduce the equation to the simplest form, this function is automatically called on instantiation.
- `solve()`: Solve the equation by returning a list of solutions (ints). If the equation cannot be solved, then `ResolveError` will be raised.
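Putting the pieces above together, a short usage sketch (the inline comments mirror the calculator examples earlier; the exact `str()` formatting and the order of solutions may differ):
```python
from znop.core import ZnTerm, ZnExpression, ZnEquation

term = ZnTerm(10, "2x")                  # a single term in Z10
expr = ZnExpression(10, "(3x*9)+78-4x")
print(expr)                              # reduced on creation, e.g. 3x+8
print(expr.eval({"x": 6}))               # substitute x = 6 and reduce again

eq = ZnEquation(6, "x^2+3x+2=0")
print(eq.solve())                        # e.g. [1, 2, 4, 5]
```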
### znop.exceptions.ZSetError
Operation between ZnInt of different Z set.
### znop.exceptions.ZVarError
Operation between ZnInt of different variables outside products.
### znop.exceptions.ParseError
Indicates a parsing error, reason will be stated when `ParseError` is thrown.
### znop.exceptions.ResolveError
Could not resolve equation.
| znop | /znop-0.2.0.tar.gz/znop-0.2.0/README.md | README.md |
# znp
znp stands for Zathura Next Or Previous. You can also use znp to add a given
file to an instance of [zathura](https://pwmt.org/projects/zathura/).
# Usage
## Next or Previous
The main goal of znp is to provide an easy way to go to the next or previous
file from within zathura. As of
[yet](https://git.pwmt.org/pwmt/zathura/-/issues/93), this functionality is not
built into zathura. However, after installing `znp` you can add the
following to your `zathurarc` to set `N` and `P` to go to the next or previous
file:
``` vim-snippet
map N exec "znp -n '$FILE'"
map P exec "znp -p '$FILE'"
" Please note the ^ ^ apostrophes around $FILE.
" These are necessary for files with whitespace
```
*Note* that if your system does not use extended window manager hints
([ewmh](https://specifications.freedesktop.org/wm-spec/wm-spec-1.3.html)), or
you do not have the ewmh python package installed, then this command may fail
if you have two instances of zathura open in the same directory. This is not
something that I have a reasonable fix for and there is no way to reliably
determine the instance issuing the next or previous command. The only way I can
think of fixing this would require patching zathura to include expansion of a
`$PID` variable from the exec function and include that in the zathurarc
command. However, I am not a programmer, so reviewing the code base and
getting this functionality added may take me some time.
## Adding files
znp can act as a zathura wrapper and add a given file to an existing instance:
``` shell
znp file.pdf
znp /path/to/file.pdf
```
You can give znp a relative or absolute path to file. znp will insert the
current working directory to make a relative path absolute. No variable
expansion will be performed by znp as it expects `$HOME` and such to get expanded
by the shell calling znp.
The above works best when only one instance of zathura exists. However, if
multiple exist then zathura will use the user defined `prompt_cmd` set in
`$XDG_CONFIG_HOME/znp/znp.conf` to present a list of zathura instances to open
the file in. The default is `fzf` but you may use `dmenu` or `rofi`. Here is
how this looks in practice:


To avoid any prompting, you can pass the desired pid to use with the `-P` flag:
``` shell
znp -P 123456 file.pdf
znp -P 123456 /path/to/file.pdf
```
This would require a bit more work on your part but it may be useful in
scripting.
## Query
Speaking of scripting, I added the `-q, --query` flag for my personal scripting
purposes.
The `--query` flag will take the `FILE` argument given to znp and search all
zathura pids for the _first_ (see the note in the [next or
previous](#next-or-previous) section) one that has that file open and return
its pid. I make use of this to track my last read pdf, epub, cbz/r, zip, etc.
using the returned pid to kill the assumed instance issuing the command.
Basically a session tracker so to speak. Maybe there are other purposes for this
or maybe the `zathura.py` module would be useful as a standalone module for
interacting with zathura via dbus. No clue, let me know.
## User config
You can set the default command prompt in `$XDG_CONFIG_HOME/znp/znp.conf` like
so:
```
prompt_cmd = dmenu
```
**Note** there are no quotes. You can also skip the spaces if you like.
If you have any args/flags you want to use with your command prompt add them
like so:
```
prompt_args = -l 20 -i
```
Simply provide the args/flags as you would normally when using your chosen
prompt_cmd.
**Note** If your prompt_args contain an `=` sign then please escape it with a
backslash otherwise you will get an error.
# Installation
znp is available via [pypi](https://pypi.org/project/znp/) and can install it
via pip in the usual way:
``` shell
pip install znp
```
Use the following if you are installing on a system running X and using
[ewmh](https://specifications.freedesktop.org/wm-spec/wm-spec-1.3.html):
``` shell
pip install znp[x11]
```
Ensure `~/.local/bin` is in your `$PATH`, otherwise znp will not be callable from
zathura unless you give the full path to znp.
## Dependencies
1. `python-magic` - used to detect the file type of the next file to prevent
zathura from opening an unreadable file, e.g. log files, markdown files, etc.
2. `psutil` - used to get zathura pids.
## Optional Dependency
1. `ewmh` - used to get the pid of window calling znp. This is a bit hacky but
does allow for the core functionality (opening the next or previous file) to
work without issue. Provided under the `[x11]` branch.
| znp | /znp-0.0.12.tar.gz/znp-0.0.12/README.md | README.md |
[](https://badge.fury.io/py/znslice)
[](https://coveralls.io/github/zincware/ZnSlice?branch=main)
[](https://github.com/zincware)
# ZnSlice
A lightweight library (without external dependencies) for:
- advanced slicing.
- cache `__getitem__(self, item)`
- lazy load `__getitem__(self, item)`
# Installation
```bash
pip install znslice
```
# Usage
## Advanced Slicing and Cache
Convert List to `znslice.LazySequence` to allow advanced slicing.
```python
import znslice
lst = znslice.LazySequence.from_obj([1, 2, 3], indices=[0, 2])
print(lst[[0, 1]].tolist()) # [1, 3]
```
```python
import znslice
import collections.abc
class MapList(collections.abc.Sequence):
def __init__(self, data, func):
self.data = data
self.func = func
@znslice.znslice
def __getitem__(self, item: int):
print(f"Loading item = {item}")
return self.func(self.data[item])
def __len__(self):
return len(self.data)
data = MapList([0, 1, 2, 3, 4], lambda x: x ** 2)
assert data[0] == 0
assert data[[1, 2, 3]] == [1, 4, 9]
# calling data[:] will now only compute data[4] and load the remaining data from cache
assert data[:] == [0, 1, 4, 9, 16]
```
## Lazy Database Loading
You can use `znslice` to lazy load data from a database. This is useful if you have a large database and only want to load a small subset of the data.
In the following we will use the `ase` package to generate `Atoms` objects stored in a database and load them lazily.
```python
import ase.io
import ase.db
import znslice
import tqdm
import random
# create a database
with ase.db.connect("data.db", append=False) as db:
for _ in range(10):
atoms = ase.Atoms('CO', positions=[(0, 0, 0), (0, 0, random.random())])
db.write(atoms, group="data")
# load the database lazily
class ReadASEDB:
def __init__(self, file):
self.file = file
@znslice.znslice(
        advanced_slicing=True, # this getitem supports advanced slicing
lazy=True # we want to lazy load the data
)
def __getitem__(self, item):
data = []
with ase.db.connect(self.file) as database:
if isinstance(item, int):
print(f"get {item = }")
return database[item + 1].toatoms()
for idx in tqdm.tqdm(item):
data.append(database[idx + 1].toatoms())
return data
def __len__(self):
with ase.db.connect(self.file) as db:
return len(db)
db = ReadASEDB("data.db")
data = db[::2] # LazySequence([<__main__.ReadASEDB>], [[0, 2, 4, 6, 8]])
data.tolist() # list[ase.Atoms]
# supports addition, advanced slicing, etc.
data = db[::2] + db[1::2]
```
| znslice | /znslice-0.1.3.tar.gz/znslice-0.1.3/README.md | README.md |
[](https://coveralls.io/github/zincware/ZnTrack)
[](https://codecov.io/gh/zincware/ZnTrack)
[](https://codeclimate.com/github/zincware/ZnTrack/maintainability)

[](https://badge.fury.io/py/zntrack)
[](https://github.com/psf/black/)
[](https://zntrack.readthedocs.io/en/latest/?badge=latest)
[](https://mybinder.org/v2/gh/zincware/ZnTrack/HEAD)
[](https://doi.org/10.5281/zenodo.6472850)
[](https://zntrack.readthedocs.io/en/latest/)
[](https://github.com/zincware)

# ZnTrack: A Parameter Tracking Package for Python
ZnTrack `zɪŋk træk` is a lightweight and easy-to-use package for tracking
parameters in your Python projects using DVC. With ZnTrack, you can define
parameters in Python classes and monitor how they change over time. This
information can then be used to compare the results of different runs, identify
computational bottlenecks, and avoid the re-running of code components where
parameters have not changed.
## Key Features
- Parameter, output and metric tracking: ZnTrack makes it easy to store and
track the values of parameters in your Python code. It further allows you to
store any outputs produced and gives an easy interface to define metrics.
- Lightweight and database-free: Unlike other parameter tracking solutions,
ZnTrack is lightweight and does not require any databases.
## Getting Started
To get started with ZnTrack, you can install it via pip: `pip install zntrack`
Next, you can start using ZnTrack to track parameters, outputs and metrics in
your Python code. Here's an example of how to use ZnTrack to track the value of
a parameter in a Python class. Start in an empty directory and run `git init`
and `dvc init` for preparation.
Then put the following into a python file called `hello_world.py` and call it
with `python hello_world.py`.
```python
import zntrack
from random import randrange
class HelloWorld(zntrack.Node):
"""Define a ZnTrack Node"""
# parameter to be tracked
max_number: int = zntrack.zn.params()
# parameter to store as output
random_number: int = zntrack.zn.outs()
def run(self):
"""Command to be run by DVC"""
self.random_number = randrange(self.max_number)
if __name__ == "__main__":
# Write the computational graph
with zntrack.Project() as project:
hello_world = HelloWorld(max_number=512)
project.run()
```
This will create a [DVC](https://dvc.org) stage `HelloWorld`. The workflow is
defined in `dvc.yaml` and the parameters are stored in `params.yaml`.
This will run the workflow with `dvc repro` automatically. Once the graph is
executed, the results, i.e. the random number, can be accessed directly by the
Node object.
```python
hello_world.load()
print(hello_world.random_number)
```
> ## Tip
>
> You can easily load this Node directly from a repository.
>
> ```python
> import zntrack
>
> node = zntrack.from_rev(
> "HelloWorld",
> remote="https://github.com/PythonFZ/ZnTrackExamples.git",
> rev="b9316bf",
> )
> ```
>
> Try accessing the `max_number` parameter and `random_number` output. All Nodes
> from this and many other repositories can be loaded like this.
An overview of all the ZnTrack features as well as more detailed examples can be
found in the [ZnTrack Documentation](https://zntrack.readthedocs.io/en/latest/).
## Wrap Python Functions
ZnTrack also provides tools to convert a Python function into a DVC Node. This
approach is much more lightweight than the class-based approach, but offers only
a reduced set of functionality. Therefore, it is recommended for smaller nodes
that do not need the additional toolset that the class-based approach provides.
```python
from zntrack import nodify, NodeConfig
import pathlib
@nodify(outs=pathlib.Path("text.txt"), params={"text": "Lorem Ipsum"})
def write_text(cfg: NodeConfig):
cfg.outs.write_text(
cfg.params.text
)
# build the DVC graph
with zntrack.Project() as project:
write_text()
project.run()
```
The `cfg` dataclass passed to the function provides access to all configured
files and parameters via [dot4dict](https://github.com/zincware/dot4dict). The
function body will be executed by the `dvc repro` command or if ran via
`write_text(run=True)`. All parameters are loaded from or stored in
`params.yaml`.
# Technical Details
## ZnTrack as an Object-Relational Mapping for DVC
On a fundamental level the ZnTrack package provides an easy-to-use interface for
DVC directly from Python. It handles all the computational overhead of reading
config files, defining outputs in the `dvc.yaml` as well as in the script and
much more.
For more information on DVC visit their [homepage](https://dvc.org/doc).
# Copyright
This project is distributed under the
[Apache License Version 2.0](https://github.com/zincware/ZnTrack/blob/main/LICENSE).
## Similar Tools
The following (incomplete) list of other projects that either work together with
ZnTrack or can achieve similar results with slightly different goals or
programming languages.
- [DVC](https://dvc.org/) - Main dependency of ZnTrack for Data Version Control.
- [dvthis](https://github.com/jcpsantiago/dvthis) - Introduce DVC to R.
- [DAGsHub Client](https://github.com/DAGsHub/client) - Logging parameters from
within .Python
- [MLFlow](https://mlflow.org/) - A Machine Learning Lifecycle Platform.
- [Metaflow](https://metaflow.org/) - A framework for real-life data science.
- [Hydra](https://hydra.cc/) - A framework for elegantly configuring complex
applications
| zntrack | /zntrack-0.6.3a1.tar.gz/zntrack-0.6.3a1/README.md | README.md |
from typing import *
import math
from anchorpy import Program, Context
from solana.publickey import PublicKey
from solana.keypair import Keypair
from solana.rpc.commitment import Finalized
from solana.rpc.types import TxOpts
from solana.sysvar import SYSVAR_RENT_PUBKEY
import solana.system_program
CONTROL_ACCOUNT_SIZE = 8 + 4482
def decode_symbol(s) -> str:
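    # Symbols are fixed-size, zero-padded byte arrays; decode up to the first NUL byte.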
s = s.data
i = s.index(0)
return bytes(s[:i]).decode("utf-8")
def decode_wrapped_i80f48(n) -> float:
return n.data / (2**48)
def div_to_float(a: int, b: int) -> float:
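    # Exact int/int division to float: integral part plus a GCD-reduced fraction to limit floating-point error.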
q, r = divmod(a, b)
gcd = math.gcd(r, b)
return float(q) + (r // gcd) / (b // gcd)
def big_to_small_amount(n: int | float, /, *, decimals: int) -> int:
shift = 10 ** abs(decimals)
if decimals >= 0:
integral = int(n) * shift
fractional = int((n % 1) * shift)
return integral + fractional
else:
return int(n) // shift
def small_to_big_amount(n: int | float, /, *, decimals: int):
return n / 10**decimals
def price_to_lots(
n: int | float,
/,
*,
base_decimals: int,
quote_decimals: int,
base_lot_size: int,
quote_lot_size: int,
) -> int:
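    # Convert a human-readable price (quote per base) into the integer lot price used by the dex market.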
return round(
float(n)
* base_lot_size
/ quote_lot_size
* 10 ** (quote_decimals - base_decimals)
)
def lots_to_price(
n: int,
/,
*,
base_decimals: int,
quote_decimals: int,
base_lot_size: int,
quote_lot_size: int,
) -> float:
n *= quote_lot_size * 10 ** (base_decimals - quote_decimals)
return div_to_float(n, base_lot_size)
def size_to_lots(n: float, /, *, decimals: int, lot_size: int) -> int:
return round(n * 10**decimals) // lot_size
def lots_to_size(n: int, /, *, decimals: int, lot_size: int) -> float:
return div_to_float(n * lot_size, 10**decimals)
def margin_pda(
*,
owner: PublicKey,
state: PublicKey,
program_id: PublicKey,
) -> Tuple[PublicKey, int]:
return PublicKey.find_program_address(
[
owner.__bytes__(),
state.__bytes__(),
bytes("marginv1", "utf-8"),
],
program_id,
)
def open_orders_pda(
*, control: PublicKey, dex_market: PublicKey, program_id: PublicKey
) -> Tuple[PublicKey, int]:
return PublicKey.find_program_address(
[control.__bytes__(), dex_market.__bytes__()], program_id
)
def state_signer_pda(
*,
state: PublicKey,
program_id: PublicKey,
) -> Tuple[PublicKey, int]:
return PublicKey.find_program_address(
[
state.__bytes__(),
],
program_id,
)
def heimdall_pda(*, program_id: PublicKey) -> PublicKey:
return PublicKey.find_program_address([b"heimdallv1"], program_id)[0]
async def create_margin(
*, program: Program, state: PublicKey, key: PublicKey, nonce: int
) -> str:
control = Keypair()
control_lamports = (
await program.provider.connection.get_minimum_balance_for_rent_exemption(
CONTROL_ACCOUNT_SIZE
)
)["result"]
return await program.rpc["create_margin"](
nonce,
ctx=Context(
accounts={
"state": state,
"authority": program.provider.wallet.public_key,
"payer": program.provider.wallet.public_key,
"margin": key,
"control": control.public_key,
"rent": SYSVAR_RENT_PUBKEY,
"system_program": solana.system_program.SYS_PROGRAM_ID,
},
pre_instructions=[
solana.system_program.create_account(
solana.system_program.CreateAccountParams(
from_pubkey=program.provider.wallet.public_key,
new_account_pubkey=control.public_key,
lamports=control_lamports,
space=CONTROL_ACCOUNT_SIZE,
program_id=program.program_id,
)
)
],
signers=[control],
options=TxOpts(
max_retries=5,
preflight_commitment=Finalized,
skip_confirmation=False,
skip_preflight=False,
),
),
) | zo-sdk | /zo_sdk-0.2.0-py3-none-any.whl/zo/util.py | util.py |
from typing import Literal, Any
from dataclasses import dataclass
from datetime import datetime
from anchorpy import Program
from solana.publickey import PublicKey
Side = Literal["bid", "ask"]
OrderType = Literal[
"limit", "ioc", "postonly", "reduceonlyioc", "reduceonlylimit", "fok"
]
PerpType = Literal["future", "calloption", "putoption", "square"]
@dataclass(frozen=True)
class CollateralInfo:
mint: PublicKey
oracle_symbol: str
decimals: int
weight: int
liq_fee: int
is_borrowable: bool
optimal_util: int
optimal_rate: int
max_rate: int
og_fee: int
is_swappable: bool
serum_open_orders: PublicKey
max_deposit: int
dust_threshold: int
vault: PublicKey
@dataclass(frozen=True)
class FundingInfo:
hourly: float
daily: float
apr: float
@dataclass(frozen=True)
class MarketInfo:
address: PublicKey
symbol: str
oracle_symbol: str
perp_type: PerpType
base_decimals: int
base_lot_size: int
quote_decimals: int
quote_lot_size: int
strike: int
base_imf: int
liq_fee: int
index_price: float
mark_price: float
funding_sample_start_time: datetime
funding_info: None | FundingInfo
@dataclass(frozen=True)
class PositionInfo:
size: float
entry_value: float
realized_pnl: float
funding_index: float
side: Literal["long", "short"]
def order_type_from_str(t: OrderType, /, *, program: Program):
typ = program.type["OrderType"]
match t:
case "limit":
return typ.Limit()
case "ioc":
return typ.ImmediateOrCancel()
case "postonly":
return typ.PostOnly()
case "reduceonlyioc":
return typ.ReduceOnlyIoc()
case "reduceonlylimit":
return typ.ReduceOnlyLimit()
case "fok":
return typ.FillOrKill()
case _:
raise TypeError(f"unsupported order type {t}")
def perp_type_to_str(t: Any, /, *, program: Program) -> PerpType:
# HACK: Enum comparison is currently broken, so using `str`.
t = str(t)
if t == "PerpType.Future()":
return "future"
if t == "PerpType.CallOption()":
return "calloption"
if t == "PerpType.PutOption()":
return "putoption"
if t == "PerpType.Square()":
return "square"
raise LookupError(f"invalid perp type {t}") | zo-sdk | /zo_sdk-0.2.0-py3-none-any.whl/zo/types.py | types.py |
import base64
import enum
import struct
from typing import *
from solana.publickey import PublicKey
from . import util
i128 = NewType("i128", int)
u128 = NewType("u128", int)
def decode_field(ty, v):
if ty == i128:
return ty(int.from_bytes(v, "little", signed=True))
elif ty == u128:
return ty(int.from_bytes(v, "little", signed=False))
else:
return ty(v)
def strip_padding(b: bytes) -> bytes:
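    # Dex account data is framed by a 5-byte b"serum" prefix and a 7-byte b"padding" suffix; return the payload between them.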
if len(b) < 12 or b[:5] != b"serum" or b[-7:] != b"padding":
raise ValueError("invalid buffer for dex struct")
else:
return b[5:-7]
def decode_namedtuple(cls, fmt: str, b: bytes):
return cls._make(
[
decode_field(cls.__annotations__[f], g)
for f, g in zip(cls._fields, struct.unpack(fmt, b))
]
)
class AccountFlag(enum.IntFlag):
INITIALIZED = 1 << 0
MARKET = 1 << 1
OPEN_ORDERS = 1 << 2
REQUEST_QUEUE = 1 << 3
EVENT_QUEUE = 1 << 4
BIDS = 1 << 5
ASKS = 1 << 6
DISABLED = 1 << 7
CLOSED = 1 << 8
PERMISSIONED = 1 << 9
class Market(NamedTuple):
account_flags: AccountFlag
own_address: PublicKey
quote_fees_accrued: int
req_q: PublicKey
event_q: PublicKey
bids: PublicKey
asks: PublicKey
base_lot_size: int
quote_lot_size: int
fee_rate_bps: int
referrer_rebates_accrued: int
funding_index: i128
last_updated: int
strike: int
perp_type: int
base_decimals: int
open_interest: int
open_orders_authority: PublicKey
prune_authority: PublicKey
@property
def quote_decimals(self):
return 6
@classmethod
def from_bytes(cls, b: bytes):
b = strip_padding(b)
r = decode_namedtuple(cls, "<Q32sQ32s32s32s32s4Q16s5Q32s32s1032x", b)
if (
r.account_flags
!= AccountFlag.INITIALIZED | AccountFlag.MARKET | AccountFlag.PERMISSIONED
):
raise ValueError("invalid account_flags for market")
return r
@classmethod
def from_base64(cls, b: str):
return cls.from_bytes(base64.b64decode(b))
def _decode_orderbook_from_base64(self, bids: str, asks: str):
return Orderbook(Slab.from_base64(bids), Slab.from_base64(asks), self)
class SlabNode:
class Uninitialized(NamedTuple):
pass
class Inner(NamedTuple):
prefix_len: int
key: u128
l: int
r: int
class Leaf(NamedTuple):
owner_slot: int
fee_tier: int
key: u128
control: PublicKey
quantity: int
client_order_id: int
class Free(NamedTuple):
next: int
class LastFree(NamedTuple):
pass
@classmethod
def _from_bytes(cls, b: bytes):
tag, body = int.from_bytes(b[:4], "little", signed=False), b[4:]
if tag < 0 or tag > 4:
raise ValueError(f"invalid tag type '{tag}' for slab node")
fmt, cons = [
("<68x", cls.Uninitialized),
("<I16sII40x", cls.Inner),
("<BBxx16s32sQQ", cls.Leaf),
("<I64x", cls.Free),
("<68x", cls.LastFree),
][tag]
return decode_namedtuple(cons, fmt, body)
class Order(NamedTuple):
owner_slot: int
fee_tier: int
order_id: u128
control: PublicKey
size_lots: int
client_order_id: int
price: float
size: float
side: Literal["bid", "ask"]
class Slab(NamedTuple):
account_flags: AccountFlag
bump_index: int
free_list_len: int
free_list_head: int
root: int
leaf_count: int
nodes: list[SlabNode]
@property
def side(self) -> Literal["bid", "ask"]:
return "bid" if AccountFlag.BIDS in self.account_flags else "ask"
def __iter(self, *, ascending: bool) -> Generator[SlabNode.Leaf, None, None]:
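        # In-order traversal of the slab's binary tree, yielding resting-order leaves sorted by key (ascending or descending).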
if self.leaf_count <= 0:
return
stack = [self.root]
while len(stack) > 0:
node = self.nodes[stack.pop()]
if isinstance(node, SlabNode.Leaf):
yield node
elif isinstance(node, SlabNode.Inner):
if ascending:
stack.extend((node.r, node.l))
else:
stack.extend((node.l, node.r))
def __iter__(self):
return self.__iter(ascending=True)
def __reversed__(self):
return self.__iter(ascending=False)
def min(self):
return next(self.__iter(ascending=True))
def max(self):
return next(self.__iter(ascending=False))
@classmethod
def from_bytes(cls, b: bytes):
b = strip_padding(b)
head, tail = b[:40], b[40:]
fs = list(struct.unpack("<QQQIIQ", head))
fs[0] = decode_field(AccountFlag, fs[0])
nodes = []
for i in range(0, len(tail), 72):
x = SlabNode._from_bytes(tail[i : i + 72])
if isinstance(x, SlabNode.Uninitialized):
break
nodes.append(x)
r = cls._make([*fs, list(nodes)])
        # exactly one of BIDS / ASKS must be set on an initialized slab
        if not (AccountFlag.INITIALIZED in r.account_flags) or (
            (AccountFlag.BIDS in r.account_flags) == (AccountFlag.ASKS in r.account_flags)
        ):
raise ValueError("invalid account_flags for slab")
return r
@classmethod
def from_base64(cls, b: str):
return cls.from_bytes(base64.b64decode(b))
class Orderbook:
bids: list[Order]
asks: list[Order]
def __init__(self, bids: Slab, asks: Slab, mkt: Market):
kw = {
"base_decimals": mkt.base_decimals,
"quote_decimals": mkt.quote_decimals,
"base_lot_size": mkt.base_lot_size,
"quote_lot_size": mkt.quote_lot_size,
}
self.bids = [
Order._make(
[
*o,
util.lots_to_price(o.key >> 64, **kw),
util.lots_to_size(
o.quantity,
decimals=mkt.base_decimals,
lot_size=mkt.base_lot_size,
),
"bid",
]
)
for o in bids.__reversed__()
]
self.asks = [
Order._make(
[
*o,
util.lots_to_price(o.key >> 64, **kw),
util.lots_to_size(
o.quantity,
decimals=mkt.base_decimals,
lot_size=mkt.base_lot_size,
),
"ask",
]
)
for o in asks
] | zo-sdk | /zo_sdk-0.2.0-py3-none-any.whl/zo/dex.py | dex.py |
from typing import *
import asyncio
import os
import json
import inspect
from datetime import datetime, timezone as tz
from anchorpy import Idl, Program, Provider, Context, Wallet
from anchorpy.error import AccountDoesNotExistError
from solana.publickey import PublicKey
from solana.keypair import Keypair
from solana.transaction import TransactionInstruction, Transaction, TransactionSignature
from solana.rpc.commitment import Commitment, Confirmed
from solana.rpc.async_api import AsyncClient
from solana.rpc.types import TxOpts
from solana.sysvar import SYSVAR_RENT_PUBKEY
from solana.system_program import SYS_PROGRAM_ID
from spl.token.instructions import get_associated_token_address
from spl.token.constants import TOKEN_PROGRAM_ID
from . import util, types, config
from .config import configs, Config
from .types import (
Side,
OrderType,
CollateralInfo,
FundingInfo,
MarketInfo,
PositionInfo,
)
from .dex import Market, Orderbook, Order
T = TypeVar("T")
def GenIxDispatch(cls):
"""Decorator for use with the `Zo` class.
This decorator finds all methods ending in `_ix` on the class,
and adds a trivial wrapper to dispatch the instruction using `Zo.send`.
The `__doc__` is moved to the generated method.
"""
def gen(f):
async def g(self, *a, **kw):
# If you're looking for the source, see the source
# for the `_ix` variant of this method.
return await self.send(f(self, *a, **kw))
return g
for n, f in inspect.getmembers(cls, predicate=inspect.isfunction):
if n.startswith("_") or not n.endswith("_ix"):
continue
name = n[:-3]
g = gen(f)
g.__name__ = name
g.__qualname__ = f.__qualname__[:-3]
g.__doc__ = inspect.getdoc(f)
g.__signature__ = inspect.signature(f).replace(
return_annotation=TransactionSignature
)
g.__annotations__ = inspect.get_annotations(f) # Copies.
g.__annotations__["return"] = TransactionSignature
# The docs are intended for the non '_ix' variant, so modify them.
f.__doc__ = f"See `Zo.{name}`."
setattr(cls, name, g)
return cls
class ZoIndexer(Generic[T]):
def __init__(self, d: dict[str, T], m: Callable[[str | int | PublicKey], str]):
self.d = d
self.m = m
def __repr__(self):
return self.d.__repr__()
def __iter__(self):
return self.d.items().__iter__()
def __len__(self):
return len(self.d)
def __getitem__(self, i: str | int | PublicKey) -> T:
return self.d[self.m(i)]
@GenIxDispatch
class Zo:
__program: Program
__config: Config
__markets: dict[str, MarketInfo]
__collaterals: dict[str, CollateralInfo]
__orderbook: dict[str, Orderbook]
__balance: dict[str, float]
__position: dict[str, PositionInfo]
__dex_markets: dict[str, Market]
__orders: dict[str, list[Order]]
__markets_map: dict[str | int | PublicKey, str]
__collaterals_map: dict[str | int | PublicKey, str]
_zo_state: Any
_zo_state_signer: PublicKey
_zo_heimdall_key: PublicKey
_zo_cache: Any
_zo_margin: Any
_zo_margin_key: None | PublicKey
_zo_control: Any
def __init__(
self,
*,
_program,
_config,
_state,
_state_signer,
_margin,
_margin_key,
_heimdall_key,
):
self.__program = _program
self.__config = _config
self._zo_state = _state
self._zo_state_signer = _state_signer
self._zo_margin = _margin
self._zo_margin_key = _margin_key
self._zo_heimdall_key = _heimdall_key
@classmethod
async def new(
cls,
*,
cluster: Literal["devnet", "mainnet"],
payer: Keypair | None = None,
url: str | None = None,
load_margin: bool = True,
create_margin: bool = True,
tx_opts: TxOpts = TxOpts(
max_retries=None,
preflight_commitment=Confirmed,
skip_confirmation=False,
skip_preflight=False,
),
):
"""Create a new client instance.
Args:
cluster: Which cluster to connect to.
payer: The transaction payer and margin owner. Defaults to
the local transaction payer.
url: URL for the RPC endpoint.
load_margin: Whether to load the associated margin account.
If `False`, any transaction requiring a margin will fail.
create_margin: Whether to create the associated margin
account if it doesn't already exist.
tx_opts: The transaction options.
"""
if cluster not in configs.keys():
raise TypeError(f"`cluster` must be one of: {configs.keys()}")
config = configs[cluster]
if url is None:
url = config.CLUSTER_URL
idl_path = os.path.join(os.path.dirname(__file__), "idl.json")
with open(idl_path) as f:
raw_idl = json.load(f)
idl = Idl.from_json(raw_idl)
conn = AsyncClient(url)
wallet = Wallet(payer) if payer is not None else Wallet.local()
provider = Provider(conn, wallet, opts=tx_opts)
program = Program(idl, config.ZO_PROGRAM_ID, provider=provider)
state = await program.account["State"].fetch(config.ZO_STATE_ID)
state_signer, state_signer_nonce = util.state_signer_pda(
state=config.ZO_STATE_ID, program_id=config.ZO_PROGRAM_ID
)
if state.signer_nonce != state_signer_nonce:
raise ValueError(
f"Invalid state key ({config.ZO_STATE_ID}) for program id ({config.ZO_PROGRAM_ID})"
)
heimdall_key = util.heimdall_pda(program_id=config.ZO_PROGRAM_ID)
margin = None
margin_key = None
if load_margin:
margin_key, nonce = util.margin_pda(
owner=wallet.public_key,
state=config.ZO_STATE_ID,
program_id=config.ZO_PROGRAM_ID,
)
try:
margin = await program.account["Margin"].fetch(margin_key)
except AccountDoesNotExistError as e:
if not create_margin:
raise e
await util.create_margin(
program=program,
state=config.ZO_STATE_ID,
key=margin_key,
nonce=nonce,
)
margin = await program.account["Margin"].fetch(margin_key)
zo = cls(
_config=config,
_program=program,
_state=state,
_state_signer=state_signer,
_margin=margin,
_margin_key=margin_key,
_heimdall_key=heimdall_key,
)
await zo.refresh(commitment=Confirmed)
return zo
@property
def program(self) -> Program:
return self.__program
@property
def provider(self) -> Provider:
return self.program.provider
@property
def connection(self) -> AsyncClient:
return self.provider.connection
@property
def wallet(self) -> Wallet:
return self.provider.wallet
@property
def collaterals(self):
"""List of collaterals and their metadata."""
return ZoIndexer(self.__collaterals, lambda k: self.__collaterals_map[k])
@property
def markets(self):
"""List of collaterals and markets metadata."""
return ZoIndexer(self.__markets, lambda k: self.__markets_map[k])
@property
def orderbook(self):
"""Current state of the orderbook."""
return ZoIndexer(self.__orderbook, lambda k: self.__markets_map[k])
@property
def balance(self):
"""Current account balance."""
return ZoIndexer(self.__balance, lambda k: self.__collaterals_map[k])
@property
def position(self):
"""Current position."""
return ZoIndexer(self.__position, lambda k: self.__markets_map[k])
@property
def orders(self):
"""Currently active orders."""
return ZoIndexer(self.__orders, lambda k: self.__markets_map[k])
def _get_open_orders_info(self, key: int | str, /):
if isinstance(key, str):
for k, v in self.__markets_map.items():
if v == key and isinstance(k, int):
key = k
break
else:
ValueError("")
o = self._zo_control.open_orders_agg[key]
return o if o.key != PublicKey(0) else None
def __reload_collaterals(self):
map = {}
collaterals = {}
for i, c in enumerate(self._zo_state.collaterals):
if c.mint == PublicKey(0):
break
symbol = util.decode_symbol(c.oracle_symbol)
map[symbol] = symbol
map[i] = symbol
map[c.mint] = symbol
collaterals[symbol] = CollateralInfo(
mint=c.mint,
oracle_symbol=symbol,
decimals=c.decimals,
weight=c.weight,
liq_fee=c.liq_fee,
is_borrowable=c.is_borrowable,
optimal_util=c.optimal_util,
optimal_rate=c.optimal_rate,
max_rate=c.max_rate,
og_fee=c.og_fee,
is_swappable=c.is_swappable,
serum_open_orders=c.serum_open_orders,
max_deposit=c.max_deposit,
dust_threshold=c.dust_threshold,
vault=self._zo_state.vaults[i],
)
self.__collaterals_map = map
self.__collaterals = collaterals
def __reload_markets(self):
map = {}
markets = {}
for i, m in enumerate(self._zo_state.perp_markets):
if m.dex_market == PublicKey(0):
break
symbol = util.decode_symbol(m.symbol)
if symbol == "LUNA-PERP":
continue
map[symbol] = symbol
map[i] = symbol
map[m.dex_market] = symbol
oracle = None
for o in reversed(self._zo_cache.oracles):
if util.decode_symbol(m.oracle_symbol) == util.decode_symbol(o.symbol):
oracle = o
break
else:
raise IndexError(f"oracle for market {symbol} not found")
mark = self._zo_cache.marks[i]
price_adj = 10 ** (m.asset_decimals - 6)
index_price = util.decode_wrapped_i80f48(oracle.price) * price_adj
mark_price = util.decode_wrapped_i80f48(mark.price) * price_adj
if types.perp_type_to_str(m.perp_type, program=self.program) == "square":
index_price = index_price**2 / m.strike
funding_sample_start = datetime.fromtimestamp(
mark.twap.last_sample_start_time, tz=tz.utc
)
cumul_avg = util.decode_wrapped_i80f48(mark.twap.cumul_avg)
if abs(cumul_avg) == 0 or funding_sample_start.minute == 0:
funding_info = None
else:
daily_funding = cumul_avg / funding_sample_start.minute
funding_info = FundingInfo(
daily=daily_funding,
hourly=daily_funding / 24,
apr=daily_funding * 100 * 365,
)
markets[symbol] = MarketInfo(
address=m.dex_market,
symbol=symbol,
oracle_symbol=util.decode_symbol(m.oracle_symbol),
perp_type=types.perp_type_to_str(m.perp_type, program=self.program),
base_decimals=m.asset_decimals,
base_lot_size=m.asset_lot_size,
quote_decimals=6,
quote_lot_size=m.quote_lot_size,
strike=m.strike,
base_imf=m.base_imf,
liq_fee=m.liq_fee,
index_price=index_price,
mark_price=mark_price,
funding_sample_start_time=funding_sample_start,
funding_info=funding_info,
)
self.__markets_map = map
self.__markets = markets
def __reload_balances(self):
if self._zo_margin is None:
return
balances = {}
for i, c in enumerate(self._zo_margin.collateral):
if i not in self.__collaterals_map:
break
decimals = self.collaterals[i].decimals
c = util.decode_wrapped_i80f48(c)
m = self._zo_cache.borrow_cache[i]
m = m.supply_multiplier if c >= 0 else m.borrow_multiplier
m = util.decode_wrapped_i80f48(m)
balances[self.__collaterals_map[i]] = util.small_to_big_amount(
c * m, decimals=decimals
)
self.__balance = balances
def __reload_positions(self):
if self._zo_margin is None:
return
positions = {}
for s, m in self.markets:
if (oo := self._get_open_orders_info(s)) is not None:
positions[s] = PositionInfo(
size=util.small_to_big_amount(
abs(oo.pos_size), decimals=m.base_decimals
),
entry_value=util.small_to_big_amount(
abs(oo.native_pc_total), decimals=m.quote_decimals
),
realized_pnl=util.small_to_big_amount(
oo.realized_pnl, decimals=m.base_decimals
),
funding_index=util.small_to_big_amount(
oo.funding_index, decimals=m.quote_decimals
),
side="long" if oo.pos_size >= 0 else "short",
)
else:
positions[s] = PositionInfo(
size=0, entry_value=0, realized_pnl=0, funding_index=1, side="long"
)
self.__position = positions
async def __reload_dex_markets(self, *, commitment: None | Commitment = None):
ks = [
m.dex_market
for m in self._zo_state.perp_markets
if m.dex_market in self.__markets_map
]
res: Any = await self.connection.get_multiple_accounts(
ks, encoding="base64", commitment=commitment
)
res = res["result"]["value"]
self.__dex_markets = {
self.__markets_map[ks[i]]: Market.from_base64(res[i]["data"][0])
for i in range(len(ks))
}
async def refresh_orders(self, *, commitment: None | Commitment = None):
"""Refresh only the orderbook and orders."""
ss = []
ks = []
for s, mkt in self.__dex_markets.items():
ss.append(s)
ks.extend((mkt.bids, mkt.asks))
res: Any = await self.connection.get_multiple_accounts(
ks, encoding="base64", commitment=commitment
)
res = res["result"]["value"]
orders = self._zo_margin and {}
orderbook = {}
for i, s in enumerate(ss):
mkt = self.__dex_markets[s]
ob = mkt._decode_orderbook_from_base64(
res[2 * i]["data"][0], res[2 * i + 1]["data"][0]
)
orderbook[s] = ob
if self._zo_margin is not None:
os = []
for slab in [ob.bids, ob.asks]:
for o in slab:
if o.control == self._zo_margin.control:
os.append(o)
orders[s] = os
self.__orderbook = orderbook
self.__orders = orders
async def __refresh_margin(self, *, commitment: None | Commitment = None):
if self._zo_margin_key is not None:
self._zo_margin, self._zo_control = await asyncio.gather(
self.program.account["Margin"].fetch(self._zo_margin_key, commitment),
self.program.account["Control"].fetch(
self._zo_margin.control, commitment
),
)
async def refresh(self, *, commitment: Commitment = Confirmed):
"""Refresh the loaded accounts to see updates."""
self._zo_state, self._zo_cache, _ = await asyncio.gather(
self.program.account["State"].fetch(self.__config.ZO_STATE_ID, commitment),
self.program.account["Cache"].fetch(self._zo_state.cache, commitment),
self.__refresh_margin(),
)
self.__reload_collaterals()
self.__reload_markets()
self.__reload_balances()
self.__reload_positions()
await self.__reload_dex_markets(commitment=commitment)
await self.refresh_orders(commitment=commitment)
async def send(
self, *xs: TransactionInstruction | Transaction
) -> TransactionSignature:
"""Dispatch a set of instructions.
Args:
xs: The instruction or transaction to add.
Returns:
The transaction signature.
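
        Illustrative usage (amount and symbol are placeholders):

            sig = await zo.send(zo.deposit_ix(1.5, "SOL"))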
"""
tx = Transaction()
for x in xs:
tx.add(x)
return await self.provider.send(tx)
def deposit_ix(
self,
amount: float,
symbol: str,
/,
*,
repay_only: bool = False,
token_account: None | PublicKey = None,
) -> TransactionInstruction:
"""Deposit collateral into the margin account.
Args:
amount: The amount to deposit, in big units (e.g.: 1.5 SOL, 0.5 BTC).
symbol: Symbol of the collateral being deposited.
repay_only: If true, will only deposit up to the amount borrowed.
token_account: The token account to deposit from, defaulting to
the associated token account.
Returns:
The transaction signature.
"""
info = self.collaterals[symbol]
mint = info.mint
amount = util.big_to_small_amount(amount, decimals=info.decimals)
if token_account is None:
token_account = get_associated_token_address(self.wallet.public_key, mint)
return self.program.instruction["deposit"](
repay_only,
amount,
ctx=Context(
accounts={
"state": self.__config.ZO_STATE_ID,
"state_signer": self._zo_state_signer,
"cache": self._zo_state.cache,
"authority": self.wallet.public_key,
"margin": self._zo_margin_key,
"token_account": token_account,
"vault": self.collaterals[symbol].vault,
"token_program": TOKEN_PROGRAM_ID,
}
),
)
def withdraw_ix(
self,
amount: float,
symbol: str,
/,
*,
allow_borrow: bool = False,
token_account: None | PublicKey = None,
) -> TransactionInstruction:
"""Withdraw collateral from the margin account.
Args:
amount: The amount to withdraw, in big units (e.g.: 1.5 SOL, 0.5 BTC).
            symbol: Symbol of the collateral being withdrawn.
            allow_borrow: If true, allows the withdrawal to dip into
                borrowed funds. If false, only up to the amount
                deposited can be withdrawn, so the amount parameter can
                be set to an arbitrarily large number to ensure that
                all deposits are fully withdrawn.
            token_account: The token account to withdraw into,
                defaulting to the associated token account.
Returns:
The transaction signature.
"""
info = self.collaterals[symbol]
mint = info.mint
amount = util.big_to_small_amount(amount, decimals=info.decimals)
if token_account is None:
token_account = get_associated_token_address(self.wallet.public_key, mint)
return self.program.instruction["withdraw"](
allow_borrow,
amount,
ctx=Context(
accounts={
"state": self.__config.ZO_STATE_ID,
"state_signer": self._zo_state_signer,
"cache": self._zo_state.cache,
"authority": self.wallet.public_key,
"margin": self._zo_margin_key,
"control": self._zo_margin.control,
"token_account": token_account,
"vault": self.collaterals[symbol].vault,
"token_program": TOKEN_PROGRAM_ID,
"heimdall": self._zo_heimdall_key,
}
),
)
def place_order_ix(
self,
size: float,
price: float,
side: Side,
*,
symbol: str,
order_type: OrderType,
limit: int = 10,
client_id: int = 0,
max_ts: None | int = None,
) -> Transaction:
"""Place an order on the orderbook.
Args:
size: The maximum amount of big base units to buy or sell.
price: The limit price in big quote units per big base
units, e.g. 50000 USD/SOL.
side: Whether to place a bid or an ask.
symbol: The market symbol, e.g. "BTC-PERP".
order_type: The order type.
            limit: If this order is taking, this caps the number of
                maker orders the fill will match against before it
                stops and posts the remainder. Set this number lower
                if you run into compute unit issues.
            client_id: Used to tag an order with a unique id, which
                can then be used to cancel the order through
                `Zo.cancel_order_by_client_id`. For this to work
                reliably, make sure the id of every order is unique.
max_ts: If the current on-chain Unix timestamp exceeds this
value, then the order will not go through.
Returns:
The transaction signature.
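
        Illustrative usage (symbol, size and price are placeholders;
        "limit" is assumed to be a valid `OrderType` value):

            await zo.place_order(
                1.0, 45000.0, "bid", symbol="BTC-PERP", order_type="limit"
            )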
"""
mkt = self.__dex_markets[symbol]
info = self.markets[symbol]
is_long = side == "bid"
price = util.price_to_lots(
price,
base_decimals=info.base_decimals,
quote_decimals=info.quote_decimals,
base_lot_size=info.base_lot_size,
quote_lot_size=info.quote_lot_size,
)
order_type_: Any = types.order_type_from_str(order_type, program=self.program)
taker_fee = config.taker_fee(info.perp_type)
fee_multiplier = 1 + taker_fee if is_long else 1 - taker_fee
base_qty = util.size_to_lots(
size, decimals=info.base_decimals, lot_size=info.base_lot_size
)
quote_qty = round(price * fee_multiplier * base_qty * info.quote_lot_size)
pre_ixs = []
oo_key = None
oo_info = self._get_open_orders_info(symbol)
if oo_info is not None:
oo_key = oo_info.key
else:
oo_key, _ = util.open_orders_pda(
control=self._zo_margin.control,
dex_market=info.address,
program_id=self.program.program_id,
)
pre_ixs = [
self.program.instruction["create_perp_open_orders"](
ctx=Context(
accounts={
"state": self.__config.ZO_STATE_ID,
"state_signer": self._zo_state_signer,
"authority": self.wallet.public_key,
"payer": self.wallet.public_key,
"margin": self._zo_margin_key,
"control": self._zo_margin.control,
"open_orders": oo_key,
"dex_market": info.address,
"dex_program": self.__config.ZO_DEX_ID,
"rent": SYSVAR_RENT_PUBKEY,
"system_program": SYS_PROGRAM_ID,
},
)
)
]
return self.program.instruction["place_perp_order_with_max_ts"](
is_long,
price,
base_qty,
quote_qty,
order_type_,
limit,
client_id,
max_ts if max_ts is not None else 2**63 - 1,
ctx=Context(
accounts={
"state": self.__config.ZO_STATE_ID,
"state_signer": self._zo_state_signer,
"cache": self._zo_state.cache,
"authority": self.wallet.public_key,
"margin": self._zo_margin_key,
"control": self._zo_margin.control,
"open_orders": oo_key,
"dex_market": info.address,
"req_q": mkt.req_q,
"event_q": mkt.event_q,
"market_bids": mkt.bids,
"market_asks": mkt.asks,
"dex_program": self.__config.ZO_DEX_ID,
"rent": SYSVAR_RENT_PUBKEY,
},
pre_instructions=pre_ixs,
),
)
def __cancel_order_ix(
self,
*,
symbol: str,
side: None | Side = None,
order_id: None | int = None,
client_id: None | int = None,
) -> TransactionInstruction:
mkt = self.__dex_markets[symbol]
oo = self._get_open_orders_info(symbol)
if oo is None:
raise ValueError("open orders account is uninitialized")
return self.program.instruction["cancel_perp_order"](
order_id,
side == "bid",
client_id,
ctx=Context(
accounts={
"state": self.__config.ZO_STATE_ID,
"cache": self._zo_state.cache,
"authority": self.wallet.public_key,
"margin": self._zo_margin_key,
"control": self._zo_margin.control,
"open_orders": oo.key,
"dex_market": mkt.own_address,
"market_bids": mkt.bids,
"market_asks": mkt.asks,
"event_q": mkt.event_q,
"dex_program": self.__config.ZO_DEX_ID,
}
),
)
def cancel_order_by_order_id_ix(
self, order_id: int, side: Side, *, symbol: str
) -> TransactionInstruction:
"""Cancel an order on the orderbook using the `order_id`.
Args:
order_id: The order id of the order to cancel. To get the
order_id, see the `orders` field.
side: Whether the order is a bid or an ask.
symbol: The market symbol, e.g. "BTC-PERP".
Returns:
The transaction signature.
"""
return self.__cancel_order_ix(symbol=symbol, order_id=order_id, side=side)
def cancel_order_by_client_id_ix(
self, client_id: int, *, symbol: str
) -> TransactionInstruction:
"""Cancel an order on the orderbook using the `client_id`.
Args:
            client_id: The client id that was assigned to the order
                when it was placed.
symbol: The market symbol, e.g. "BTC-PERP".
Returns:
The transaction signature.
"""
return self.__cancel_order_ix(symbol=symbol, client_id=client_id)
def close_position_ix(self, symbol: str, /, *, max_ts: int | None = None):
"""Close a position.
Args:
            symbol: The market symbol, e.g. "BTC-PERP".
max_ts: See `Zo.place_order`.
Returns:
The transaction signature.
"""
pos = self.position[symbol]
side = "ask" if pos.side == "long" else "bid"
price = 0 if side == "ask" else 2**63 - 1
return self.place_order_ix(
pos.size,
price,
side,
symbol=symbol,
order_type="reduceonlyioc",
max_ts=max_ts,
)
def cancel_all_orders_ix(self, symbol: str, /, *, limit: int = 128):
"""Cancel all orders.
Args:
symbol: The market symbol, e.g. "BTC-PERP".
limit: Maximum number of orders to cancel.
Returns:
The transaction signature.
"""
info = self.markets[symbol]
mkt = self.__dex_markets[symbol]
oo = self._get_open_orders_info(symbol).key
return self.program.instruction["cancel_all_perp_orders"](
limit,
ctx=Context(
accounts={
"authority": self.wallet.public_key,
"state": self.__config.ZO_STATE_ID,
"cache": self._zo_state.cache,
"state_signer": self._zo_state_signer,
"margin": self._zo_margin_key,
"control": self._zo_margin.control,
"oo": oo,
"dex_market": info.address,
"req_q": mkt.req_q,
"event_q": mkt.event_q,
"market_bids": mkt.bids,
"market_asks": mkt.asks,
"dex_program": self.__config.ZO_DEX_ID,
}
),
) | zo-sdk | /zo_sdk-0.2.0-py3-none-any.whl/zo/zo.py | zo.py |
zobepy
======
zobepy - zobe's unsorted library.
This is an unsorted library made by zobe.
usage
=====
pip
---
pip install zobepy
import
------
import zobepy
and use
bsf = zobepy.BinarySizeFormatter(3000)
print(bsf.get())
test
====
prerequisites
pip install -e '.[dev]'
unittest
python -m unittest discover
pytest
pytest
tox
tox
open htmlcov/index.html to view the coverage report after running tox
build document
==============
prerequisites
pip install -e '.[doc]'
make
cd docs
make html
| zobepy | /zobepy-1.0.4.tar.gz/zobepy-1.0.4/README.md | README.md |
=============================
Diamond-specific Zocalo Tools
=============================
.. image:: https://img.shields.io/pypi/v/zocalo-dls.svg
:target: https://pypi.python.org/pypi/zocalo-dls
:alt: PyPI release
.. image:: https://img.shields.io/pypi/l/zocalo-dls.svg
:target: https://pypi.python.org/pypi/zocalo-dls
:alt: BSD license
.. image:: https://github.com/DiamondLightSource/python-zocalo-dls/actions/workflows/python-package.yml/badge.svg
:target: https://github.com/DiamondLightSource/python-zocalo-dls/actions/workflows/python-package.yml
:alt: Build status
.. image:: https://img.shields.io/lgtm/grade/python/g/DiamondLightSource/python-zocalo-dls.svg?logo=lgtm&logoWidth=18
:target: https://lgtm.com/projects/g/DiamondLightSource/python-zocalo-dls/context:python
:alt: Language grade: Python
.. image:: https://img.shields.io/lgtm/alerts/g/DiamondLightSource/python-zocalo-dls.svg?logo=lgtm&logoWidth=18
:target: https://lgtm.com/projects/g/DiamondLightSource/python-zocalo-dls/alerts/
:alt: Total alerts
.. image:: https://readthedocs.org/projects/python-zocalo-dls/badge/?version=latest
:target: https://python-zocalo-dls.readthedocs.io/en/latest/?badge=latest
:alt: Documentation status
.. image:: https://img.shields.io/pypi/pyversions/zocalo-dls.svg
:target: https://pypi.org/project/zocalo-dls/
:alt: Supported Python versions
.. image:: https://flat.badgen.net/dependabot/DiamondLightSource/python-zocalo-dls?icon=dependabot
:target: https://github.com/DiamondLightSource/python-zocalo-dls
:alt: Dependabot dependency updates
.. image:: https://img.shields.io/badge/code%20style-black-000000.svg
:target: https://github.com/ambv/black
:alt: Code style: black
`Zocalo <https://github.com/DiamondLightSource/python-zocalo/>`_ services and wrappers which can be used across teams at
`Diamond Light Source <https://www.diamond.ac.uk/Home.html/>`_.
This package provides specialised versions of the services provided by Zocalo,
as well as services which are useful at Diamond but are not central to Zocalo itself.
Much of the data analysis work at Diamond is directed by and presented to users through `ISPyB <https://ispyb.github.io/ISPyB/>`_.
Therefore, we provide some central services which enable cooperation between the data analysis pipelines and the ISPyB
database at Diamond.
The code in this repository is actively used to process many different experiments at Diamond.
The hope is that soon it will be used across many areas of science here and perhaps elsewhere.
Please take this code as inspiration for how to implement Zocalo at other facilities.
Installation
------------
.. code-block::
pip install zocalo-dls
This will add several service and wrapper entry points which should appear with:
.. code-block::
zocalo.service --help
zocalo.wrap --help
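
A service from this package can then be started in the usual Zocalo way, for
example (the service name below is illustrative and may differ):

.. code-block::

    zocalo.service -s DLSISPyB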
Contributing
------------
This package is maintained by a core team at Diamond Light Source.
To contribute, fork this repository and issue a pull request.
`Pre-commit <https://pre-commit.com/>`_ hooks are provided, please check code against these before submitting.
Install with:
.. code-block::
pre-commit install
| zocalo-dls | /zocalo_dls-0.4.0.tar.gz/zocalo_dls-0.4.0/README.rst | README.rst |
from zocalo.wrapper import BaseWrapper
import os
import procrunner
import logging
from pathlib import Path
from shutil import copyfile
import nbformat
logger = logging.getLogger("zocalo_dls.wrapper.jupyter")
class JupyterWrapper(BaseWrapper):
"""
    A Zocalo wrapper for headless Jupyter notebook processing.
    Copies the notebook to the run directory, injects the input and
    output file paths, runs it in place, and finally produces an HTML
    copy of the executed notebook as a log.
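
    The target notebook is expected to reserve its first two cells for the
    injected paths; they are overwritten roughly as follows (paths are
    illustrative):

        cell 0:  inpath = "/path/to/target_file.nxs"
        cell 1:  outpath = "/path/to/result.nxs"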
"""
run_script = "/dls_sw/apps/wrapper-scripts/execute_notebook.sh"
param_prefix = "jupyter_"
notebook = "notebook"
module = "module"
payload_key = "target_file"
default_module = "python/3"
def run(self):
assert hasattr(self, "recwrap"), "No recipewrapper object found"
payload = self.recwrap.payload
jp = self.recwrap.recipe_step["job_parameters"]
target_file = self._get_target_file(payload, jp)
ispyb_params = jp["ispyb_parameters"].copy()
ispyb_rd = jp["result_directory"]
override_path = jp["override_path"]
prefix = self._get_prefix(jp)
rd = self._get_run_directory(ispyb_rd, override_path)
note_key = prefix + JupyterWrapper.notebook
notebook, result_path, html_log = self._copy_notebook(
ispyb_params, target_file, rd, note_key
)
mod_key = prefix + JupyterWrapper.module
mod = ispyb_params.get(mod_key, [JupyterWrapper.default_module])[0]
        # remove non-execution parameters before notebook injection
if mod_key in ispyb_params:
del ispyb_params[mod_key]
del ispyb_params[note_key]
self._inject_parameters(
ispyb_params, target_file, result_path, notebook, prefix
)
command = [self._get_run_script()]
command.append(mod)
command.append(notebook)
logger.info("Command: %s", " ".join(command))
result = procrunner.run(command)
logger.info("Command successful, took %.1f seconds", result["runtime"])
self._record_result(result_path, "Result")
self._record_result(notebook, "Result")
self._record_result(html_log, "Log")
self._broadcast_primary_result(result_path, not result["exitcode"])
return not result["exitcode"]
def _broadcast_primary_result(self, result_path, success):
if not success or not os.path.isfile(result_path):
return
if getattr(self, "recwrap", None):
self.recwrap.send_to(
"result-primary", {JupyterWrapper.payload_key: result_path}
)
def _record_result(self, path, file_type):
if os.path.isfile(path):
p, f = os.path.split(path)
self.record_result_individual_file(
{"file_path": p, "file_name": f, "file_type": file_type}
)
else:
logger.warning("No file found at %s", path)
def _get_target_file(self, payload, jp):
if (
JupyterWrapper.payload_key not in payload
and JupyterWrapper.payload_key not in jp
):
raise RuntimeError("Target file not in payload or job parameters")
if JupyterWrapper.payload_key in payload:
return payload[JupyterWrapper.payload_key]
if JupyterWrapper.payload_key in jp:
return jp[JupyterWrapper.payload_key]
def _copy_notebook(self, params, target, rd, note_key):
if note_key not in params:
raise RuntimeError("No notebook parameter registered")
note_path = params[note_key][0]
if not os.path.isfile(note_path):
raise RuntimeError("Notebook does not exist: %s" % note_path)
prd = Path(rd)
name = Path(Path(target).stem + "_" + Path(note_path).name)
note_dir = prd / "notebooks"
note_dir.mkdir(parents=True, exist_ok=True)
fullpath = note_dir / name
copyfile(note_path, fullpath)
nxspath = rd / name.with_suffix(".nxs")
html = fullpath.with_suffix(".html")
return str(fullpath), str(nxspath), str(html)
def _get_run_directory(self, ispyb_rd, override):
if not override.startswith("{") and os.path.exists(override):
return override
return ispyb_rd
def _get_run_script(self):
return JupyterWrapper.run_script
def _inject_parameters(self, ispyb_params, target, result, notebook, prefix):
nb = nbformat.read(notebook, nbformat.NO_CONVERT)
nb["cells"][0]["source"] = 'inpath = "{}"'.format(target)
nb["cells"][1]["source"] = 'outpath = "{}"'.format(result)
nbformat.write(nb, notebook)
def _get_prefix(self, jp):
db_namespace = jp.get("namespace", "")
if db_namespace:
db_namespace = db_namespace + "_"
return JupyterWrapper.param_prefix + db_namespace | zocalo-dls | /zocalo_dls-0.4.0.tar.gz/zocalo_dls-0.4.0/zocalo_dls/wrapper/jupyter.py | jupyter.py |
from zocalo.wrapper import BaseWrapper
import os
import procrunner
import logging
import json
from pathlib import Path
from shutil import copyfile
from . import wrapper_utils_dls as utils
logger = logging.getLogger("DawnWrapper")
class DawnWrapper(BaseWrapper):
"""
    A Zocalo wrapper for DAWN headless processing.
    Builds and validates the JSON config file from the input parameters,
    and broadcasts the result NeXus file path on success.
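
    A generated config might look roughly like this (paths and values are
    illustrative):

        {
          "filePath": "/dls/ixx/data/scan_1.nxs",
          "datasetPath": "/entry/detector/data",
          "processingPath": "/dls/ixx/processing/chain.nxs",
          "scanRank": 2,
          "xmx": "2048m",
          "numberOfCores": 1,
          "timeOut": 60000,
          "outputFilePath": "/dls/ixx/processed/scan_1-detector-chain.nxs"
        }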
"""
run_script = "/dls_sw/apps/wrapper-scripts/dawn_autoprocessing.sh"
param_prefix = "dawn_"
config_name = param_prefix + "config"
version = "version"
memory = "xmx"
ncores = "numberOfCores"
proc_path = "processingPath"
target = "filePath"
dataset_path = "datasetPath"
overwrite = "monitorForOverwrite"
scan_rank = "scanRank"
payload_key = "target_file"
timeout = "timeOut"
readable = "readable"
datakey = "dataKey"
link_parent = "linkParentEntry"
publisher = "publisherURI"
output_file = "outputFilePath"
delete_file = "deleteProcessingFile"
default_version = "stable"
config_vals = {
memory,
ncores,
proc_path,
dataset_path,
overwrite,
scan_rank,
timeout,
readable,
datakey,
link_parent,
publisher,
delete_file,
}
defaults = {
memory: "2048m",
ncores: 1,
timeout: 60000,
readable: True,
link_parent: True,
overwrite: False,
datakey: "/entry/solstice_scan",
delete_file: False,
}
    # things we can't sensibly default or ignore
required = {scan_rank, proc_path, dataset_path, target}
def run(self):
assert hasattr(self, "recwrap"), "No recipewrapper object found"
payload = self.recwrap.payload
jp = self.recwrap.recipe_step["job_parameters"]
target_file = utils.get_target_file(payload, jp)
ispyb_params = jp["ispyb_parameters"]
ispyb_wd = jp["working_directory"]
ispyb_rd = jp["result_directory"]
override_path = jp["override_path"]
config = dict(DawnWrapper.defaults)
if DawnWrapper.config_name in ispyb_params:
self._load_config(ispyb_params[DawnWrapper.config_name][0], config)
self._update_config(config, ispyb_params, target_file)
self._validate_config(config)
self._copy_processing_chain(config, ispyb_wd)
result_path = self._make_results_directory(config, ispyb_rd, override_path)
config_path = self._write_config(config, ispyb_wd)
v = DawnWrapper.param_prefix + DawnWrapper.version
version = ispyb_params.get(v, [DawnWrapper.default_version])[0]
command = [DawnWrapper.run_script]
command.append("-path")
command.append(config_path)
command.append("-version")
command.append(version)
command.append("-xmx")
command.append(config[DawnWrapper.memory])
logger.info("Command: %s", " ".join(command))
result = procrunner.run(command)
logger.info("Command successful, took %.1f seconds", result["runtime"])
utils.record_result(self, result_path, "Result")
utils.broadcast_primary_result(
self.recwrap, result_path, not result["exitcode"]
)
return not result["exitcode"]
def _load_config(self, config_path, config):
if not os.path.isfile(config_path):
raise RuntimeError("Config does not exist: %s" % config_path)
with open(config_path, "r") as fh:
data = json.load(fh)
config.update(data)
def _update_config(self, config, ispyb_config, target_file):
config[DawnWrapper.target] = target_file
for k in DawnWrapper.config_vals:
name = DawnWrapper.param_prefix + k
if name in ispyb_config:
val = ispyb_config[name][0]
out = val
if val == "False":
out = False
if val == "True":
out = True
if val.isdigit():
out = int(val)
config[k] = out
def _copy_processing_chain(self, config, working_dir):
p = config[DawnWrapper.proc_path]
new_loc = working_dir + "/" + Path(p).name
copyfile(p, new_loc)
config[DawnWrapper.proc_path] = new_loc
def _validate_config(self, config):
for k in DawnWrapper.required:
if k not in config:
raise RuntimeError("Required value missing from dawn config: %s" % k)
def _make_results_directory(self, config, ispyb_rd, override_name):
scan = Path(config[DawnWrapper.target]).stem + "-"
dataset = Path(config[DawnWrapper.dataset_path]).parts[2] + "-"
process = Path(config[DawnWrapper.proc_path]).stem + ".nxs"
result_filename = scan + dataset + process
name = ispyb_rd + "/" + result_filename
if not override_name.startswith("{") and os.path.exists(override_name):
name = override_name + "/" + result_filename
config[DawnWrapper.output_file] = name
return name
def _write_config(self, config, working_dir):
Path(working_dir).mkdir(parents=True, exist_ok=True)
path = working_dir + "/" + DawnWrapper.config_name + ".json"
with open(path, "w") as fh:
json.dump(config, fh, sort_keys=True, indent=2)
return path | zocalo-dls | /zocalo_dls-0.4.0.tar.gz/zocalo_dls-0.4.0/zocalo_dls/wrapper/dawn.py | dawn.py |
from zocalo.wrapper import BaseWrapper
import procrunner
import logging
from . import wrapper_utils_dls as utils
logger = logging.getLogger("ProcessRegisterWrapper")
class ProcessRegisterWrapper(BaseWrapper):
"""
    A Zocalo wrapper that runs a specified command in procrunner.
    The job parameters must include the command to run ("wrapped_commands"),
    and may also include the names of a result file and a log file, which
    will then be registered as results.
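
    An illustrative set of job parameters (command and paths are placeholders):

        "job_parameters": {
            "wrapped_commands": ["some_program", "--some-flag"],
            "filename": "/path/to/result_file",
            "logname": "/path/to/log_file"
        }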
"""
def run(self):
assert hasattr(self, "recwrap"), "No recipewrapper object found"
params = self.recwrap.recipe_step["job_parameters"]
command = params["wrapped_commands"]
self.modify_command(command, params)
logger.info("Command: %s", " ".join(command))
result = procrunner.run(command)
logger.info("Command successful, took %.1f seconds", result["runtime"])
if "filename" in params:
utils.record_result(self, params["filename"], "Result")
self.additional_file_actions(params["filename"], not result["exitcode"])
if "logname" in params:
utils.record_result(self, params["logname"], "Log")
return not result["exitcode"]
def modify_command(self, command, params):
pass
def additional_file_actions(self, path, success):
pass
class TargetProcessRegisterWrapper(ProcessRegisterWrapper):
"""
    A Zocalo wrapper that runs a command on a target file in procrunner.
    The job parameters must include the command to run ("wrapped_commands");
    the name of a target file taken from the payload or the job parameters
    is appended to that command. If a file name is included in the job
    parameters, it will also be broadcast on the primary-result channel.
"""
def modify_command(self, command, params):
payload = self.recwrap.payload
tf = utils.get_target_file(payload, params)
command.append(tf)
def additional_file_actions(self, path, success):
utils.broadcast_primary_result(self.recwrap, path, success) | zocalo-dls | /zocalo_dls-0.4.0.tar.gz/zocalo_dls-0.4.0/zocalo_dls/wrapper/generic.py | generic.py |
from __future__ import absolute_import, division, print_function
import json
import os
import time
import ispyb
import mysql.connector
import six
import workflows.recipe
from workflows.services.common_service import CommonService
class ISPyB(CommonService):
"""A service that receives information to be written to ISPyB."""
# Human readable service name
_service_name = "ISPyB connector"
# Logger name
_logger_name = __name__
def initializing(self):
"""Subscribe the ISPyB connector queue. Received messages must be
acknowledged. Prepare ISPyB database connection."""
self.log.info("ISPyB connector using ispyb v%s", ispyb.__version__)
self.ispyb = ispyb.open("/dls_sw/apps/zocalo/secrets/credentials-ispyb-sp.cfg")
self.log.debug("ISPyB connector starting")
workflows.recipe.wrap_subscribe(
self._transport,
"ispyb_connector", # will become 'ispyb' in far future
self.receive_msg,
acknowledgement=True,
log_extender=self.extend_log,
allow_non_recipe_messages=True,
)
def receive_msg(self, rw, header, message):
"""Do something with ISPyB."""
if header.get("redelivered") == "true":
# A redelivered message may just have been processed in a parallel instance,
# which was connected to a different database server in the DB cluster. If
# we were to process it immediately we may run into a DB synchronization
# fault. Avoid this by giving the DB cluster a bit of time to settle.
self.log.debug("Received redelivered message, holding for a second.")
time.sleep(1)
if not rw:
# Incoming message is not a recipe message. Simple messages can be valid
if (
not isinstance(message, dict)
or not message.get("parameters")
or not message.get("content")
):
self.log.error("Rejected invalid simple message")
self._transport.nack(header)
return
self.log.debug("Received a simple message")
# Create a wrapper-like object that can be passed to functions
# as if a recipe wrapper was present.
class RW_mock(object):
def dummy(self, *args, **kwargs):
pass
rw = RW_mock()
rw.transport = self._transport
rw.recipe_step = {"parameters": message["parameters"]}
rw.environment = {"has_recipe_wrapper": False}
rw.set_default_channel = rw.dummy
rw.send = rw.dummy
message = message["content"]
command = rw.recipe_step["parameters"].get("ispyb_command")
if not command:
self.log.error("Received message is not a valid ISPyB command")
rw.transport.nack(header)
return
if not hasattr(self, "do_" + command):
self.log.error("Received unknown ISPyB command (%s)", command)
rw.transport.nack(header)
return
txn = rw.transport.transaction_begin()
rw.set_default_channel("output")
def parameters(parameter, replace_variables=True):
if isinstance(message, dict):
base_value = message.get(
parameter, rw.recipe_step["parameters"].get(parameter)
)
else:
base_value = rw.recipe_step["parameters"].get(parameter)
if (
not replace_variables
or not base_value
or not isinstance(base_value, six.string_types)
or "$" not in base_value
):
return base_value
for key in sorted(rw.environment, key=len, reverse=True):
if "${" + key + "}" in base_value:
base_value = base_value.replace(
"${" + key + "}", str(rw.environment[key])
)
# Replace longest keys first, as the following replacement is
# not well-defined when one key is a prefix of another:
if "$" + key in base_value:
base_value = base_value.replace("$" + key, str(rw.environment[key]))
return base_value
result = getattr(self, "do_" + command)(
rw=rw, message=message, parameters=parameters, transaction=txn
)
store_result = rw.recipe_step["parameters"].get("store_result")
if store_result and result and "return_value" in result:
rw.environment[store_result] = result["return_value"]
self.log.debug(
"Storing result '%s' in environment variable '%s'",
result["return_value"],
store_result,
)
if result and result.get("success"):
rw.send({"result": result.get("return_value")}, transaction=txn)
rw.transport.ack(header, transaction=txn)
elif result and result.get("checkpoint"):
rw.checkpoint(
result.get("return_value"),
delay=rw.recipe_step["parameters"].get("delay"),
transaction=txn,
)
rw.transport.ack(header, transaction=txn)
else:
rw.transport.transaction_abort(txn)
rw.transport.nack(header)
return
rw.transport.transaction_commit(txn)
def do_update_processing_status(self, parameters, **kwargs):
ppid = parameters("program_id")
message = parameters("message")
status = parameters("status")
try:
result = self.ispyb.mx_processing.upsert_program_ex(
program_id=ppid,
status={"success": 1, "failure": 0}.get(status),
time_start=parameters("start_time"),
time_update=parameters("update_time"),
message=message,
)
self.log.info(
"Updating program %s status: '%s' with result %s", ppid, message, result
)
return {"success": True, "return_value": result}
except ispyb.ISPyBException as e:
self.log.error(
"Updating program %s status: '%s' caused exception '%s'.",
ppid,
message,
e,
exc_info=True,
)
return False
def do_store_dimple_failure(self, parameters, **kwargs):
params = self.ispyb.mx_processing.get_run_params()
params["parentid"] = parameters("scaling_id")
params["pipeline"] = "dimple"
params["success"] = 0
params["message"] = "Unknown error"
params["run_dir"] = parameters("directory")
try:
result = self.ispyb.mx_processing.upsert_run(params.values())
return {"success": True, "return_value": result}
except ispyb.ISPyBException as e:
self.log.error(
"Updating DIMPLE failure for %s caused exception '%s'.",
params["parentid"],
e,
exc_info=True,
)
return False
def do_register_processing(self, parameters, **kwargs):
program = parameters("program")
cmdline = parameters("cmdline")
environment = parameters("environment")
if isinstance(environment, dict):
environment = ", ".join(
"%s=%s" % (key, value) for key, value in environment.items()
)
rpid = parameters("rpid")
if rpid and not rpid.isdigit():
self.log.error("Invalid processing id '%s'" % rpid)
return False
try:
result = self.ispyb.mx_processing.upsert_program_ex(
job_id=rpid, name=program, command=cmdline, environment=environment
)
self.log.info(
"Registered new program '%s' for processing id '%s' with command line '%s' and environment '%s' with result '%s'.",
program,
rpid,
cmdline,
environment,
result,
)
return {"success": True, "return_value": result}
except ispyb.ISPyBException as e:
self.log.error(
"Registering new program '%s' for processing id '%s' with command line '%s' and environment '%s' caused exception '%s'.",
program,
rpid,
cmdline,
environment,
e,
exc_info=True,
)
return False
def do_add_program_attachment(self, parameters, **kwargs):
params = self.ispyb.mx_processing.get_program_attachment_params()
params["parentid"] = parameters("program_id")
try:
programid = int(params["parentid"])
except ValueError:
programid = None
if not programid:
self.log.warning("Encountered invalid program ID '%s'", params["parentid"])
return False
params["file_name"] = parameters("file_name", replace_variables=False)
params["file_path"] = parameters("file_path", replace_variables=False)
fqpn = os.path.join(params["file_path"], params["file_name"])
if not os.path.isfile(fqpn):
self.log.error(
"Not adding attachment '%s' to data processing: File does not exist",
str(fqpn),
)
return False
params["file_type"] = str(parameters("file_type")).lower()
if params["file_type"] not in ("log", "result", "graph"):
self.log.warning(
"Attachment type '%s' unknown, defaulting to 'log'", params["file_type"]
)
params["file_type"] = "log"
self.log.debug("Writing program attachment to database: %s", params)
result = self.ispyb.mx_processing.upsert_program_attachment(
list(params.values())
)
return {"success": True, "return_value": result}
def do_add_datacollection_attachment(self, parameters, **kwargs):
params = self.ispyb.mx_acquisition.get_data_collection_file_attachment_params()
params["parentid"] = parameters("dcid")
file_name = parameters("file_name", replace_variables=False)
file_path = parameters("file_path", replace_variables=False)
params["file_full_path"] = os.path.join(file_path, file_name)
if not os.path.isfile(params["file_full_path"]):
self.log.error(
"Not adding attachment '%s' to data collection: File does not exist",
str(params["file_full_path"]),
)
return False
params["file_type"] = str(parameters("file_type")).lower()
if params["file_type"] not in ("snapshot", "log", "xy", "recip", "pia"):
self.log.warning(
"Attachment type '%s' unknown, defaulting to 'log'", params["file_type"]
)
params["file_type"] = "log"
self.log.debug("Writing data collection attachment to database: %s", params)
result = self.ispyb.mx_acquisition.upsert_data_collection_file_attachment(
list(params.values())
)
return {"success": True, "return_value": result}
def do_store_per_image_analysis_results(self, parameters, **kwargs):
params = self.ispyb.mx_processing.get_quality_indicators_params()
params["datacollectionid"] = parameters("dcid")
if not params["datacollectionid"]:
self.log.error("DataCollectionID not specified")
return False
params["image_number"] = parameters("file-pattern-index") or parameters(
"file-number"
)
if not params["image_number"]:
self.log.error("Image number not specified")
return False
params["dozor_score"] = parameters("dozor_score")
params["spot_total"] = parameters("n_spots_total")
if params["spot_total"] is not None:
params["in_res_total"] = params["spot_total"]
params["icerings"] = 0
params["maxunitcell"] = 0
params["pctsaturationtop50peaks"] = 0
params["inresolutionovrlspots"] = 0
params["binpopcutoffmethod2res"] = 0
elif params["dozor_score"] is None:
self.log.error("Message contains neither dozor score nor spot count")
return False
params["totalintegratedsignal"] = parameters("total_intensity")
params["good_bragg_candidates"] = parameters("n_spots_no_ice")
params["method1_res"] = parameters("estimated_d_min")
params["method2_res"] = parameters("estimated_d_min")
params["programid"] = "65228265" # dummy value
self.log.debug(
"Writing PIA record for image %r in DCID %s",
params["image_number"],
params["datacollectionid"],
)
try:
# result = "159956186" # for testing
result = self._retry_mysql_call(
self.ispyb.mx_processing.upsert_quality_indicators,
list(params.values()),
)
except ispyb.ReadWriteError as e:
self.log.error(
"Could not write PIA results %s to database: %s",
params,
e,
exc_info=True,
)
return False
else:
return {"success": True, "return_value": result}
def do_insert_screening(self, parameters, **kwargs):
"""Write entry to the Screening table."""
# screening_params: ['id', 'dcgid', 'dcid', 'programversion', 'shortcomments', 'comments']
screening_params = self.ispyb.mx_screening.get_screening_params()
for k in screening_params.keys():
screening_params[k] = parameters(k)
self.log.info("screening_params: %s", screening_params)
try:
screeningId = self.ispyb.mx_screening.insert_screening(
list(screening_params.values())
)
assert screeningId is not None
except (ispyb.ISPyBException, AssertionError) as e:
self.log.error(
"Inserting screening results: '%s' caused exception '%s'.",
screening_params,
e,
exc_info=True,
)
return False
self.log.info("Written Screening record with ID %s", screeningId)
return {"success": True, "return_value": screeningId}
def do_insert_screening_output(self, parameters, **kwargs):
"""Write entry to the ScreeningOutput table."""
# output_params: ['id', 'screeningid', 'statusdescription', 'rejectedreflections', 'resolutionobtained', 'spotdeviationr', 'spotdeviationtheta', 'beamshiftx', 'beamshifty', 'numspotsfound', 'numspotsused', 'numspotsrejected', 'mosaicity', 'ioversigma', 'diffractionrings', 'mosaicityestimated', 'rankingresolution', 'program', 'dosetotal', 'totalexposuretime', 'totalrotationrange', 'totalnoimages', 'rfriedel', 'indexingsuccess', 'strategysuccess', 'alignmentsuccess']
output_params = self.ispyb.mx_screening.get_screening_output_params()
for k in output_params.keys():
output_params[k] = parameters(k)
output_params["screening_id"] = parameters("screening_id")
output_params["alignmentSuccess"] = 1 if parameters("alignmentSuccess") else 0
output_params["mosaicityEstimated"] = 1 if parameters("mosaicity") else 0
output_params["indexingSuccess"] = 1
output_params["strategySuccess"] = 1
self.log.info("output_params: %s", output_params)
try:
screeningOutputId = self.ispyb.mx_screening.insert_screening_output(
list(output_params.values())
)
assert screeningOutputId is not None
except (ispyb.ISPyBException, AssertionError) as e:
self.log.error(
"Inserting screening output: '%s' caused exception '%s'.",
output_params,
e,
exc_info=True,
)
return False
self.log.info("Written ScreeningOutput record with ID %s", screeningOutputId)
return {"success": True, "return_value": screeningOutputId}
def do_insert_screening_output_lattice(self, parameters, **kwargs):
"""Write entry to the ScreeningOutputLattice table."""
# output_lattice_params ['id', 'screeningoutputid', 'spacegroup', 'pointgroup', 'bravaislattice', 'raworientationmatrixax', 'raworientationmatrixay', 'raworientationmatrixaz', 'raworientationmatrixbx', 'raworientationmatrixby', 'raworientationmatrixbz', 'raworientationmatrixcx', 'raworientationmatrixcy', 'raworientationmatrixcz', 'unitcella', 'unitcellb', 'unitcellc', 'unitcellalpha', 'unitcellbeta', 'unitcellgamma', 'labelitindexing']
output_lattice_params = (
self.ispyb.mx_screening.get_screening_output_lattice_params()
)
for k in output_lattice_params.keys():
output_lattice_params[k] = parameters(k)
output_lattice_params["screening_output_id"] = parameters("screening_output_id")
self.log.info("output_lattice_params: %s", output_lattice_params)
try:
screeningOutputLatticeId = (
self.ispyb.mx_screening.insert_screening_output_lattice(
list(output_lattice_params.values())
)
)
assert screeningOutputLatticeId is not None
except (ispyb.ISPyBException, AssertionError) as e:
self.log.error(
"Inserting screening output lattice: '%s' caused exception '%s'.",
output_lattice_params,
e,
exc_info=True,
)
return False
return {"success": True, "return_value": screeningOutputLatticeId}
def do_insert_screening_strategy(self, parameters, **kwargs):
"""Write entry to the ScreeningStrategy table."""
# strategy_params ['id', 'screeningoutputid', 'phistart', 'phiend', 'rotation', 'exposuretime', 'resolution', 'completeness', 'multiplicity', 'anomalous', 'program', 'rankingresolution', 'transmission']
strategy_params = self.ispyb.mx_screening.get_screening_strategy_params()
for k in strategy_params.keys():
strategy_params[k] = parameters(k)
strategy_params["screening_output_id"] = parameters("screening_output_id")
strategy_params["anomalous"] = parameters("anomalous") or 0
self.log.info("strategy_params: %s", strategy_params)
try:
screeningStrategyId = self.ispyb.mx_screening.insert_screening_strategy(
list(strategy_params.values())
)
assert screeningStrategyId is not None
except (ispyb.ISPyBException, AssertionError) as e:
self.log.error(
"Inserting screening strategy: '%s' caused exception '%s'.",
strategy_params,
e,
exc_info=True,
)
return False
return {"success": True, "return_value": screeningStrategyId}
def do_insert_screening_strategy_wedge(self, parameters, **kwargs):
"""Write entry to the ScreeningStrategyWedge table."""
# wedge_params ['id', 'screeningstrategyid', 'wedgenumber', 'resolution', 'completeness', 'multiplicity', 'dosetotal', 'noimages', 'phi', 'kappa', 'chi', 'comments', 'wavelength']
wedge_params = self.ispyb.mx_screening.get_screening_strategy_wedge_params()
for k in wedge_params.keys():
wedge_params[k] = parameters(k)
wedge_params["screening_strategy_id"] = parameters("screening_strategy_id")
wedge_params["wedgenumber"] = parameters("wedgenumber") or "1"
self.log.info("wedge_params: %s", wedge_params)
try:
screeningStrategyWedgeId = (
self.ispyb.mx_screening.insert_screening_strategy_wedge(
list(wedge_params.values())
)
)
assert screeningStrategyWedgeId is not None
except (ispyb.ISPyBException, AssertionError) as e:
self.log.error(
"Inserting strategy wedge: '%s' caused exception '%s'.",
wedge_params,
e,
exc_info=True,
)
return False
return {"success": True, "return_value": screeningStrategyWedgeId}
def do_insert_screening_strategy_sub_wedge(self, parameters, **kwargs):
"""Write entry to the ScreeningStrategySubWedge table."""
# sub_wedge_params ['id', 'screeningstrategywedgeid', 'subwedgenumber', 'rotationaxis', 'axisstart', 'axisend', 'exposuretime', 'transmission', 'oscillationrange', 'completeness', 'multiplicity', 'resolution', 'dosetotal', 'noimages', 'comments']
sub_wedge_params = (
self.ispyb.mx_screening.get_screening_strategy_sub_wedge_params()
)
for k in sub_wedge_params.keys():
sub_wedge_params[k] = parameters(k)
sub_wedge_params["screening_strategy_wedge_id"] = parameters(
"screening_strategy_wedge_id"
)
sub_wedge_params["subwedgenumber"] = "1"
self.log.info("sub_wedge_params: %s", sub_wedge_params)
try:
screeningStrategySubWedgeId = (
self.ispyb.mx_screening.insert_screening_strategy_sub_wedge(
list(sub_wedge_params.values())
)
)
assert screeningStrategySubWedgeId is not None
except (ispyb.ISPyBException, AssertionError) as e:
self.log.error(
"Inserting strategy sub wedge: '%s' caused exception '%s'.",
sub_wedge_params,
e,
exc_info=True,
)
return False
return {"success": True, "return_value": screeningStrategySubWedgeId}
def do_register_integration(self, **kwargs):
# deprecated
return self.do_upsert_integration(**kwargs)
def do_upsert_integration(self, parameters, **kwargs):
"""Insert or update an AutoProcIntegration record.
Parameters, amongst others defined by the ISPyB API:
:dcid: related DataCollectionID
:integration_id: AutoProcIntegrationID, if defined will UPDATE otherwise INSERT
:program_id: related AutoProcProgramID
:scaling_id: related AutoProcScalingID
:returns: AutoProcIntegrationID
ISPyB-API call: upsert_integration
"""
self.log.info(
"Saving integration result record (%s) for DCID %s and APPID %s",
parameters("integration_id") or "new",
parameters("dcid"),
parameters("program_id"),
)
params = self.ispyb.mx_processing.get_integration_params()
params["datacollectionid"] = parameters("dcid")
params["id"] = parameters("integration_id")
params["parentid"] = parameters("scaling_id")
params["programid"] = parameters("program_id")
for key in (
"anom",
"beam_vec_x",
"beam_vec_y",
"beam_vec_z",
"cell_a",
"cell_b",
"cell_c",
"cell_alpha",
"cell_beta",
"cell_gamma",
"start_image_no",
"end_image_no",
"refined_detector_dist",
"refined_xbeam",
"refined_ybeam",
"rot_axis_x",
"rot_axis_y",
"rot_axis_z",
):
params[key] = parameters(key)
try:
autoProcIntegrationId = self.ispyb.mx_processing.upsert_integration(
list(params.values())
)
assert autoProcIntegrationId is not None
except (ispyb.ISPyBException, AssertionError) as e:
self.log.error(
"Encountered exception %s when attempting to insert/update integration record '%s'",
e,
params,
exc_info=True,
)
return False
self.log.info("Saved integration record ID %s", autoProcIntegrationId)
return {"success": True, "return_value": autoProcIntegrationId}
def do_write_autoproc(self, parameters, **kwargs):
"""Write entry to the AutoProc table."""
params = self.ispyb.mx_processing.get_processing_params()
params["id"] = parameters("autoproc_id") # will create a new record
# if undefined
params["parentid"] = parameters("program_id")
for key in (
"spacegroup",
"refinedcell_a",
"refinedcell_b",
"refinedcell_c",
"refinedcell_alpha",
"refinedcell_beta",
"refinedcell_gamma",
):
params[key] = parameters(key)
try:
autoProcId = self.ispyb.mx_processing.upsert_processing(
list(params.values())
)
assert autoProcId is not None
except (ispyb.ISPyBException, AssertionError) as e:
self.log.error(
"Writing AutoProc record '%s' caused exception '%s'.",
params,
e,
exc_info=True,
)
return False
self.log.info("Written AutoProc record with ID %s", autoProcId)
return {"success": True, "return_value": autoProcId}
def do_insert_scaling(self, parameters, **kwargs):
"""Write a 3-column scaling statistics table to the database.
Parameters:
:autoproc_id: AutoProcId, key to AutoProc table
:outerShell: dictionary containing scaling statistics
:innerShell: dictionary containing scaling statistics
:overall: dictionary containing scaling statistics
:returns: AutoProcScalingId
ISPyB-API call: insert_scaling
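
        Each shell dictionary carries the usual scaling statistics, for
        example (values are placeholders):

            {"res_lim_low": 39.0, "res_lim_high": 1.8, "completeness": 99.2,
             "multiplicity": 6.4, "cc_half": 0.998, "mean_i_sig_i": 12.1}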
"""
autoProcId = parameters("autoproc_id")
stats = {
"outerShell": self.ispyb.mx_processing.get_outer_shell_scaling_params(),
"innerShell": self.ispyb.mx_processing.get_inner_shell_scaling_params(),
"overall": self.ispyb.mx_processing.get_overall_scaling_params(),
}
for shell in stats:
for key in (
"anom",
"anom_completeness",
"anom_multiplicity",
"cc_anom",
"cc_half",
"comments",
"completeness",
"fract_partial_bias",
"mean_i_sig_i",
"multiplicity",
"n_tot_obs",
"n_tot_unique_obs",
"r_meas_all_iplusi_minus",
"r_meas_within_iplusi_minus",
"r_merge",
"r_pim_all_iplusi_minus",
"r_pim_within_iplusi_minus",
"res_lim_high",
"res_lim_low",
):
stats[shell][key] = parameters(shell).get(key)
try:
scalingId = self.ispyb.mx_processing.insert_scaling(
autoProcId,
list(stats["outerShell"].values()),
list(stats["innerShell"].values()),
list(stats["overall"].values()),
)
assert scalingId is not None
except (ispyb.ISPyBException, AssertionError) as e:
self.log.error(
"Encountered exception %s when attempting to insert scaling "
"statistics '%s' for AutoProcId %s",
e,
stats,
autoProcId,
exc_info=True,
)
return False
self.log.info(
"Written scaling statistics record %s for AutoProc ID %s",
scalingId,
autoProcId,
)
return {"success": True, "return_value": scalingId}
def do_retrieve_programs_for_job_id(self, parameters, **kwargs):
"""Retrieve the processing instances associated with the given processing job ID"""
processingJobId = parameters("rpid")
result = self.ispyb.mx_processing.retrieve_programs_for_job_id(processingJobId)
serial_result = []
for row in result:
el = {}
for k, v in row.items():
try:
json.dumps(v)
el[k] = v
except TypeError:
continue
serial_result.append(el)
return {"success": True, "return_value": serial_result}
def do_retrieve_program_attachments_for_program_id(self, parameters, **kwargs):
"""Retrieve the processing program attachments associated with the given processing program ID"""
autoProcProgramId = parameters("program_id")
result = self.ispyb.mx_processing.retrieve_program_attachments_for_program_id(
autoProcProgramId
)
return {"success": True, "return_value": result}
def do_multipart_message(self, rw, message, **kwargs):
"""The multipart_message command allows the recipe or client to specify a
multi-stage operation. With this you can process a list of API calls,
for example
* do_upsert_processing
* do_insert_scaling
* do_upsert_integration
Each API call may have a return value that can be stored.
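
        An illustrative command list (all values are placeholders):

            "ispyb_command_list": [
                {"ispyb_command": "register_processing",
                 "program": "xia2", "rpid": "12345",
                 "store_result": "ispyb_program_id"},
                {"ispyb_command": "add_program_attachment",
                 "program_id": "$ispyb_program_id",
                 "file_path": "/path/to", "file_name": "output.log",
                 "file_type": "Log"}
            ]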
Multipart_message takes care of chaining and checkpointing to make the
overall call near-ACID compliant."""
if not rw.environment.get("has_recipe_wrapper", True):
self.log.error(
"Multipart message call can not be used with simple messages"
)
return False
checkpoint = 1
commands = rw.recipe_step["parameters"].get("ispyb_command_list")
if isinstance(message, dict) and isinstance(
message.get("ispyb_command_list"), list
):
commands = message["ispyb_command_list"]
checkpoint = message.get("checkpoint", 0) + 1
if not commands:
self.log.error("Received multipart message containing no commands")
return False
current_command = commands.pop(0)
command = current_command.get("ispyb_command")
if not command:
self.log.error(
"Multipart command %s is not a valid ISPyB command", current_command
)
return False
if not hasattr(self, "do_" + command):
self.log.error("Received unknown ISPyB command (%s)", command)
return False
self.log.debug(
"Processing step %d of multipart message (%s) with %d further steps",
checkpoint,
command,
len(commands),
)
# Create a parameter lookup function specific to this step of the
# multipart message
def parameters(parameter, replace_variables=True):
"""Slight change in behaviour compared to 'parameters' in a direct call:
If the value is defined in the command list item then this takes
precedence. Otherwise we check the original message content. Finally
we look in parameters dictionary of the recipe step for the
multipart_message command.
String replacement rules apply as usual."""
if parameter in current_command:
base_value = current_command[parameter]
elif isinstance(message, dict) and parameter in message:
base_value = message[parameter]
else:
base_value = rw.recipe_step["parameters"].get(parameter)
if (
not replace_variables
or not base_value
or not isinstance(base_value, six.string_types)
or "$" not in base_value
):
return base_value
for key in sorted(rw.environment, key=len, reverse=True):
if "${" + key + "}" in base_value:
base_value = base_value.replace(
"${" + key + "}", str(rw.environment[key])
)
# Replace longest keys first, as the following replacement is
# not well-defined when one key is a prefix of another:
if "$" + key in base_value:
base_value = base_value.replace("$" + key, str(rw.environment[key]))
return base_value
kwargs["parameters"] = parameters
# Run the multipart step
result = getattr(self, "do_" + command)(rw=rw, message=message, **kwargs)
# Store step result if appropriate
store_result = current_command.get("store_result")
if store_result and result and "return_value" in result:
rw.environment[store_result] = result["return_value"]
self.log.debug(
"Storing result '%s' in environment variable '%s'",
result["return_value"],
store_result,
)
# If the step did not succeed then propagate failure
if not result or not result.get("success"):
self.log.debug("Multipart command failed")
return result
# If the multipart command is finished then propagate success
if not commands:
self.log.debug("and done.")
return result
# If there are more steps then checkpoint the current state
# and put it back on the queue
self.log.debug("Checkpointing remaining %d steps", len(commands))
if isinstance(message, dict):
checkpoint_dictionary = message
else:
checkpoint_dictionary = {}
checkpoint_dictionary["checkpoint"] = checkpoint
checkpoint_dictionary["ispyb_command_list"] = commands
return {"checkpoint": True, "return_value": checkpoint_dictionary}
def _retry_mysql_call(self, function, *args, **kwargs):
tries = 0
while True:
try:
return function(*args, **kwargs)
except (
mysql.connector.errors.InternalError,
mysql.connector.errors.IntegrityError,
) as e:
tries = tries + 1
if tries < 3:
self.log.warning(
"ISPyB call %s try %d failed with %s",
function,
tries,
e,
exc_info=True,
)
continue
else:
raise | zocalo-dls | /zocalo_dls-0.4.0.tar.gz/zocalo_dls-0.4.0/zocalo_dls/service/ispybsvc.py | ispybsvc.py |
=======
History
=======
Unreleased
----------
0.30.0 (2023-06-26)
-------------------
* ``zocalo.configure_rabbitmq``: non-zero exit code on ``HTTPError``
0.29.0 (2023-06-02)
-------------------
* Expose ``url``, ``version``, ``user_name`` and ``user_token`` attributes on ``SlurmRestApi`` instances
* Load ``user_token`` from external file in ``SlurmRestApi`` rather than in the ``zocalo.configuration`` Slurm plugin to allow token changes to be picked up without re-loading ``zocalo.configuration``
0.28.0 (2023-03-20)
-------------------
* Add minimal wrapper for the Slurm REST API to allow job submission
* Add Slurm ``zocalo.configuration`` plugin
0.27.0 (2023-03-16)
-------------------
* Add Dockerfile and build-and-push-docker-image GitHub workflow
* Add ``zocalo.pickup`` command for re-submitting messages stored in the ``zocalo.go.fallback_location`` while the message broker is unavailable
0.26.0 (2022-11-04)
-------------------
* Add dispatcher service
* Add support for Python 3.11
0.25.1 (2022-10-19)
-------------------
* ``JSONLines`` service: trigger ``process_messages`` immediately when reaching 100 stored messages
0.25.0 (2022-10-13)
-------------------
* Add ``JSONLines`` service for appending messages to a file in jsonlines format
0.24.2 (2022-10-04)
-------------------
* ``zocalo.configure_rabbitmq`` cli: downgrade "No matching queue found" error to warning
0.24.1 (2022-08-24)
-------------------
* ``zocalo.configure_rabbitmq`` cli: additional debugging output in event of rare ``IndexError``
0.24.0 (2022-08-17)
-------------------
* ``zocalo.configure_rabbitmq`` cli: enable configuration of vhosts
0.23.0 (2022-08-02)
-------------------
* Remove deprecated ``zocalo.enable_graylog()`` function
* Use ``LoggingAdapter`` to append ``recipe_ID`` to wrapper logs.
  This was inadvertently broken for the logging plugin added in #176.
  Derived wrappers should now use ``self.log`` rather than instantiating
  a logger directly.
0.22.0 (2022-07-12)
-------------------
* ``zocalo.wrapper``: Enable access to ``zocalo.configuration`` object through ``BaseWrapper.config`` attribute
* ``zocalo.configure_rabbitmq`` cli: check response status codes to catch failed API calls
* ``zocalo.configure_rabbitmq`` cli: don't set x-single-active-consumer for streams
0.21.0 (2022-06-28)
-------------------
* ``zocalo.configure_rabbitmq`` cli: require passing user config
via explicit ``--user-config`` parameter
* ``zocalo.configure_rabbitmq`` cli: optionally disable implicit
dlq creation via ``dead-letter-queue-create: false``
0.20.0 (2022-06-17)
-------------------
* ``zocalo.configure_rabbitmq`` cli: require explicit
`dead-letter-routing-key-pattern` when requesting
creation of a DLQ for a given queue.
0.19.0 (2022-05-24)
-------------------
* ``zocalo.configure_rabbitmq`` cli: advanced binding configuration
0.18.0 (2022-04-12)
-------------------
* Added a logging configuration plugin to comprehensively
configure logging across applications.
0.17.0 (2022-03-03)
-------------------
* ``zocalo.configure_rabbitmq`` cli:
* Support for explicitly declaring exchanges
* Allow queues to bind to more than one exchange
0.16.0 (2022-02-21)
-------------------
* Add ``Mailer`` service for sending email notifications.
Subscribes to the ``mailnotification`` queue. SMTP settings are specified
via the ``smtp`` plugin in ``zocalo.configuration``.
0.15.0 (2022-02-16)
-------------------
* Fix for getting user information from the RabbitMQ management API
* Major changes to the RabbitMQ configuration command line tool.
Users are now updated and deleted, and the tool now understands
zocalo environment parameters. Configuration files are now
mandatory, and the ``--seed`` parameter has been removed.
0.14.0 (2021-12-14)
-------------------
* ``zocalo.dlq_purge`` offers a ``--location`` flag to override where files are
being written
* ``zocalo.dlq_reinject`` can again understand ``zocalo.dlq_purge`` output
passed on stdin
* Reinjected messages now carry a ``dlq-reinjected: True`` header field
0.13.0 (2021-12-01)
-------------------
* ``zocalo.queue_drain`` now allows the automatic determination
of destination queues for recipe messages
* ``zocalo.queue_drain`` fixed for use in a RabbitMQ environment
* ``zocalo.dlq_purge`` fixed for use in a RabbitMQ environment
* New functions in ``zocalo.util`` to easily annotate log messages
with system context information
0.12.0 (2021-11-15)
-------------------
* Add support for queue/exchange bindings to ``RabbitMQAPI``
* Drop support for Python 3.6 and 3.7
0.11.1 (2021-11-08)
-------------------
* Add a RabbitMQ HTTP API in ``zocalo.util.rabbitmq``
0.11.0 (2021-11-03)
-------------------
* Add command line tools for handling dead-letter messages
* ``zocalo.dlq_check`` checks dead-letter queues for messages
* ``zocalo.dlq_purge`` removes messages from specified DLQs and dumps them to a directory
specified in the Zocalo configuration
* ``zocalo.dlq_reinject`` takes a serialised message produced by ``zocalo.dlq_purge`` and
places it back on a queue
* Use ``argparse`` for all command line tools and make use of ``workflows`` transport
argument injection. Minimum ``workflows`` version is now 2.14
* New ``zocalo.util.rabbitmq.RabbitMQAPI()`` providing a thin wrapper around the
RabbitMQ HTTP API
0.10.0 (2021-10-04)
-------------------
* New ``zocalo.shutdown`` command to shutdown Zocalo services
* New ``zocalo.queue_drain`` command to drain one queue into another in a controlled manner
* New ``zocalo.util.rabbitmq.http_api_request()`` utility function to return a
``urllib.request.Request`` object to query the RabbitMQ API using the credentials
specified via ``zocalo.configuration``.
* ``zocalo.wrap`` now emits tracebacks on hard crashes and ``SIGUSR2`` signals
0.9.1 (2021-08-18)
------------------
* Expand ~ in paths in configuration files
0.9.0 (2021-08-18)
------------------
* Removed --live/--test command line arguments, use -e/--environment instead
* zocalo.go, zocalo.service, zocalo.wrap accept -t/--transport command line
options, and the default can be set via the site configuration.
0.8.1 (2021-07-08)
------------------
* Keep wrapper status threads alive through transport disconnection events
0.8.0 (2021-05-18)
------------------
* Support for Zocalo configuration files
0.7.4 (2021-03-17)
------------------
* Documentation improvements
0.7.3 (2021-01-19)
------------------
* Ignore error when logserver hostname can't be looked up immediately
0.7.2 (2021-01-18)
------------------
* Add a symbolic link handling library function
* Cache the logserver hostname by default
0.7.1 (2020-11-13)
------------------
* Add a --dry-run option to zocalo.go
0.7.0 (2020-11-02)
------------------
* Drop support for Python 3.5
* Update language constructs for Python 3.6+
0.6.4 (2020-11-02)
------------------
* Add support for Python 3.9
0.6.3 (2020-05-25)
------------------
* Remove stomp.py requirement - this is pulled in via workflows only
0.6.2 (2019-07-16)
------------------
* Set live flag in service environment if service started with '--live'
0.6.0 (2019-06-17)
------------------
* Start moving dlstbx scripts to zocalo package:
* zocalo.go
* zocalo.wrap
* Entry point 'dlstbx.wrappers' has been renamed 'zocalo.wrappers'
* Dropped Python 3.4 support
0.5.4 (2019-03-22)
------------------
* Compatibility fixes for graypy >= 1.0
0.5.2 (2018-12-11)
------------------
* Don't attempt to load non-existing file
0.5.1 (2018-12-04)
------------------
* Fix packaging bug which meant files were missing from the release
0.5.0 (2018-12-04)
------------------
* Add zocalo.service command to start services
0.4.0 (2018-12-04)
------------------
* Add status notification thread logic
0.3.0 (2018-12-04)
------------------
* Add schlockmeister service and base wrapper class
0.2.0 (2018-11-28)
------------------
* Add function to enable logging to graylog
0.1.0 (2018-10-19)
------------------
* First release on PyPI.
| zocalo | /zocalo-0.30.1.tar.gz/zocalo-0.30.1/HISTORY.rst | HISTORY.rst |
======
Zocalo
======
.. image:: https://img.shields.io/pypi/v/zocalo.svg
:target: https://pypi.python.org/pypi/zocalo
:alt: PyPI release
.. image:: https://img.shields.io/conda/vn/conda-forge/zocalo.svg
:target: https://anaconda.org/conda-forge/zocalo
:alt: Conda version
.. image:: https://dev.azure.com/zocalo/python-zocalo/_apis/build/status/DiamondLightSource.python-zocalo?branchName=main
:target: https://dev.azure.com/zocalo/python-zocalo/_build/latest?definitionId=2&branchName=main
:alt: Build status
.. image:: https://img.shields.io/lgtm/grade/python/g/DiamondLightSource/python-zocalo.svg?logo=lgtm&logoWidth=18
:target: https://lgtm.com/projects/g/DiamondLightSource/python-zocalo/context:python
:alt: Language grade: Python
.. image:: https://img.shields.io/lgtm/alerts/g/DiamondLightSource/python-zocalo.svg?logo=lgtm&logoWidth=18
:target: https://lgtm.com/projects/g/DiamondLightSource/python-zocalo/alerts/
:alt: Total alerts
.. image:: https://readthedocs.org/projects/zocalo/badge/?version=latest
:target: https://zocalo.readthedocs.io/en/latest/?badge=latest
:alt: Documentation status
.. image:: https://img.shields.io/pypi/pyversions/zocalo.svg
:target: https://pypi.org/project/zocalo/
:alt: Supported Python versions
.. image:: https://img.shields.io/badge/code%20style-black-000000.svg
:target: https://github.com/ambv/black
:alt: Code style: black
.. image:: https://img.shields.io/pypi/l/zocalo.svg
:target: https://pypi.python.org/pypi/zocalo
:alt: BSD license
..
|
| `M. Gerstel, A. Ashton, R.J. Gildea, K. Levik, and G. Winter, "Data Analysis Infrastructure for Diamond Light Source Macromolecular & Chemical Crystallography and Beyond", in Proc. ICALEPCS'19, New York, NY, USA, Oct. 2019, pp. 1031-1035. <https://doi.org/10.18429/JACoW-ICALEPCS2019-WEMPR001>`_ |DOI|
.. |DOI| image:: https://img.shields.io/badge/DOI-10.18429/JACoW--ICALEPCS2019--WEMPR001-blue.svg
:target: https://doi.org/10.18429/JACoW-ICALEPCS2019-WEMPR001
:alt: Primary Reference DOI
|
Zocalo is an automated data processing system designed at Diamond Light Source. This repository contains infrastructure components for Zocalo.
The idea of Zocalo is a simple one - to build a messaging framework, where text-based messages are sent between parts of the system to coordinate data analysis. In the wider scope of things this also covers tasks like archiving, but generally it handles everything that happens after data acquisition.
Zocalo as a wider whole is made up of two repositories (plus some private internal repositories when deployed at Diamond):
* `DiamondLightSource/python-zocalo <https://github.com/DiamondLightSource/python-zocalo>`_ - Infrastructure components for automated data processing, developed by Diamond Light Source. The package is available through `PyPi <https://pypi.org/project/zocalo/>`__ and `conda-forge <https://anaconda.org/conda-forge/zocalo>`__.
* `DiamondLightSource/python-workflows <https://github.com/DiamondLightSource/python-workflows/>`_ - Zocalo is built on the workflows package. It shouldn't be necessary to interact too much with this package, as the details are abstracted by Zocalo. workflows controls the logic of how services connect to each other and what a service is, and actually sends the messages to a message broker. Currently this is an ActiveMQ_ broker (via STOMP_) but support for a RabbitMQ_ broker (via pika_) is being added. This is also available on `PyPi <https://pypi.org/project/workflows/>`__ and `conda-forge <https://anaconda.org/conda-forge/workflows>`__.
As mentioned, Zocalo is currently built on top of ActiveMQ. ActiveMQ is an Apache project that provides a `message broker <https://en.wikipedia.org/wiki/Message_broker>`_ server, acting as a central dispatch that allows various services to communicate. Messages are plaintext, but from the Zocalo point of view it's passing around Python objects (JSON dictionaries). Every message sent has a destination to help the message broker route. Messages may either be sent to a specific queue or broadcast to multiple queues. These queues are subscribed to by the services that run in Zocalo. When developing with Zocalo, you may have to interact with ActiveMQ or RabbitMQ, but it is unlikely that you will have to configure it.
Zocalo allows for the monitoring of jobs executing ``python-workflows`` services or recipe wrappers. The ``python-workflows`` package contains most of the infrastructure required for the jobs themselves and more detailed documentation of its components can be found in the ``python-workflows`` `GitHub repository <https://github.com/DiamondLightSource/python-workflows/>`_ and `the Zocalo documentation <https://zocalo.readthedocs.io>`_.
.. _ActiveMQ: http://activemq.apache.org/
.. _STOMP: https://stomp.github.io/
.. _RabbitMQ: https://www.rabbitmq.com/
.. _pika: https://github.com/pika/pika
Core Concepts
-------------
There are two kinds of task run in Zocalo: *services* and *wrappers*.
A service should handle a discrete short-lived task, for example a data processing job on a small data packet (e.g. finding spots on a single image in an X-ray crystallography context), or inserting results into a database.
In contrast, wrappers can be used for longer running tasks, for example running data processing programs such as xia2_ or fast_ep_.
* A **service** starts in the background and waits for work. There are many services constantly running as part of normal Zocalo operation. In typical usage at Diamond there are ~100 services running at a time.
* A **wrapper**, on the other hand, is only run when needed. It wraps something that is not necessarily aware of Zocalo - e.g. downstream processing software such as xia2 has no idea what Zocalo is, and shouldn't have to. A wrapper takes a message, converts it into a command line invocation, runs the software - typically as a cluster job - then reformats the results into a message to send back to Zocalo. These processes have no idea what Zocalo is, but are run by a script that handles the wrapping.
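As a rough sketch, a service is a Python class built on the ``workflows`` package that subscribes to a queue and handles messages as they arrive. The class, queue and log message below are made up for illustration; consult the ``python-workflows`` documentation for the precise API.

.. code:: python

    from workflows.services.common_service import CommonService

    class ExampleService(CommonService):
        """Minimal illustrative service: log every message from one queue."""

        _service_name = "Example"

        def initializing(self):
            # Called once the service has connected to the message broker
            self._transport.subscribe("example.queue", self.receive)

        def receive(self, header, message):
            self.log.info("Received message: %r", message)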
At Diamond, everything goes to one service to start with: the **Dispatcher**. This takes the initial request message and attaches useful information for the rest of Zocalo. The implementation of the Dispatcher at Diamond is environment specific and not public, but it does some things that would be useful for a similar service to do in other contexts. At Diamond there is interaction with the `ISPyB database <https://github.com/DiamondLightSource/ispyb-database>`_ that stores information about what is run, metadata, how many images, sample type etc. Data stored in the database influences what software we want to be running and this information might need to be read from the database in many, many services. We obviously don't want to read the same thing from many clients and flood the database, and don't want the database to be a single point of failure. The dispatcher front-loads all the database operations - it takes the data collection ID (DCID) and looks up in ISPyB all the information that could be needed for processing. In terms of movement through the system, it sits between the initial message and the services:
.. code:: bash
message -> Dispatcher -> [Services]
At the end of processing there might be information that needs to go back into the databases, for which Diamond has a special ISPyB service to do the writing. If the DB goes down, that is fine - things will queue up for the ISPyB service and get processed when the database becomes available again, and written to the database when ready. This isolates us somewhat from intermittent failures.
The only public Zocalo service at present is ``Schlockmeister``, a garbage collection service that removes jobs that have been requeued multiple times. Diamond operates a variety of internal Zocalo services which perform frequently required operations in a data analysis pipeline.
.. _xia2: https://xia2.github.io/
.. _fast_ep: https://github.com/DiamondLightSource/fast_ep
Working with Zocalo
-------------------
`Graylog <https://www.graylog.org/>`_ is used to manage the logs produced by Zocalo. Once Graylog and the message broker server are running, services and wrappers can be launched with Zocalo.
Zocalo provides the following command line tools:
* ``zocalo.go``: trigger the processing of a recipe
* ``zocalo.wrap``: run a command while exposing its status to Zocalo so that it can be tracked
* ``zocalo.service``: start a new instance of a service
* ``zocalo.shutdown``: shutdown either specific instances of Zocalo services or all instances for a given type of service
* ``zocalo.queue_drain``: drain one queue into another in a controlled manner
Services are available through ``zocalo.service`` if they are linked through the ``workflows.services`` entry point in ``setup.py``. For example, to start a Schlockmeister service:
.. code:: bash
$ zocalo.service -s Schlockmeister
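For a service to be discoverable in this way, the package providing it has to register the service class under the ``workflows.services`` entry point. A minimal sketch of the relevant ``setup.py`` fragment follows; the package and class names are made up.

.. code:: python

    setup(
        # ...
        entry_points={
            "workflows.services": [
                "MyService = my_package.my_service:MyService",
            ]
        },
    )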
.. list-table::
:widths: 100
:header-rows: 1
* - Q: How are services started?
   * - A: Zocalo itself is agnostic on this point. Some of the services are self-propagating and employ simple scaling behaviour - in particular the per-image-analysis services. The services in general all run on cluster nodes, although this means that they cannot be long lived - beyond a couple of hours there is a high risk of the service cluster jobs being terminated or pre-empted. This also encourages writing more robust services, since they may be killed at any time.
.. list-table::
:widths: 100
:header-rows: 1
* - Q: So if a service is terminated in the middle of processing it will still get processed?
   * - A: Yes, messages are handled in transactions - while a service is processing a message, it's marked as "in-progress" but isn't completely dropped. If the service doesn't process the message, or its connection to ActiveMQ gets dropped, then it gets requeued so that another instance of the service can pick it up.
Repeat Message Failure
----------------------
How are repeat errors handled? This is a problem with the system - if e.g. an image or malformed message kills a service then it will get requeued, and will eventually kill all instances of the service running (which will get re-spawned, and then die, and so forth).
We have a special service that looks for repeat failures and moves them to a special "Dead Letter Queue". This service is called Schlockmeister_, and is the only service at the time of writing that has migrated to the public Zocalo repository. This service looks inside the message that got sent, extracts some basic information from the message in as safe a way as possible, and repackages it to the DLQ with information on what it was working on and the "history" of where the message chain has been routed.
.. _Schlockmeister: https://github.com/DiamondLightSource/python-zocalo/tree/master/zocalo/service
| zocalo | /zocalo-0.30.1.tar.gz/zocalo-0.30.1/README.rst | README.rst |
.. highlight:: shell
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every little bit
helps, and credit will always be given.
You can contribute in many ways:
Types of Contributions
----------------------
Report Bugs
~~~~~~~~~~~
Report bugs at https://github.com/DiamondLightSource/zocalo-python/issues.
If you are reporting a bug, please include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Fix Bugs
~~~~~~~~
Look through the GitHub issues for bugs. Anything tagged with "bug" and "help
wanted" is open to whoever wants to implement it.
Implement Features
~~~~~~~~~~~~~~~~~~
Look through the GitHub issues for features. Anything tagged with "enhancement"
and "help wanted" is open to whoever wants to implement it.
Write Documentation
~~~~~~~~~~~~~~~~~~~
Zocalo could always use more documentation, whether as part of the
official Zocalo docs, in docstrings, or even on the web in blog posts,
articles, and such.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an issue at https://github.com/DiamondLightSource/zocalo-python/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
are welcome :)
Get Started!
------------
Ready to contribute? Here's how to set up zocalo for local development.
1. Fork the `zocalo-python` repo on GitHub.
2. Clone your fork locally::
$ git clone [email protected]:your_name_here/zocalo-python.git zocalo
3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::
$ mkvirtualenv zocalo
$ cd zocalo/
$ python setup.py develop
4. Create a branch for local development::
$ git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
5. When you're done making changes, check that your changes pass flake8 and the
tests, including testing other Python versions with tox::
$ flake8 zocalo tests
$ python setup.py test or py.test
$ tox
To get flake8 and tox, just pip install them into your virtualenv.
6. Commit your changes and push your branch to GitHub::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push origin name-of-your-bugfix-or-feature
7. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in README.rst.
3. The pull request should work for all currently supported Python versions.
Tests will be run automatically when you create the pull request.
Tips
----
To run a subset of tests::
$ py.test tests.test_zocalo
Deploying
---------
A reminder for the maintainers on how to deploy.
Make sure all your changes are committed (including an entry in HISTORY.rst).
Then run::
$ bumpversion patch # possible: major / minor / patch
$ git push
$ git push --tags
Travis will then deploy to PyPI if tests pass.
| zocalo | /zocalo-0.30.1.tar.gz/zocalo-0.30.1/CONTRIBUTING.rst | CONTRIBUTING.rst |
###############
Getting Started
###############
Zocalo requires both ActiveMQ and Graylog to be set up and running. The easiest way of setting these up is via Docker.
***************
Active MQ
***************
Pull and run the following image https://hub.docker.com/r/rmohr/activemq
Follow the steps on Docker Hub for extracting the config and data into local mounts.
Configure DLQ locations, see https://activemq.apache.org/message-redelivery-and-dlq-handling for more info.
In `conf/activemq.xml` under `policyEntries` add:
.. code-block:: xml
<policyEntry queue=">">
<deadLetterStrategy>
<individualDeadLetterStrategy queuePrefix="DLQ." useQueueForQueueMessages="true"/>
</deadLetterStrategy>
</policyEntry>
Make sure to enable scheduling: in `conf/activemq.xml`, add the following property to the `broker` tag:
.. code-block:: xml
schedulerSupport="true"
It's also a good idea to enable removal of unused queues; see https://activemq.apache.org/delete-inactive-destinations
In `conf/activemq.xml` in the `broker` tag add the following property:
.. code-block:: xml
schedulePeriodForDestinationPurge="10000"
Then in the `policyEntry` tag for `queue=">"` add the following properties:
.. code-block:: xml
gcInactiveDestinations="true" inactiveTimoutBeforeGC="120000"
This will remove queues that have been inactive for 120 seconds.
Then start ActiveMQ:
.. code-block:: bash
docker run --name activemq -p 61613:61613 -p 8161:8161 \
-v "$(pwd)/conf:/opt/activemq/conf" \
-v "$(pwd)/data:/opt/activemq/data" \
rmohr/activemq
The container exposes the following ports:
.. list-table::
:header-rows: 1
* - Port
- Description
* - 61613
- Stomp transport
* - 8161
- Web Console / Jolokia REST API
A preconfigured docker image with these options applied is available here https://hub.docker.com/r/esrfbcu/zocalo-activemq
***************
Graylog
***************
This can be started easily with a docker-compose.yml. See https://docs.graylog.org/en/3.3/pages/installation/docker.html for full details.
.. code-block:: yaml
version: '3'
services:
# MongoDB: https://hub.docker.com/_/mongo/
mongo:
image: mongo:4.2
networks:
- graylog
# Elasticsearch: https://www.elastic.co/guide/en/elasticsearch/reference/6.x/docker.html
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.10.0
environment:
- http.host=0.0.0.0
- transport.host=localhost
- network.host=0.0.0.0
- "ES_JAVA_OPTS=-Xms512m -Xmx512m"
ulimits:
memlock:
soft: -1
hard: -1
deploy:
resources:
limits:
memory: 1g
networks:
- graylog
# Graylog: https://hub.docker.com/r/graylog/graylog/
graylog:
image: graylog/graylog:4.0
environment:
- GRAYLOG_PASSWORD_SECRET=mysecret
# Password: admin
- GRAYLOG_ROOT_PASSWORD_SHA2=8c6976e5b5410415bde908bd4dee15dfb167a9c873fc4bb8a81f6f2ab448a918
- GRAYLOG_HTTP_EXTERNAL_URI=http://localhost:9000/
networks:
- graylog
restart: always
depends_on:
- mongo
- elasticsearch
ports:
# Graylog web interface and REST API
- 9000:9000
# Syslog TCP
- 1514:1514
# Syslog UDP
- 1514:1514/udp
# GELF TCP
- 12201:12201
# GELF UDP
- 12201:12201/udp
networks:
graylog:
driver: bridge
Then start with:
.. code-block:: bash
docker-compose up
Graylog admin console should be available on http://localhost:9000
Port 12201 is available for Python GELF logging. Configure an input in the Graylog web console to enable receiving messages.
***************
Zocalo
***************
For development, create a new conda / virtual environment, clone Zocalo, and install it:
.. code-block:: bash
conda create -n zocalo
conda activate zocalo
git clone https://github.com/DiamondLightSource/python-zocalo
cd python-zocalo
pip install -e .
For production, install with pip:
.. code-block:: bash
pip install zocalo
***************
Configure
***************
Copy `contrib/site-configuration.yml`. At minimum `graylog` and `activemq` must be configured. Environments should be defined for `live` and `test`. Paths to recipes and drop files must also be specified. Messages are written to drop files if ActiveMQ is unavailable.
The config file to use is specified via the environment variable `ZOCALO_CONFIG`.
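For example (the path is illustrative):

.. code-block:: bash

    export ZOCALO_CONFIG=/path/to/site-configuration.yml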
Sample recipes can be used:
.. code-block:: yaml
storage:
plugin: storage
zocalo.recipe_directory: .../python-zocalo/examples/recipes
===============
JMX
===============
To make use of `zocalo.queue_monitor` and `zocalo.status_monitor`, JMX needs to be configured. The JMX configuration points to the Jolokia REST API. When starting ActiveMQ the logs will tell you where the REST API is running
.. code-block:: bash
INFO | ActiveMQ Jolokia REST API available at http://0.0.0.0:8161/api/jolokia/
So the configuration should be:
.. code-block:: yaml
port: 8161
host: localhost
base_url: api/jolokia
Username and password are the same as for the web console and are defined in `users.properties`.
***************
Starting Up
***************
`-e test` will make use of the test environment
Start the dispatcher
.. code-block:: bash
conda activate zocalo
zocalo.service -s Dispatcher (-e test)
Start the process runner
.. code-block:: bash
zocalo.service -s Runner (-e test)
Run the test recipe:
.. code-block:: bash
zocalo.go -r example -s workingdir="$(pwd)" 1234 (-e test)
***********************
Dead Letter Queue (DLQ)
***********************
The dead letter queue is where rejected messages end up. One DLQ is available per topic, making it easy to identify where messages are being rejected. For details on DLQs see https://activemq.apache.org/message-redelivery-and-dlq-handling
Messages can be purged using:
.. code-block:: bash
zocalo.dlq_purge --output-directory=/path/to/dlq (-e test)
And re-injected with:
.. code-block:: bash
zocalo.dlq_reinject dlq_file (-e test)
| zocalo | /zocalo-0.30.1.tar.gz/zocalo-0.30.1/docs/getting_started.rst | getting_started.rst |
=========
Workflows
=========
Zocalo is built on top of the `python-workflows` package. This provides the facilities with which services and recipes for Zocalo are constructed.
.. image:: ./zocalo_graphic.jpg
``python-workflows`` interfaces directly with an externally provided client library for a message broker such as ActiveMQ or RabbitMQ through its ``transport`` module. Services then take messages, process them, and maybe produce some output. The outputs of services can be piped together through a recipe. Services can also be used to monitor message queues. ``python-zocalo`` runs ``python-workflows`` services and recipes, wrapping them so that they are all visible to Zocalo.
.. image:: ./zocalo_queues.jpg
This diagram illustrates the overall task management model of Zocalo. Services run continuously, consuming from the relevant queues. Recipes inside wrappers dictate the flow of data from queue to queue and, therefore, from service to service. The nodes represent input data that is given to a service, with the output of one service becoming the input of the next.
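As a rough illustration, a recipe is a JSON document describing numbered steps, the queue each step's message is sent to, and which step receives the output. The field names below follow the ``python-workflows`` recipe format; the service and queue names are made up.

.. code-block:: json

    {
        "1": {"service": "ExampleProducer", "queue": "example.input", "output": 2},
        "2": {"service": "ExampleConsumer", "queue": "example.output"},
        "start": [[1, []]]
    }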
| zocalo | /zocalo-0.30.1.tar.gz/zocalo-0.30.1/docs/workflows.rst | workflows.rst |
.. highlight:: shell
============
Installation
============
Stable release
--------------
To install Zocalo, run this command in your terminal:
.. code-block:: console
$ pip install zocalo
This is the preferred method to install Zocalo, as it will always install the most recent stable release.
If you don't have `pip`_ installed, this `Python installation guide`_ can guide
you through the process.
.. _pip: https://pip.pypa.io
.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/
From sources
------------
The sources for Zocalo can be downloaded from the `Github repo`_.
You can either clone the public repository:
.. code-block:: console
$ git clone git://github.com/DiamondLightSource/zocalo-python
Or download the `tarball`_:
.. code-block:: console
$ curl -OL https://github.com/DiamondLightSource/zocalo-python/tarball/master
Once you have a copy of the source, you can install it with:
.. code-block:: console
$ python setup.py install
.. _Github repo: https://github.com/DiamondLightSource/zocalo-python
.. _tarball: https://github.com/DiamondLightSource/zocalo-python/tarball/master
| zocalo | /zocalo-0.30.1.tar.gz/zocalo-0.30.1/docs/installation.rst | installation.rst |
=============
Configuration
=============
Zocalo will need to be customised for your specific installation to control
aspects such as the settings for the underlying messaging framework, centralised
logging, and more.
To achieve this, Zocalo supports the concept of a site configuration file.
An `example configuration file`_ is included in the Zocalo repository.
Discovery
---------
Zocalo will, by default, look for the main configuration file at the location
specified in the environment variable ``ZOCALO_CONFIG``.
You can also specify locations directly if you use Zocalo programmatically, eg.::
import zocalo.configuration
zc = zocalo.configuration.from_file("/alternative/configuration.yml")
or you can load configurations from a `YAML`_ string::
zc = zocalo.configuration.from_string("version: 1\n\n...")
Configuration file format
-------------------------
The configuration file is in `YAML`_ format. If you are not familiar with YAML
then this `YAML primer`_ may prove useful.
This documentation describes version 1 of the configuration file format. There
is currently no other version. Every site configuration file must declare its
version by including, at the top level:
.. code-block:: yaml
version: 1
Beyond the version specification every configuration file can contain three
different types of blocks:
#. plugin configurations
#. environment definitions
#. references to further configuration files
Let's look at them individually.
Plugin configurations
---------------------
Each plugin configuration block follows this basic format:
.. code-block:: yaml
some-unique-name:
plugin: plugin-name
...
The names of the plugin configuration blocks (``some-unique-name``) can be
chosen freely; their only restriction is that they should not collide
with the names of other blocks that you configure -- otherwise the previous
definition will be overwritten.
The name of the plugin (``plugin-name``) on the other hand refers to a specific
Zocalo configuration plugin.
Through the magic of `Python entry points`_ the list of potentially available
plugins is infinite, and you can easily develop and distribute your own,
independently from Zocalo.
Just because a plugin configuration exists does not mean that it is *active*.
For this you will need to add the configuration to an environment and activate
this environment (see below under :ref:`environments`).
The configuration file may also include configurations for plugins that are
not installed. This will raise a warning when you try to enable such a plugin
configuration, but it will not cause the rest of the configuration to crash
and burn.
Zocalo already includes a few basic plugins, and others may be available to
you via other Python packages, such as `workflows`_. A few of the included
plugins are detailed here:
Storage plugin
^^^^^^^^^^^^^^
tbd.
Logging plugin
^^^^^^^^^^^^^^
This plugin allows site-wide logging configuration. For example:
.. code-block:: yaml
some-unique-name:
plugin: logging
loggers:
zocalo:
level: WARNING
workflows:
level: WARNING
verbose:
- loggers:
zocalo:
level: INFO
- loggers:
zocalo:
level: DEBUG
workflows:
level: DEBUG
would set the Python loggers ``zocalo`` and ``workflows`` to only report
messages of level ``WARNING`` and above. Apart from the additional
``plugin:``- and ``verbose:``-keys the syntax follows the
`Python Logging Configuration Schema`_. This allows not only the setting of
log levels, but also the definition of log handlers, filters, and formatters.
A plugin definition will, by default, overwrite any previous logging
configuration. While it is fundamentally possible to combine multiple
configurations (using the ``incremental`` key), this will cause all sorts of
problems and is therefore strongly discouraged.
Please note that Zocalo commands will currently always add a handler to log
to the console. This behaviour may be reviewed in the future.
The Zocalo configuration object exposes a facility to read out and increase
a verbosity level, which will apply incremental changes to the logging
configuration. In the above example setting ``zc.logging.verbosity = 1``
would change the log level for ``zocalo`` to ``INFO`` while leaving
``workflows`` at ``WARNING``. Setting ``zc.logging.verbosity = 2`` would
change both to ``DEBUG``.
Note that the verbosity level cannot be decreased, and due to the Python
Logging model verbosity changes should be done close to the initial logging
setup, as otherwise child loggers may have been set up inheriting previous
settings.
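As a concrete sketch, increasing the verbosity programmatically might look like the following. The file path and environment name are illustrative, and ``activate_environment`` is assumed to be the call that enables an environment containing the logging plugin.

.. code-block:: python

    import zocalo.configuration

    zc = zocalo.configuration.from_file("/path/to/site-configuration.yml")
    zc.activate_environment("live")  # assumed API; environment must include the logging plugin
    zc.logging.verbosity = 1  # with the example above: 'zocalo' now logs INFO and up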
The logging plugin offers two Graylog handlers (``GraylogUDPHandler``,
``GraylogTCPHandler``). These are based on `graypy`_, but offer slightly
improved performance by front-loading DNS lookups and apply a patch to
``graypy`` to ensure syslog levels are correctly reported to Graylog.
To use these handlers you can declare them as follows:
.. code-block:: yaml
some-unique-name:
plugin: logging
handlers:
graylog:
(): zocalo.configuration.plugin_logging.GraylogUDPHandler
host: example.com
port: 1234
root:
handlers: [ graylog ]
The logging plugin offers a log filter (``DowngradeFilter``), which can
be attached to loggers to reduce the severity of messages. It takes two
parameters, ``reduce_to`` (default: ``WARNING``) and ``only_below``
(default: ``CRITICAL``), and messages with a level between ``reduce_to``
and ``only_below`` have their log level changed to ``reduce_to``:
.. code-block:: yaml
some-unique-name:
plugin: logging
filters:
downgrade_all_warnings_and_errors:
(): zocalo.configuration.plugin_logging.DowngradeFilter
reduce_to: INFO
loggers:
pika:
filters: [ downgrade_all_warnings_and_errors ]
Graylog plugin
^^^^^^^^^^^^^^
This should be considered deprecated and will be removed at some point in the
future. Use the Logging plugin instead.
.. _environments:
Environment definitions
-----------------------
.. code-block:: yaml
environments:
env-name:
plugins:
- some-unique-name
- ...
Environments aggregate multiple plugin configuration blocks together, and
environments are what you load to set up specific plugin configurations.
The environment names (``env-name``) can again be chosen freely. Underneath
environments you can optionally declare groups (here: ``plugins``). These
groups affect the order in which the plugin configurations take effect, and
they also play a role when a configuration file is split up across multiple
files. If you don't specify a group name then the default group name
``plugins`` is used.
Groups are loaded alphabetically, with one exception: ``plugins`` is special
and is always loaded last. Within each group the plugin configurations are
loaded in the specified order.
A special environment name is ``default``, which is the environment that will
be loaded if no other environment is loaded. You can use aliasing (see below
under :ref:`environment_aliases`) to point ``default`` to a different, more
self-explanatory environment name.
.. _environment_aliases:
Environment aliases
^^^^^^^^^^^^^^^^^^^
You can create aliases for environment names by just giving the name of the
underlying environment. You can only do pure aliasing here; you cannot
override parts of the referenced environment at this time.
This configuration gives you an ``alias`` environment, that is exactly
identical to the environment named ``real``:
.. code-block:: yaml
environments:
real:
plugins:
- ...
alias: real
Aliases are resolved immediately when they are encountered. The aliased
environment therefore has to be specified in the same configuration file.
References to further files
---------------------------
tbd.
========================
Writing your own plugins
========================
tbd.
.. _Python Logging Configuration Schema: https://docs.python.org/3/library/logging.config.html#dictionary-schema-details
.. _Python entry points: https://amir.rachum.com/blog/2017/07/28/python-entry-points/
.. _YAML primer: https://getopentest.org/reference/yaml-primer.html
.. _YAML: https://en.wikipedia.org/wiki/YAML
.. _example configuration file: https://github.com/DiamondLightSource/python-zocalo/blob/main/contrib/site-configuration.yml
.. _graypy: https://pypi.org/project/graypy/
.. _workflows: https://github.com/DiamondLightSource/python-workflows/tree/main/src/workflows/util/zocalo
| zocalo | /zocalo-0.30.1.tar.gz/zocalo-0.30.1/docs/siteconfig.rst | siteconfig.rst |
Zocrypt
===============
Intended mainly for use by **ZeroAndOne Developers** for protecting data with 6-level encryption.
Based on our project `secret message encoder decoder <https://Secret-Message-Encoder-Decoder.itszeroandone.repl.co>`_
Installing
============
.. code-block:: bash
pip install zocrypt
Usage
=====
.. code-block:: python

    >>> from zocrypt import key
    >>> from zocrypt.encrypter import encrypt_text
    >>> from zocrypt.decrypter import decrypt_text
    >>> text = "5 Mangoes are better than 6 Mangoes"
    >>> key = key.generate()
    >>> encrypted = encrypt_text(text, key)
    >>> encrypted
    '`"V`O/i|;^a^.~k|4~k|;a|R#`k|l`V~#^#^V~Hk~V|l/a|k^"~V/O/i^;|a^.`k3'
    >>> decrypt_text(encrypted, key)
    '5 Mangoes are better than 6 Mangoes'
| zocrypt | /zocrypt-1.3.25.tar.gz/zocrypt-1.3.25/README.rst | README.rst |
# Zenseact Open Dataset
[](https://pypi.org/project/zod/#history)
[](https://pypi.org/project/zod/)
[](https://pypistats.org/packages/zod)
The Zenseact Open Dataset (ZOD) is a large multi-modal autonomous driving dataset developed by a team of researchers at [Zenseact](https://zenseact.com/). The dataset is split into three categories: *Frames*, *Sequences*, and *Drives*. For more information about the dataset, please refer to our [coming soon](), or visit our [website](https://zod.zenseact.com).
## Examples
Find examples of how to use the dataset in the [examples](examples/) folder. Here you will find a set of jupyter notebooks that demonstrate how to use the dataset, as well as an example of how to train an object detection model using [Detectron2](https://github.com/facebookresearch/detectron2).
## Installation
To install the library with minimal dependencies, for instance to be used in a training environment without the need for interactivity or visualization, run:
```bash
pip install zod
```
To install the library along with the CLI, which can be used to download the dataset, convert between formats, and perform visualization, run:
```bash
pip install "zod[cli]"
```
To install the full devkit, with the CLI and all dependencies, run:
```bash
pip install "zod[all]"
```
## Download using the CLI
This is an example of how to download the ZOD Frames mini-dataset using the CLI. Prerequisites are that you have applied for access and received a download link.
The simplest way to download the dataset is to use the CLI interactively:
```bash
zod download
```
This will prompt you for the required information, present you with a summary of the download, and then ask for confirmation. You can of course also specify all the required information directly on the command line, and avoid the confirmation using `--no-confirm` or `-y`. For example:
```bash
zod download -y --url="<download-link>" --output-dir=<path/to/outputdir> --subset=frames --version=mini
```
By default, all data streams are downloaded for ZodSequences and ZodDrives. For ZodFrames, the DNAT versions of the images and the surrounding (non-keyframe) lidar scans are excluded. To download them as well, run:
```bash
zod download -y --url="<download-link>" --output-dir=<path/to/outputdir> --subset=frames --version=full --num-scans-before=-1 --num-scans-after=-1 --dnat
```
If you want to exclude some of the data streams, you can do so by specifying the `--no-<stream>` flag. For example, to download only the DNAT images, infos, and annotations, run:
```bash
zod download --dnat --no-blur --no-lidar --no-oxts --no-vehicle-data
```
Finally, for a full list of options you can of course run:
```bash
zod download --help
```
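Once downloaded, the dataset can be loaded with the devkit. The snippet below is a minimal sketch: the `ZodFrames` constructor arguments mirror the mini download above, but treat the exact API as an assumption and see the [examples](examples/) folder for canonical usage.

```python
from zod import ZodFrames  # import path assumed from the devkit

zod_frames = ZodFrames(dataset_root="path/to/outputdir", version="mini")
print(f"Loaded {len(zod_frames)} frames")  # assumes ZodFrames supports len()
```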
## Anonymization
To preserve privacy, the dataset is anonymized. The anonymization is performed by [brighterAI](https://brighter.ai/), and we provide two separate modes of anonymization: deep fakes (DNAT) and blur. In our paper, we show that the performance of an object detector is not affected by the anonymization method. For more details regarding this experiment, please refer to our [coming soon]().
## Citation
If you publish work that uses Zenseact Open Dataset, please cite [our arxiv paper](https://arxiv.org/abs/2305.02008):
```
@article{zod2023,
author = {Alibeigi, Mina and Ljungbergh, William and Tonderski, Adam and Hess, Georg and Lilja, Adam and Lindstr{\"o}m, Carl and Motorniuk, Daria and Fu, Junsheng and Widahl, Jenny and Petersson, Christoffer},
title = {Zenseact Open Dataset: A large-scale and diverse multimodal dataset for autonomous driving},
year = {2023},
journal = {arXiv preprint arXiv:2305.02008},
}
```
## Contact
For questions about the dataset, please [Contact Us](mailto:[email protected]).
## Contributing
We welcome contributions to the development kit. If you would like to contribute, please open a pull request.
## License
**Dataset**:
This dataset is the property of Zenseact AB (© 2023 Zenseact AB) and is licensed under [CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/). Any public use, distribution, or display of this dataset must contain this notice in full:
> For this dataset, Zenseact AB has taken all reasonable measures to remove all personally identifiable information, including faces and license plates. To the extent that you like to request the removal of specific images from the dataset, please contact [[email protected]](mailto:[email protected]).
**Development kit**:
This development kit is the property of Zenseact AB (© 2023 Zenseact AB) and is licensed under [MIT](https://opensource.org/licenses/MIT).
| zod | /zod-0.2.9.tar.gz/zod-0.2.9/README.md | README.md |
# ZODB context manager
## Installation
`pip install zodb-cm`
### ZConnection()
- Provides a ZODB connection with auto-abort (default).
- Provides a tuple of connection and root object:
```
with ZConnection(db) as (cx, root):
root.one = "ok"
```
- ZConnection implements a connection context manager.
- Transaction context managers, in contrast, auto-commit (see the sketch after this list):
1. with db.transaction() as connection, or
1. with cx.transaction_manager as transaction, or
1. with transaction.manager as transaction (for the thread-local transaction manager)
- See also http://www.zodb.org/en/latest/guide/transactions-and-threading.html
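A minimal sketch of the first auto-commit variant, using plain ZODB APIs (the in-memory storage and the attribute written here are only for illustration):

```python
import ZODB

db = ZODB.DB(None)  # None gives an in-memory storage, for illustration only
with db.transaction() as connection:
    connection.root.one = "ok"  # committed automatically when the block exits
```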
### ZDatabase()
- Provides a ZODB database context manager.
| zodb-cm | /zodb-cm-0.1.tar.gz/zodb-cm-0.1/README.md | README.md |
==========
Change log
==========
0.6 (2018-06-05)
================
- Update requirements to ``ZODB >= 4`` as with an older version the migration
cannot be done successfully.
(`#13 <https://github.com/gocept/zodb.py3migrate/issues/13>`_)
0.5 (2017-01-17)
================
- Release as wheel and include all files in release.
- Ensure compatibility with ``setuptools >= 30``.
0.4 (2016-10-29)
================
- Fix brown bag release.
0.3 (2016-10-29)
================
- Fixes for issues #4 and #5: Converted ZODB is now actually saved;
  using additional subtransactions improves the memory footprint.
0.2 (2016-06-08)
================
- Split up the two functions previously united in the script
``bin/zodb-py3migrate`` into ``bin/zodb-py3migrate-analyze`` resp.
``bin/zodb-py3migrate-convert``.
- Add new options to the analysis script:
- ``--start`` to start the analysis with a predefined OID.
- ``--limit`` to stop the analysis after a certain amount of seen OIDs.
0.1 (2016-05-19)
================
* Initial release.
| zodb.py3migrate | /zodb.py3migrate-0.6.tar.gz/zodb.py3migrate-0.6/CHANGES.rst | CHANGES.rst |
.. image:: https://travis-ci.org/gocept/zodb.py3migrate.svg?branch=master
:target: https://travis-ci.org/gocept/zodb.py3migrate.svg
.. image:: https://readthedocs.org/projects/zodbpy3migrate/badge/?version=latest
:target: https://zodbpy3migrate.readthedocs.io
:alt: Documentation Status
.. image:: https://img.shields.io/pypi/v/zodb.py3migrate.svg
:target: https://pypi.org/project/zodb.py3migrate/
:alt: PyPI
.. image:: https://img.shields.io/pypi/pyversions/zodb.py3migrate.svg
:target: https://pypi.org/project/zodb.py3migrate/
:alt: Python versions
===================================
zodb.py3migrate - ZODB and Python 3
===================================
If you have a ZODB_ database written using Python 2.x this package helps you to
get your database ready for using it with Python 3. It is able to:
* display objects which need to be converted,
* do most of the conversion for you,
* and switch the database file to be readable in a process running Python 3.
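A typical invocation might look like the sketch below. The storage-file argument is an assumption based on the command-line help; ``--config`` points at an INI file mapping attribute dotted names to target encodings, as described in the documentation.

.. code-block:: console

    $ bin/zodb-py3migrate-analyze /path/to/Data.fs
    $ bin/zodb-py3migrate-convert /path/to/Data.fs --config=mapping.ini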
This package is compatible with Python version 2.7.
The documentation is at https://zodbpy3migrate.readthedocs.io.
.. _ZODB : http://zodb.org
| zodb.py3migrate | /zodb.py3migrate-0.6.tar.gz/zodb.py3migrate-0.6/README.rst | README.rst |
import os
import shutil
import sys
import tempfile
from optparse import OptionParser
__version__ = '2015-07-01'
# See zc.buildout's changelog if this version is up to date.
tmpeggs = tempfile.mkdtemp(prefix='bootstrap-')
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --find-links to point to local resources, you can keep
this script from going over the network.
'''
parser = OptionParser(usage=usage)
parser.add_option("--version",
action="store_true", default=False,
help=("Return bootstrap.py version."))
parser.add_option("-t", "--accept-buildout-test-releases",
dest='accept_buildout_test_releases',
action="store_true", default=False,
help=("Normally, if you do not specify a --buildout-version, "
"the bootstrap script and buildout gets the newest "
"*final* versions of zc.buildout and its recipes and "
"extensions for you. If you use this flag, "
"bootstrap and buildout will get the newest releases "
"even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
help=("Specify the path to the buildout configuration "
"file to be used."))
parser.add_option("-f", "--find-links",
help=("Specify a URL to search for buildout releases"))
parser.add_option("--allow-site-packages",
action="store_true", default=False,
help=("Let bootstrap.py use existing site packages"))
parser.add_option("--buildout-version",
help="Use a specific zc.buildout version")
parser.add_option("--setuptools-version",
help="Use a specific setuptools version")
parser.add_option("--setuptools-to-dir",
help=("Allow for re-use of existing directory of "
"setuptools versions"))
options, args = parser.parse_args()
if options.version:
print("bootstrap.py version %s" % __version__)
sys.exit(0)
######################################################################
# load/install setuptools
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
ez = {}
if os.path.exists('ez_setup.py'):
exec(open('ez_setup.py').read(), ez)
else:
exec(urlopen('https://bootstrap.pypa.io/ez_setup.py').read(), ez)
if not options.allow_site_packages:
# ez_setup imports site, which adds site packages
# this will remove them from the path to ensure that incompatible versions
# of setuptools are not in the path
import site
# inside a virtualenv, there is no 'getsitepackages'.
# We can't remove these reliably
if hasattr(site, 'getsitepackages'):
for sitepackage_path in site.getsitepackages():
# Strip all site-packages directories from sys.path that
# are not sys.prefix; this is because on Windows
# sys.prefix is a site-package directory.
if sitepackage_path != sys.prefix:
sys.path[:] = [x for x in sys.path
if sitepackage_path not in x]
setup_args = dict(to_dir=tmpeggs, download_delay=0)
if options.setuptools_version is not None:
setup_args['version'] = options.setuptools_version
if options.setuptools_to_dir is not None:
setup_args['to_dir'] = options.setuptools_to_dir
ez['use_setuptools'](**setup_args)
import setuptools
import pkg_resources
# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
if path not in pkg_resources.working_set.entries:
pkg_resources.working_set.add_entry(path)
######################################################################
# Install buildout
ws = pkg_resources.working_set
setuptools_path = ws.find(
pkg_resources.Requirement.parse('setuptools')).location
# Fix sys.path here as easy_install.pth added before PYTHONPATH
cmd = [sys.executable, '-c',
'import sys; sys.path[0:0] = [%r]; ' % setuptools_path +
'from setuptools.command.easy_install import main; main()',
'-mZqNxd', tmpeggs]
find_links = os.environ.get(
'bootstrap-testing-find-links',
options.find_links or
('http://downloads.buildout.org/'
if options.accept_buildout_test_releases else None)
)
if find_links:
cmd.extend(['-f', find_links])
requirement = 'zc.buildout'
version = options.buildout_version
if version is None and not options.accept_buildout_test_releases:
# Figure out the most recent final version of zc.buildout.
import setuptools.package_index
_final_parts = '*final-', '*final'
def _final_version(parsed_version):
try:
return not parsed_version.is_prerelease
except AttributeError:
# Older setuptools
for part in parsed_version:
if (part[:1] == '*') and (part not in _final_parts):
return False
return True
index = setuptools.package_index.PackageIndex(
search_path=[setuptools_path])
if find_links:
index.add_find_links((find_links,))
req = pkg_resources.Requirement.parse(requirement)
if index.obtain(req) is not None:
best = []
bestv = None
for dist in index[req.project_name]:
distv = dist.parsed_version
if _final_version(distv):
if bestv is None or distv > bestv:
best = [dist]
bestv = distv
elif distv == bestv:
best.append(dist)
if best:
best.sort()
version = best[-1].version
if version:
requirement = '=='.join((requirement, version))
cmd.append(requirement)
import subprocess
if subprocess.call(cmd) != 0:
raise Exception(
"Failed to execute command:\n%s" % repr(cmd)[1:-1])
######################################################################
# Import and run buildout
ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout
if not [a for a in args if '=' not in a]:
args.append('bootstrap')
# if -c was provided, we push it back into args for buildout' main function
if options.config_file is not None:
args[0:0] = ['-c', options.config_file]
zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs) | zodb.py3migrate | /zodb.py3migrate-0.6.tar.gz/zodb.py3migrate-0.6/bootstrap.py | bootstrap.py |
from .migrate import print_results, get_argparse_parser, get_format_string
from .migrate import get_classname, find_obj_with_binary_content, run
import ConfigParser
import collections
import logging
import zodbpickle
import transaction
log = logging.getLogger(__name__)
def convert_storage(storage, mapping, verbose=False):
"""Iterate ZODB objects with binary content and apply mapping."""
result = collections.defaultdict(int)
errors = collections.defaultdict(int)
for obj, data, key, value, type_ in find_obj_with_binary_content(
storage, errors):
klassname = get_classname(obj)
dotted_name = get_format_string(obj).format(**locals())
encoding = mapping.get(dotted_name, None)
if encoding is None or type_ == 'key':
continue
if encoding == 'zodbpickle.binary':
data[key] = zodbpickle.binary(value)
else:
data[key] = value.decode(encoding)
obj._p_changed = True
result[dotted_name] += 1
transaction.commit()
return result, errors
def read_mapping(config_path):
"""Create mapping from INI file.
It maps the section options to the name of their section, thus a
configuration like below results in a mapping {'foo.Bar.baz': 'utf-8'}.
[utf-8]
foo.Bar.baz
"""
parser = ConfigParser.ConfigParser(allow_no_value=True)
parser.optionxform = str # avoid lower casing of option names
parser.read(config_path)
mapping = {}
for section in parser.sections():
mapping.update(dict.fromkeys(parser.options(section), section))
return mapping
def convert(storage, config_path, verbose=False):
"""Convert binary strings according to mapping read from config file."""
mapping = read_mapping(config_path)
print_results(
*convert_storage(storage, mapping, verbose=verbose),
verb='Converted', verbose=verbose)
def main(args=None):
"""Entry point for the convert script."""
parser = get_argparse_parser(
"Convert binary fields in a ZODB FileStorage so it can be used with "
"Python 3.""")
group = parser.add_argument_group('Convert options')
group.add_argument(
'-c', '--config', help='Path to conversion config file.')
run(parser, convert, 'config', 'verbose', args=args) | zodb.py3migrate | /zodb.py3migrate-0.6.tar.gz/zodb.py3migrate-0.6/src/zodb/py3migrate/convert.py | convert.py |
from .migrate import print_results, get_argparse_parser, get_format_string
from .migrate import get_classname, find_obj_with_binary_content, run
import collections
import logging
import transaction
log = logging.getLogger(__name__)
def analyze_storage(storage, verbose=False, start_at=None, limit=None):
"""Analyze a ``FileStorage``.
Returns a tuple `(result, errors)`
Where
`result` is a dict mapping a dotted name of an attribute to the
number of occurrences in the storage and
`errors` is a dict mapping a dotted name of a class those instances have
no `__dict__` to the number of occurrences.
"""
result = collections.defaultdict(int)
errors = collections.defaultdict(int)
for obj, data, key, value, type_ in find_obj_with_binary_content(
storage, errors, start_at=start_at, limit=limit):
klassname = get_classname(obj)
format_string = get_format_string(
obj, display_type=True, verbose=verbose)
result[format_string.format(**locals())] += 1
return result, errors
def analyze(storage, verbose=False, start_at=None, limit=None):
"""Analyse a whole file storage and print out the results."""
transaction.doom()
results = analyze_storage(
storage, verbose=verbose, start_at=start_at, limit=limit)
print_results(*results, verb='Found', verbose=verbose)
def main(args=None):
"""Entry point for the analyze script."""
parser = get_argparse_parser(
"Analyze binary fields in a ZODB FileStorage that need a conversion "
"before this FileStorage can be used with Python 3.""")
group = parser.add_argument_group('Analyze options')
group.add_argument(
'--start', default=None,
help='OID to start analysis with. Default: start with first OID in '
'storage.')
group.add_argument(
'--limit', default=None, type=int,
help='Analyze at most that many objects. Default: no limit')
run(parser, analyze, 'verbose', 'start', 'limit', args=args) | zodb.py3migrate | /zodb.py3migrate-0.6.tar.gz/zodb.py3migrate-0.6/src/zodb/py3migrate/analyze.py | analyze.py |
from ZODB.DB import DB
import BTrees.IOBTree
import BTrees.LOBTree
import BTrees.OIBTree
import BTrees.OLBTree
import BTrees.OOBTree
import ZODB.FileStorage
import ZODB.POSException
import ZODB.utils
import argparse
import collections
import logging
import pdb # noqa
import transaction
import persistent
import persistent.list
import persistent.mapping
import zodbpickle
log = logging.getLogger(__name__)
def wake_object(obj):
"""Wake the object so its `__dict__` gets filled."""
try:
getattr(obj, 'some_attribute', None)
except ZODB.POSException.POSKeyError as e:
# For example if a ZODB Blob was not found.
log.error('POSKeyError: %s', e)
def is_container(obj):
return isinstance(obj, (
BTrees.IOBTree.IOBTree,
BTrees.LOBTree.LOBTree,
BTrees.OIBTree.OIBTree,
BTrees.OLBTree.OLBTree,
BTrees.OOBTree.OOBTree,
persistent.mapping.PersistentMapping,
persistent.list.PersistentList))
def is_treeset(obj):
return isinstance(obj, (
BTrees.IOBTree.IOTreeSet,
BTrees.LOBTree.LOTreeSet,
BTrees.OIBTree.OITreeSet,
BTrees.OLBTree.OLTreeSet,
BTrees.OOBTree.OOTreeSet))
def get_data(obj):
"""Return data of object. Return `None` if not possible.
We try to fetch data by reading __dict__, but this is not possible for
`BTree`s. Call `keys` or `items` on obj respectively.
"""
result = None
if is_container(obj):
result = obj
elif is_treeset(obj):
result = dict.fromkeys(obj.keys())
else:
try:
result = vars(obj)
except TypeError:
pass
return result
def find_binary(value):
"""Return type if value is or contains binary strings. None otherwise."""
if isinstance(value, persistent.Persistent):
# Avoid duplicate analysis of the same object and circular references
return None
if isinstance(value, zodbpickle.binary):
# Already marked as binary, skip.
return None
if isinstance(value, str):
try:
value.decode('ascii')
except UnicodeDecodeError:
return 'string'
else:
return None
elif isinstance(value, collections.Mapping):
for k, v in value.items():
if find_binary(k) or find_binary(v):
return 'dict'
elif hasattr(value, '__iter__'):
try:
for v in value:
if find_binary(v):
return 'iterable'
except TypeError:
            # e.g. the class <type 'tuple'> itself has __iter__, but
            # iterating over the class (not an instance) raises TypeError.
pass
return None
def get_classname(obj):
return obj.__class__.__module__ + '.' + obj.__class__.__name__
def get_items(obj):
"""Get the items of a dict-like or list-like object."""
if hasattr(obj, 'items'):
items = obj.items()
else:
items = enumerate(obj)
return items
def find_obj_with_binary_content(
storage, errors, start_at=None, limit=None, watermark=10000):
"""Generator which finds objects in `storage` having binary content.
Yields tuple: (object, data, key-name, value, type)
`type` can be one of 'string', 'dict', 'iterable', 'key'.
"""
db = DB(storage)
connection = db.open()
if start_at is not None:
next = ZODB.utils.repr_to_oid(start_at)
else:
next = None # first OID in storage
len_storage = len(storage)
log.warn('Analyzing about %s objects.', len_storage)
count = 0
run = True
while run:
oid, tid, data, next = storage.record_iternext(next)
if next is None:
run = False
obj = connection.get(oid)
klassname = get_classname(obj)
wake_object(obj)
data = get_data(obj)
if data is None:
errors[klassname] += 1
continue
for key, value in get_items(data):
try:
type_ = find_binary(value)
if type_ is not None:
yield obj, data, key, value, type_
type_ = find_binary(key)
if type_ is not None:
yield obj, data, key, key, 'key'
except Exception:
log.error('Could not execute %r', value, exc_info=True)
continue
count += 1
if count % watermark == 0:
log.warn('%s of about %s objects analyzed.', count, len_storage)
transaction.savepoint()
connection.cacheMinimize()
if limit is not None and count >= limit:
return
def get_format_string(obj, display_type=False, verbose=False):
    """Return a format string for reporting where binary content was found."""
    format_string = ''
if is_treeset(obj) or is_container(obj):
format_string = '{klassname}[{key!r}]'
else:
format_string = '{klassname}.{key}'
if display_type:
format_string += ' is {type_}%s' % (
': {value!r:.30}' if verbose else '')
return format_string
def print_results(result, errors, verb, verbose):
"""Print the analysis results."""
if verbose:
print ("Found {} classes whose objects do not have __dict__: "
"(number of occurrences)".format(len(errors)))
for key, value in sorted_by_key(errors):
print "{} ({})".format(key, value)
print
print "# ########################################################### #"
print
print "{} {} binary fields: (number of occurrences)".format(
verb, len(result))
for key, value in sorted_by_key(result):
print "{} ({})".format(key, value)
def sorted_by_key(dict):
"""Get dict entries sorted by the key."""
for key in sorted(dict):
yield key, dict[key]
def get_argparse_parser(description):
"""Return an ArgumentParser with the default configuration."""
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
'zodb_path', help='Path to Data.fs', metavar='Data.fs')
group = parser.add_argument_group('General options')
group.add_argument(
'-b', '--blob-dir', default=None,
help='Path to the blob directory if ZODB blobs are used.')
group.add_argument(
'-v', '--verbose', action='store_true',
help='Be more verbose in output')
group.add_argument(
'--pdb', action='store_true', help='Drop into a debugger on an error')
return parser
def run(parser, callable, *arg_names, **kw):
"""Parse the command line args and feed them to `callable`.
*arg_names ... command line arguments which should be used as arguments of
the `callable`.
**kw ... Only the key `args` is allowed here to override the command line
arguments in tests.
"""
logging.basicConfig(level=logging.INFO)
args = kw.pop('args', None)
assert not kw, \
"Don't know how to handle the following kwargs: {!r}".format(kw)
args = parser.parse_args(args)
try:
storage = ZODB.FileStorage.FileStorage(
args.zodb_path, blob_dir=args.blob_dir)
callable_args = [getattr(args, x) for x in arg_names]
return callable(storage, *callable_args)
except Exception:
if args.pdb:
pdb.post_mortem()
raise | zodb.py3migrate | /zodb.py3migrate-0.6.tar.gz/zodb.py3migrate-0.6/src/zodb/py3migrate/migrate.py | migrate.py |
Changes
-------
0.17.1 (2020-11-25)
~~~~~~~~~~~~~~~~~~~
- Fix interpreter crash (BTree_rangeSearch: Assertion 'highbucket != NULL'
failed) in historical state computation for large OOBTrees (GH #33).
0.17 (2020-11-24)
~~~~~~~~~~~~~~~~~
- Fix @@zodbbrowser_history when using ZEO (AttributeError:
'TransactionIterator' object has no attribute 'close', GH #31).
- Fix OOBTree history browsing that regressed in 0.11.0: it was showing only a
subset of changes (those that touched the OOBTree itself) and wasn't showing
others (those that touched deeper OOBTreeBuckets).
- Fix TypeError for SampleContainer subclasses that use a non-persistent
  ``__data`` attribute (GH #18).
- Make @@zodbbrowser_history give up rendering all the details if it takes
too long (more than 10 seconds); you can force fast mode by adding ``fast``
to the query parameters, and you can force full mode by adding ``full`` to
the query parameters.
- Make it possible to turn off the (slow) history part of @@zodbbrowser
by adding ``nohist`` to the query parameters.
0.16.2 (2020-11-24)
~~~~~~~~~~~~~~~~~~~
- Incomplete fix for @@zodbbrowser_history when using ZEO, broken since 0.12
(AttributeError: 'TransactionIterator' object has no attribute 'close',
GH #31).
- Add support for Python 3.8 and 3.9.
- Drop support for Python 3.5.
0.16.1 (2019-07-30)
~~~~~~~~~~~~~~~~~~~
- Fix system error when an object refers to another object that was
added in a newer transaction (GH #29).
0.16.0 (2019-07-24)
~~~~~~~~~~~~~~~~~~~
- You can now view disassembled raw pickle data.
0.15.2 (2019-07-11)
~~~~~~~~~~~~~~~~~~~
- Stop depending on the obsolete ZODB3 metapackage from 2012. Instead
depend directly on persistent, BTrees, ZODB, and ZEO.
0.15.1 (2019-04-23)
~~~~~~~~~~~~~~~~~~~
- Dropped Python 3.4 support.
0.15.0 (2019-04-02)
~~~~~~~~~~~~~~~~~~~
- Add support for Python 3.7.
- Add support for PyPy and PyPy3.
- Support zope.security proxies in PURE_PYTHON mode.
- Use our custom __repr__ instead of the new persistent one.
- Transaction IDs in generated URLs are now in hex.
- 100% test coverage.
0.14.0 (2017-11-15)
~~~~~~~~~~~~~~~~~~~
- Add support for Python 3.4, 3.5, 3.6.
- Drop support for ZODB 3.8.
0.13.1 (2017-10-06)
~~~~~~~~~~~~~~~~~~~
- Fixed @@zodbbrowser_history with recent versions of ZODB (AttributeError:
MVCCAdapterInstance doesn't have attribute ``iterator``).
0.13.0 (2016-11-24)
~~~~~~~~~~~~~~~~~~~
- Dropped Python 2.6 support (because ZODB---more specifically BTrees---dropped
it).
- Fixed rollback to work with ``transaction`` >= 2.0 (transaction notes must be
Unicode now).
0.12.0 (2015-02-25)
~~~~~~~~~~~~~~~~~~~
- Show request URLs in history record headers (GH #7).
- Automate ZCML loading for Plone (GH #9).
- Fix standalone zodbbrowser when used with Zope 2 packages (GH #10).
0.11.2 (2015-01-09)
~~~~~~~~~~~~~~~~~~~
- Fix AttributeError: __repr__ when encountering instances of old-style
classes (GH #6).
0.11.1 (2014-12-12)
~~~~~~~~~~~~~~~~~~~
- Updated bootstrap.py (GH #3).
- Fixed @@zodbbrowser_history not seeing new transactions because the
cache is forever (GH #4).
0.11.0 (2013-05-29)
~~~~~~~~~~~~~~~~~~~
- Dropped Python 2.4 and 2.5 support.
- Migrated source from Launchpad to Github.
- Custom representation of OOBucket objects.
- Slightly better error pages when you specify an invalid/nonexistent OID.
- Handle OrderedContainers with non-persistent ``_order`` or ``_data``
attributes (I've seen the first in the wild).
- Partial fix for LP#1185175: cannot browse objects of classes that use
zope.interface.implementsOnly.
0.10.4 (2012-12-19)
~~~~~~~~~~~~~~~~~~~
- The previous release was completely broken (LP#1091716). Fix the issue,
and fix tox.ini to actually run functional tests in addition to unit tests.
0.10.3 (2012-12-06)
~~~~~~~~~~~~~~~~~~~
- Custom representation of persistent objects with no __repr__ to avoid
showing misleading memory addresses (LP#1087138).
0.10.2 (2012-11-28)
~~~~~~~~~~~~~~~~~~~
- Bugfix for POSKeyErrors when viewing BTrees of non-trivial sizes
(LP#953480). This fixes a regression introduced in version 0.10.0.
0.10.1 (2012-11-27)
~~~~~~~~~~~~~~~~~~~
- Standalone app mode uses the Zope exception formatter for easier debugging.
- Bugfix for weird LocationError: '__class__' for some containers
with custom traversal rules.
- Links to persistent objects in value representations now also use
hex OIDs.
0.10.0 (2012-02-29)
~~~~~~~~~~~~~~~~~~~
- Browsing of transaction records (@@zodb_history). Initial implementation so
far, unbelievably slow when you have large databases (LP#907900).
- ZODB Browser now avoids writing to the database even in read-write mode.
Previously when your objects had write-on-read semantics, those writes might
have snuck in.
- More descriptive page titles (LP#931115).
- Show object size in the header (LP#497780).
- Expand truncated values by clicking on them (LP#931184).
- More user-friendly representation of multiline text values.
- Update maintainer email in setup.py.
- Better error message for "address already in use" errors.
0.9.0 (2011-10-21)
~~~~~~~~~~~~~~~~~~
- Make it possible to use zodbbrowser as a plugin for Zope 2.12. Previously
you could only use the standalone zodbbrowser app with Zope 2.12 databases.
- Be more robust against exceptions happening in repr(): show the value as
"<unrepresentable Foo>" instead of erroring out.
- Make 'python -m zodbbrowser' run the standalone app on Python 2.5 and 2.7.
Note that 'python -m zodbbrowser.standalone' already worked on Python 2.4
through 2.7.
- Add an option to specify ZEO storage name (--storage NAME). Contributed by
Thierry Florac.
0.8.1 (2010-12-18)
~~~~~~~~~~~~~~~~~~
- Show tuple differences more clearly in the history. (Uses a really dumb
diff algorithm that just looks for a common prefix/suffix. Works really
well when you append to the end, or remove just a single item. I cannot
use difflib.SequenceMapper because there's no guarantee tuple items are
hashable.)
- Make it possible to locate an object by OID: press g, then type the oid
  (both hex and decimal supported; even octal, should you wish to use it).
You can also find subobjects starting from a given OID by entering paths
like '0x1234/sub/object'.
- Catch and display unpickling errors of the current state, not just
historical older states.
- Handle missing interfaces that are directly provided by persistent objects.
This works for the standalone zodbbrowser application; the zope.interface
monkey-patch for this is too intrusive to install when using zodbbrowser
as a plugin.
- Made ``pip install zodbbrowser`` work properly by adding explicit
dependencies that easy_install would've picked up from setuptools extras.
Note: if you get ``AttributeError: __file__``, make sure
zope.app.applicationcontrol is at least version 3.5.9. Older versions will
not work with pip.
0.8.0 (2010-11-16)
~~~~~~~~~~~~~~~~~~
- Support all kinds of ZODB databases, not just those used by Zope 3/BlueBream
apps (LP#494987).
- Renders tuples and lists that contain large dicts better.
- Remove dependency on zope.dublincore/zope.app.dublincore (LP#622180).
0.7.2 (2010-08-13)
~~~~~~~~~~~~~~~~~~
- Fixed TypeError: int() can't convert non-string with explicit base
that could occur if no persistent objects were accessible from the request,
and no explicit oid was passed.
- Handle proxies better: when type(obj) != obj.__class__, show both.
- Handle ContainedProxy objects with their special persistence scheme.
0.7.1 (2010-03-30)
~~~~~~~~~~~~~~~~~~
- IMPORTANT BUGFIX: don't leave old object states lying around in ZODB object
cache, which could lead to DATA LOSS (LP#487243 strikes again, this time
for OrderedContainers).
I've audited the code and am fairly confident this bug is now dead dead
dead.
- Try to discard data modifications when the DB is opened read-only.
- Avoid deprecated zope.testing.doctest.
- Avoid zope.app.securitypolicy; use zope.securitypolicy.
0.7 (2009-12-10)
~~~~~~~~~~~~~~~~
- Stopped using setuptools extras; now easy_install zodbbrowser is sufficient
to run the standalone app.
0.6.1 (2009-12-09)
~~~~~~~~~~~~~~~~~~
- Compatibility with latest Zope packages, including ZODB 3.9.x.
0.6 (2009-12-07)
~~~~~~~~~~~~~~~~
- Ability to revert object state to an older version. Requires a read-write
database connection (i.e. run bin/zodbbrowser --rw). The button is hidden
and appears when you're hovering over a transaction entry in the list.
- Collapse long item lists by default.
0.5.1 (2009-11-23)
~~~~~~~~~~~~~~~~~~
- IMPORTANT BUGFIX: don't leave old object states lying around in ZODB object
cache, which could lead to DATA LOSS (LP#487243). This affected OOBTree
objects.
0.5 (2009-11-23)
~~~~~~~~~~~~~~~~
- Be a bit more tolerant to unpickling errors (show which revision could not
be loaded instead of breaking the whole page).
- Show full history of OOBTree objects and subobjects (LP#474334).
- Change background color of links on hover, to make it clear what
object you'll see when you click, especially when the __repr__ shown
contains reprs of subobjects.
- Show size of containers next to the "Items" heading (LP#486910).
- Show size of containers next to their representation, e.g.
"<persistent.dict.PersistentDict object at 0xad0b3ec> (0 items)".
- Pay attention when __name__ is declared as a class attribute (LP#484899).
- Show names of directly provided interfaces on objects (i.e. show a better
representation of pickled zope.interface.Provides objects).
- Pretty-printing of dictionaries (including nested ones).
0.4 (2009-10-11)
~~~~~~~~~~~~~~~~
- @@zodbbrowser oid and tid parameters now accept values in hex format (0x0123)
Patch by Adam Groszer.
0.3.1 (2009-07-17)
~~~~~~~~~~~~~~~~~~
- Fixed install error on Windows (path cannot end in /).
0.3 (2009-07-17)
~~~~~~~~~~~~~~~~
- First public release
| zodbbrowser | /zodbbrowser-0.17.1.tar.gz/zodbbrowser-0.17.1/CHANGES.rst | CHANGES.rst |
ZODB Browser
============
|buildstatus|_ |appveyor|_ |coverage|_
The ZODB browser allows you to inspect persistent objects stored in the ZODB,
view their attributes and historical changes made to them.
.. warning::
ZODB is based on Python pickles, which are not secure -- they allow
**arbitrary command execution**. Do not use zodbbrowser on databases from
untrusted sources.
.. contents::
Usage as a standalone project
-----------------------------
Install all the dependencies into the source tree with zc.buildout::
python bootstrap.py
bin/buildout
Run bin/zodbbrowser specifying either a filename or a ZEO address ::
bin/zodbbrowser /path/to/Data.fs
bin/zodbbrowser --zeo localhost:9080
bin/zodbbrowser --zeo /path/to/zeosock
If you don't have a spare Data.fs to test with, you can create a new empty
one with just the barest Zope 3 scaffolding in it::
bin/zodbbrowser empty.fs --rw
Open http://localhost:8070 in a web browser. Note that there are no
access controls; all other users on the local machine will be able to
access the database contents.
Or you could try to use ``easy_install`` or ``pip``. It may work or it may
not, depending on the current state of all the dependencies (buildout.cfg
hardcodes dependency versions to a known-working-together state, called the
"Zope 3.4 Known Good Set", so buildout-based installs are safer) ::
easy_install zodbbrowser
zodbbrowser /path/to/Data.fs
Command-line options
--------------------
Run ``bin/zodbbrowser --help`` to see a full and up-to-date list of
command-line options::
Usage: zodbbrowser [options] [FILENAME | --zeo ADDRESS]
Open a ZODB database and start a web-based browser app.
Options:
-h, --help show this help message and exit
--zeo=ADDRESS connect to ZEO server instead
--listen=ADDRESS specify port (or host:port) to listen on
--rw open the database read-write (allows creation of the
standard Zope local utilities if missing)
Help! Broken objects everywhere
--------------------------------
If you don't want to see ``<persistent broken ...>`` everywhere, make sure
your application objects are importable from the Python path. The easiest way
of doing that is adding zodbbrowser to your application's buildout (or
virtualenv, if you use virtualenvs). This way your application (or Zope's)
nice __repr__ will also be used.
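For a buildout-managed application that usually amounts to a couple of extra
lines (the part and egg names below are placeholders, not zodbbrowser
requirements)::

    [instance]
    eggs =
        myapplication
        zodbbrowser

For a virtualenv it is simply a matter of installing zodbbrowser into the
same environment, for example with ``pip install zodbbrowser``.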
Online help
-----------
There's a little 'help' link in the bottom-right corner of every page that
describes the user interface in greater detail.
Usage as a plugin
-----------------
Add zodbbrowser to the list of eggs (e.g. in buildout.cfg of your app) and
add this to your site.zcml::
<include package="zodbbrowser" />
Rerun bin/buildout, restart Zope and append @@zodbbrowser to the end of the
URL to start browsing, e.g. http://localhost:8080/@@zodbbrowser. Or, if you
still use ZMI (the Zope Management Interface), look for a new menu item
titled "ZODB Browser".
Alternatives
------------
There's a package called z3c.zodbbrowser in the Zope svn repository that
implements the same idea (but without history browsing) as a GUI desktop
application written using wxPython. It doesn't have a website and was never
released to the Python Package Index.
There's also `dm.historical`__ which provides access to object history from
an interactive Python shell.
__ https://pypi.python.org/pypi/dm.historical
If you're not interested in history or objects that cannot be reached
through URL traversal, you can use the built-in object inspector that
comes with Zope 3 / Grok.
Authors
-------
ZODB Browser was developed by Tautvilas Mečinskas ([email protected]) and
Marius Gedminas ([email protected]) from `Programmers of Vilnius
<https://pov.lt/>`_. It is licenced under the `Zope Public Licence
<https://opensource.org/licenses/ZPL-2.0>`_.
Please report bugs at https://github.com/mgedmin/zodbbrowser/issues.
There's an old bugtracker at https://bugs.launchpad.net/zodbbrowser but I'd
really rather prefer new bugs in GitHub.
.. |buildstatus| image:: https://api.travis-ci.com/mgedmin/zodbbrowser.svg?branch=master
.. _buildstatus: https://travis-ci.com/mgedmin/zodbbrowser
.. |appveyor| image:: https://ci.appveyor.com/api/projects/status/github/mgedmin/zodbbrowser?branch=master&svg=true
.. _appveyor: https://ci.appveyor.com/project/mgedmin/zodbbrowser
.. |coverage| image:: https://coveralls.io/repos/mgedmin/zodbbrowser/badge.svg?branch=master
.. _coverage: https://coveralls.io/r/mgedmin/zodbbrowser
| zodbbrowser | /zodbbrowser-0.17.1.tar.gz/zodbbrowser-0.17.1/README.rst | README.rst |
import os
import shutil
import sys
import tempfile
from optparse import OptionParser
__version__ = '2015-07-01'
# See zc.buildout's changelog if this version is up to date.
tmpeggs = tempfile.mkdtemp(prefix='bootstrap-')
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --find-links to point to local resources, you can keep
this script from going over the network.
'''
parser = OptionParser(usage=usage)
parser.add_option("--version",
action="store_true", default=False,
help=("Return bootstrap.py version."))
parser.add_option("-t", "--accept-buildout-test-releases",
dest='accept_buildout_test_releases',
action="store_true", default=False,
help=("Normally, if you do not specify a --version, the "
"bootstrap script and buildout gets the newest "
"*final* versions of zc.buildout and its recipes and "
"extensions for you. If you use this flag, "
"bootstrap and buildout will get the newest releases "
"even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
help=("Specify the path to the buildout configuration "
"file to be used."))
parser.add_option("-f", "--find-links",
help=("Specify a URL to search for buildout releases"))
parser.add_option("--allow-site-packages",
action="store_true", default=False,
help=("Let bootstrap.py use existing site packages"))
parser.add_option("--buildout-version",
help="Use a specific zc.buildout version")
parser.add_option("--setuptools-version",
help="Use a specific setuptools version")
parser.add_option("--setuptools-to-dir",
help=("Allow for re-use of existing directory of "
"setuptools versions"))
options, args = parser.parse_args()
if options.version:
print("bootstrap.py version %s" % __version__)
sys.exit(0)
######################################################################
# load/install setuptools
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
ez = {}
if os.path.exists('ez_setup.py'):
exec(open('ez_setup.py').read(), ez)
else:
exec(urlopen('https://bootstrap.pypa.io/ez_setup.py').read(), ez)
if not options.allow_site_packages:
# ez_setup imports site, which adds site packages
# this will remove them from the path to ensure that incompatible versions
# of setuptools are not in the path
import site
# inside a virtualenv, there is no 'getsitepackages'.
# We can't remove these reliably
if hasattr(site, 'getsitepackages'):
for sitepackage_path in site.getsitepackages():
# Strip all site-packages directories from sys.path that
# are not sys.prefix; this is because on Windows
# sys.prefix is a site-package directory.
if sitepackage_path != sys.prefix:
sys.path[:] = [x for x in sys.path
if sitepackage_path not in x]
setup_args = dict(to_dir=tmpeggs, download_delay=0)
if options.setuptools_version is not None:
setup_args['version'] = options.setuptools_version
if options.setuptools_to_dir is not None:
setup_args['to_dir'] = options.setuptools_to_dir
ez['use_setuptools'](**setup_args)
import setuptools
import pkg_resources
# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
if path not in pkg_resources.working_set.entries:
pkg_resources.working_set.add_entry(path)
######################################################################
# Install buildout
ws = pkg_resources.working_set
setuptools_path = ws.find(
pkg_resources.Requirement.parse('setuptools')).location
# Fix sys.path here as easy_install.pth added before PYTHONPATH
cmd = [sys.executable, '-c',
'import sys; sys.path[0:0] = [%r]; ' % setuptools_path +
'from setuptools.command.easy_install import main; main()',
'-mZqNxd', tmpeggs]
find_links = os.environ.get(
'bootstrap-testing-find-links',
options.find_links or
('http://downloads.buildout.org/'
if options.accept_buildout_test_releases else None)
)
if find_links:
cmd.extend(['-f', find_links])
requirement = 'zc.buildout'
version = options.buildout_version
if version is None and not options.accept_buildout_test_releases:
# Figure out the most recent final version of zc.buildout.
import setuptools.package_index
_final_parts = '*final-', '*final'
def _final_version(parsed_version):
try:
return not parsed_version.is_prerelease
except AttributeError:
# Older setuptools
for part in parsed_version:
if (part[:1] == '*') and (part not in _final_parts):
return False
return True
index = setuptools.package_index.PackageIndex(
search_path=[setuptools_path])
if find_links:
index.add_find_links((find_links,))
req = pkg_resources.Requirement.parse(requirement)
if index.obtain(req) is not None:
best = []
bestv = None
for dist in index[req.project_name]:
distv = dist.parsed_version
if _final_version(distv):
if bestv is None or distv > bestv:
best = [dist]
bestv = distv
elif distv == bestv:
best.append(dist)
if best:
best.sort()
version = best[-1].version
if version:
requirement = '=='.join((requirement, version))
cmd.append(requirement)
import subprocess
if subprocess.call(cmd) != 0:
raise Exception(
"Failed to execute command:\n%s" % repr(cmd)[1:-1])
######################################################################
# Import and run buildout
ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout
if not [a for a in args if '=' not in a]:
args.append('bootstrap')
# if -c was provided, we push it back into args for buildout' main function
if options.config_file is not None:
args[0:0] = ['-c', options.config_file]
zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs) | zodbbrowser | /zodbbrowser-0.17.1.tar.gz/zodbbrowser-0.17.1/bootstrap.py | bootstrap.py |
===========
Changelog
===========
2.5 (2022-11-03)
================
- Add support for the final Python 3.11 release.
2.4 (2022-09-15)
================
- Add support for Python 3.11 (as of 3.11.0b3).
- Disable unsafe math optimizations in C code. See `pull request 73
<https://github.com/zopefoundation/zodbpickle/pull/73>`_.
2.3 (2022-04-22)
================
- Add support for Python 3.11 (as of 3.11.0a7).
2.2.0 (2021-09-29)
==================
- Add support for Python 3.10.
2.1.0 (2021-09-24)
==================
- Add support for Python 3.9.
2.0.0 (2019-11-13)
==================
- CPython 2: Make ``zodbpickle.binary`` objects smaller and untracked
by the garbage collector. Now they behave more like the native bytes
object. Just like it, and just like on Python 3, they cannot have
arbitrary attributes or be weakly referenced. See `issue 53
<https://github.com/zopefoundation/zodbpickle/issues/53>`_.
1.1 (2019-11-09)
================
- Add support for Python 3.8.
- Drop support for Python 3.4.
1.0.4 (2019-06-12)
==================
- Fix pickle corruption under certain conditions. See `pull request 47
<https://github.com/zopefoundation/zodbpickle/pull/47>`_.
1.0.3 (2018-12-18)
==================
- Fix a bug: zodbpickle.slowpickle assigned `_Pickler` to `Unpickler`.
1.0.2 (2018-08-10)
==================
- Add support for Python 3.7.
1.0.1 (2018-05-16)
==================
- Fix a memory leak in pickle protocol 3 under Python 2. See `issue 36
<https://github.com/zopefoundation/zodbpickle/issues/36>`_.
1.0 (2018-02-09)
================
- Add a warning to the readme not to use untrusted pickles.
- Drop support for Python 3.3.
0.7.0 (2017-09-22)
==================
- Drop support for Python 2.6 and 3.2.
- Add support for Jython 2.7.
- Add support for Python 3.5 and 3.6.
0.6.0 (2015-04-02)
==================
- Restore the ``noload`` behaviour from Python 2.6 and provide the
``noload`` method on the non-C-accelerated unpicklers under PyPy and
Python 2.
- Add support for PyPy, PyPy3, and Python 3.4.
0.5.2 (2013-08-17)
==================
- Import accelerator from *our* extension module under Py3k.
See https://github.com/zopefoundation/zodbpickle/issues/6,
https://github.com/zopefoundation/zodbpickle/issues/7.
- Fix unpickler's ``load_short_binstring`` across supported platforms.
0.5.1 (2013-07-06)
==================
- Update all code and tests to Python 2.6.8, 2.7.5, 3.2.5, 3.3.2 .
- Add the modules ``zodbpickle.fastpickle`` and ``zodbpickle.slowpickle``.
This provides a version-independent choice of the C or Python
implementation.
- Fix a minor bug on OS X.
0.5.0 (2013-06-14)
==================
- Removed support for the ``bytes_as_strings`` arguments to pickling APIs:
the pickles created when that argument was true might not be unpickled
without passing ``encoding='bytes'``, which ZODB couldn't reliably enforce.
On Py3k, ZODB will be using ``protocol=3`` pickles anyway.
0.4.4 (2013-06-07)
==================
- Add protocol 3 opcodes to the C version of the ``noload()`` dispatcher.
0.4.3 (2013-06-07)
==================
- Packaging error: remove spurious ``-ASIDE`` file from sdist.
0.4.2 (2013-06-07)
==================
- Fix NameError in pure-Python version of ``Unpickler.noload_appends``.
- Fix NameError in pure-Python version of ``Unpickler.noload_setitems``.
0.4.1 (2013-04-29)
==================
- Fix typo in Python2 version of ``zodbpickle.pickle`` module.
0.4 (2013-04-28)
================
- Support the common pickle module interface for Python 2.6, 2.7, 3.2, and 3.3.
- Split the Python implementations / tests into Python2- and Py3k-specific
variants.
- Added a fork of the Python 2.7 ``_pickle.c``, for use under Python2.
The fork adds support for the Py3k ``protocol 3`` opcodes.
- Added a custom ``binary`` type for use in Python2 apps.
Derived from ``bytes``, the ``binary`` type allows Python2 apps to pickle
binary data using opcodes which will cause it to be unpickled as ``bytes``
on Py3k. Under Py3k, the ``binary`` type is just an alias for ``bytes``.
0.3 (2013-03-18)
================
- Added ``noload`` code to Python 3.2 version of ``Unpickler``. As with
the Python 3.3 version, this code remains untested.
- Added ``bytes_as_strings`` option to the Python 3.2 version of
``Pickler``, ``dump``, and ``dumps``.
0.2 (2013-03-05)
================
- Added ``bytes_as_strings`` option to ``Pickler``, ``dump``, and ``dumps``.
- Incomplete support for Python 3.2:
- Move ``_pickle.c`` -> ``_pickle_33.c``.
- Clone Python 3.2.3's ``_pickle.c`` -> ``_pickle_32.c`` and apply the
same patch.
- Choose between them at build time based on ``sys.version_info``.
- Disable some tests of 3.3-only features.
- Missing: implementation of ``noload()`` in ``_pickle_32.c``.
- Missing: implementation of ``bytes_as_strings=True`` in ``_pickle_32.c``.
0.1.0 (2013-02-27)
==================
- Initial release of Python 3.3's pickle with the patches of Python
`issue 6784`__ applied.
.. __: http://bugs.python.org/issue6784#msg156166
- Added support for ``errors="bytes"``.
| zodbpickle | /zodbpickle-2.5.tar.gz/zodbpickle-2.5/CHANGES.rst | CHANGES.rst |
``zodbpickle`` README
=====================
.. image:: https://github.com/zopefoundation/zodbpickle/actions/workflows/tests.yml/badge.svg
:target: https://github.com/zopefoundation/zodbpickle/actions/workflows/tests.yml
.. image:: https://coveralls.io/repos/github/zopefoundation/zodbpickle/badge.svg
:target: https://coveralls.io/github/zopefoundation/zodbpickle
:alt: Coverage status
.. image:: https://img.shields.io/pypi/v/zodbpickle.svg
:target: https://pypi.python.org/pypi/zodbpickle
:alt: PyPI
.. image:: https://img.shields.io/pypi/pyversions/zodbpickle.svg
:target: https://pypi.python.org/pypi/zodbpickle
:alt: Python versions
This package presents a uniform pickling interface for ZODB:
- Under Python2, this package forks both Python 2.7's ``pickle`` and
``cPickle`` modules, adding support for the ``protocol 3`` opcodes.
It also provides a new subclass of ``bytes``, ``zodbpickle.binary``,
which Python2 applications can use to pickle binary values such that
they will be unpickled as ``bytes`` under Py3k.
- Under Py3k, this package forks the ``pickle`` module (and the supporting
  C extension) from both Python 3.2 and Python 3.3. The fork adds support
for the ``noload`` operations used by ZODB.
Caution
-------
``zodbpickle`` relies on Python's ``pickle`` module.
The ``pickle`` module is not intended to be secure against erroneous or
maliciously constructed data. Never unpickle data received from an
untrusted or unauthenticated source as arbitrary code might be executed.
Also see https://docs.python.org/3.6/library/pickle.html
General Usage
-------------
To get compatibility between Python 2 and 3 pickling, replace::
import pickle
by::
from zodbpickle import pickle
This provides compatibility, but has the effect that you get the fast implementation
in Python 3, while Python 2 uses the slow version.
To get a more deterministic choice of the implementation, use one of::
from zodbpickle import fastpickle # always C
from zodbpickle import slowpickle # always Python
Both modules can co-exist which is helpful for comparison.
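As a quick sanity check that both implementations are usable side by side,
the following sketch simply round-trips a small object through each module
(it assumes nothing beyond the standard ``dumps``/``loads`` API)::

    from zodbpickle import fastpickle, slowpickle

    obj = {'key': b'\xff', 'count': 42}
    assert fastpickle.loads(fastpickle.dumps(obj, 2)) == obj
    assert slowpickle.loads(slowpickle.dumps(obj, 2)) == obj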
But there is a bit more to consider, so please read on!
Loading/Storing Python 2 Strings
--------------------------------
In all their wisdom, the Python developers have decided that Python 2 ``str``
instances should be loaded as Python 3 ``str`` objects (i.e. unicode
strings). Patches were proposed in Python `issue 6784`__ but were never
applied. This code base contains those patches.
.. __: http://bugs.python.org/issue6784
Example 1: Loading Python 2 pickles on Python 3 ::
$ python2
>>> import pickle
>>> pickle.dumps('\xff', protocol=0)
"S'\\xff'\np0\n."
>>> pickle.dumps('\xff', protocol=1)
'U\x01\xffq\x00.'
>>> pickle.dumps('\xff', protocol=2)
'\x80\x02U\x01\xffq\x00.'
$ python3
>>> from zodbpickle import pickle
>>> pickle.loads(b"S'\\xff'\np0\n.", encoding='bytes')
b'\xff'
>>> pickle.loads(b'U\x01\xffq\x00.', encoding='bytes')
b'\xff'
>>> pickle.loads(b'\x80\x02U\x01\xffq\x00.', encoding='bytes')
b'\xff'
Example 2: Loading Python 3 pickles on Python 2 ::
$ python3
>>> from zodbpickle import pickle
>>> pickle.dumps(b"\xff", protocol=0)
b'c_codecs\nencode\np0\n(V\xff\np1\nVlatin1\np2\ntp3\nRp4\n.'
>>> pickle.dumps(b"\xff", protocol=1)
b'c_codecs\nencode\nq\x00(X\x02\x00\x00\x00\xc3\xbfq\x01X\x06\x00\x00\x00latin1q\x02tq\x03Rq\x04.'
>>> pickle.dumps(b"\xff", protocol=2)
b'\x80\x02c_codecs\nencode\nq\x00X\x02\x00\x00\x00\xc3\xbfq\x01X\x06\x00\x00\x00latin1q\x02\x86q\x03Rq\x04.'
$ python2
>>> import pickle
>>> pickle.loads('c_codecs\nencode\np0\n(V\xff\np1\nVlatin1\np2\ntp3\nRp4\n.')
'\xff'
>>> pickle.loads('c_codecs\nencode\nq\x00(X\x02\x00\x00\x00\xc3\xbfq\x01X\x06\x00\x00\x00latin1q\x02tq\x03Rq\x04.')
'\xff'
>>> pickle.loads('\x80\x02c_codecs\nencode\nq\x00X\x02\x00\x00\x00\xc3\xbfq\x01X\x06\x00\x00\x00latin1q\x02\x86q\x03Rq\x04.')
'\xff'
Example 3: everything breaks down ::
$ python2
>>> class Foo(object):
... def __init__(self):
... self.x = 'hello'
...
>>> import pickle
>>> pickle.dumps(Foo(), protocol=0)
"ccopy_reg\n_reconstructor\np0\n(c__main__\nFoo\np1\nc__builtin__\nobject\np2\nNtp3\nRp4\n(dp5\nS'x'\np6\nS'hello'\np7\nsb."
>>> pickle.dumps(Foo(), protocol=1)
'ccopy_reg\n_reconstructor\nq\x00(c__main__\nFoo\nq\x01c__builtin__\nobject\nq\x02Ntq\x03Rq\x04}q\x05U\x01xq\x06U\x05helloq\x07sb.'
>>> pickle.dumps(Foo(), protocol=2)
'\x80\x02c__main__\nFoo\nq\x00)\x81q\x01}q\x02U\x01xq\x03U\x05helloq\x04sb.'
$ python3
>>> from zodbpickle import pickle
>>> class Foo(object): pass
...
>>> foo = pickle.loads("ccopy_reg\n_reconstructor\np0\n(c__main__\nFoo\np1\nc__builtin__\nobject\np2\nNtp3\nRp4\n(dp5\nS'x'\np6\nS'hello'\np7\nsb.", encoding='bytes')
>>> foo.x
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'Foo' object has no attribute 'x'
wait what? ::
>>> foo.__dict__
{b'x': b'hello'}
oooh. So we use ``encoding='ASCII'`` (the default) and ``errors='bytes'`` and
hope it works::
>>> foo = pickle.loads("ccopy_reg\n_reconstructor\np0\n(c__main__\nFoo\np1\nc__builtin__\nobject\np2\nNtp3\nRp4\n(dp5\nS'x'\np6\nS'hello'\np7\nsb.", errors='bytes')
>>> foo.x
'hello'
falling back to bytes if necessary ::
>>> pickle.loads(b'\x80\x02U\x01\xffq\x00.', errors='bytes')
b'\xff'
Support for ``noload()``
------------------------
The ZODB uses `cPickle`'s ``noload()`` method to retrieve all persistent
references from a pickle without loading any objects. This feature was removed
from Python 3's pickle, so a full ``load()`` would have to be used instead,
which unnecessarily fills the pickle cache.
This module provides a ``noload()`` method again.
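A minimal sketch of driving it (the constructor and the attribute-style
``persistent_load`` hook follow the stock ``pickle`` API, which the fork
keeps; this toy pickle contains no persistent references, so the collected
list simply stays empty)::

    from io import BytesIO
    from zodbpickle.pickle import Unpickler, dumps

    data = dumps({'answer': 42}, protocol=2)

    refs = []
    unpickler = Unpickler(BytesIO(data))
    unpickler.persistent_load = refs.append  # collect persistent references
    unpickler.noload()  # walk the pickle without building the objects
    print(refs)  # -> []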
| zodbpickle | /zodbpickle-2.5.tar.gz/zodbpickle-2.5/README.rst | README.rst |
set -e -x
# Running inside docker
# Set a cache directory for pip. This was
# mounted to be the same as it is outside docker so it
# can be persisted.
export XDG_CACHE_HOME="/cache"
# XXX: This works for macOS, where everything bind-mounted
# is seen as owned by root in the container. But when the host is Linux
# the actual UIDs come through to the container, triggering
# pip to disable the cache when it detects that the owner doesn't match.
# The below is an attempt to fix that, taken from bcrypt. It seems to work on
# Github Actions.
if [ -n "$GITHUB_ACTIONS" ]; then
echo Adjusting pip cache permissions
mkdir -p $XDG_CACHE_HOME/pip
chown -R $(whoami) $XDG_CACHE_HOME
fi
ls -ld /cache
ls -ld /cache/pip
# We need some libraries because we build wheels from scratch:
yum -y install libffi-devel
tox_env_map() {
case $1 in
*"cp27"*) echo 'py27';;
*"cp35"*) echo 'py35';;
*"cp36"*) echo 'py36';;
*"cp37"*) echo 'py37';;
*"cp38"*) echo 'py38';;
*"cp39"*) echo 'py39';;
*"cp310"*) echo 'py310';;
*"cp311"*) echo 'py311';;
*) echo 'py';;
esac
}
# Compile wheels
for PYBIN in /opt/python/*/bin; do
if \
[[ "${PYBIN}" == *"cp27"* ]] || \
[[ "${PYBIN}" == *"cp35"* ]] || \
[[ "${PYBIN}" == *"cp311"* ]] || \
[[ "${PYBIN}" == *"cp36"* ]] || \
[[ "${PYBIN}" == *"cp37"* ]] || \
[[ "${PYBIN}" == *"cp38"* ]] || \
[[ "${PYBIN}" == *"cp39"* ]] || \
[[ "${PYBIN}" == *"cp310"* ]] ; then
"${PYBIN}/pip" install -e /io/
"${PYBIN}/pip" wheel /io/ -w wheelhouse/
if [ `uname -m` == 'aarch64' ]; then
cd /io/
${PYBIN}/pip install tox
TOXENV=$(tox_env_map "${PYBIN}")
${PYBIN}/tox -e ${TOXENV}
cd ..
fi
rm -rf /io/build /io/*.egg-info
fi
done
# Bundle external shared libraries into the wheels
for whl in wheelhouse/zodbpickle*.whl; do
auditwheel repair "$whl" -w /io/wheelhouse/
done | zodbpickle | /zodbpickle-2.5.tar.gz/zodbpickle-2.5/.manylinux-install.sh | .manylinux-install.sh |
=========
Changes
=========
0.8.0 (2019-11-12)
==================
- Fix ``--min-objects``. Previously it did nothing.
- Add ``--pack`` to pack each storage before running.
- Let the ``--log`` option take a path to a ZConfig logging
configuration file that will be used to configure logging. This
allows fine-grained control over log levels.
- Add a benchmark (``prefetch_cold``) to test the effect of bulk
prefetching objects into the storage cache.
- Add a benchmark (``readCurrent``) to test the speed of using
``Connection.readCurrent`` (specifically, to see how well it can
parallelize).
- Add a benchmark (``tpc``) that explicitly (and only) tests moving
through the three phases of a successful transaction commit on a storage.
- Make pre-loading objects with ``--min-objects`` faster by using
pre-serialized object data.
- Increase the default size of objects to 300 bytes, and make it the
same on Python 2 and Python 3. This closely matches the measurement
of the average object size in a large production database (30
million objects).
- Add a benchmark for allocating new OIDs. See :issue:`47`.
- Add a benchmark for conflict resolution, designed to emphasize
parallel commit. See :issue:`46`.
- Add a benchmark focused *just* on storing new objects, eliminating
the pickling and OID allocation from the timing. See :issue:`49`.
- Enhance the transaction commit benchmarks to show the difference
between implicit commit and explicit commit. ZODB makes extra
storage calls in the implicit case.
- Add support for Python 3.8.
- Allow excluding particular benchmarks on the command line. For
example, ``-cold``.
- When benchmarking multiple ZODB configurations, run a particular
benchmark for all databases before moving on to the next benchmark. Previously
all benchmarks for a database were run before moving on to the next
database. This makes it a bit easier to eyeball results as the
process is running.
0.7.0 (2019-05-31)
==================
- Drop support for Python 3.4.
- Add support for Python 3.7.
- The timing loops have been rewritten on top of `pyperf
<https://pyperf.readthedocs.io/en/latest/index.html>`_. This
produces much more reliable/stable, meaningful data with a richer set of
statistics information captured, and the ability to do analysis and
comparisons on data files captured after a run is complete. Some
command line options have changed as a result of this, and the
output no longer is in terms of "objects per second" but how long a
particular loop operation takes. See :issue:`37` and :issue:`35`.
- The timing data for in-process concurrency (gevent and threads)
attempts to take the concurrency into account to produce more
accurate results. See :issue:`38`.
- Add debug logging when we think we detect gevent-cooperative and
gevent-unaware databases in gevent mode.
- Add the ability to specify only certain subsets of benchmarks to run
on the command line. In particular, if you've already run the
``add`` benchmark once, you can run other benchmarks such as the
``cold`` benchmark again independently as many times as you want (as
long as you don't ``zap`` the database; that's not allowed).
- The benchmarks more carefully verify that they tested what they
wanted to. For example, they check that their Connection's load count
matches what it should be (0 in the case of the "steamin" test).
- Profiling in gevent mode captures the entire set of data in a single
file. See :issue:`33`.
- The ``--zap`` option accepts a ``force`` argument to eliminate the
prompts. See :issue:`36`.
- Multi-threaded runs handle exceptions and signals more reliably.
Partial fix for :issue:`26`.
- Shared thread read tests clear the caches of connections and the
database in a more controlled way, more closely modeling the
expected behaviour. Previously the cache clearing was
non-deterministic. See :issue:`28`.
- When using gevent, use its Event and Queue implementations for
better cooperation with the event loop.
- Add ``--min-objects`` option to ensure that the underlying database
has at least a set number of objects in place. This lets us test
scaling issues and be more repeatable. This is tested with
FileStorage, ZEO, and RelStorage (RelStorage 2.1a2 or later is
needed for accurate results; earlier versions will add new objects
each time, resulting in database growth).
- Remove the unmaintained buildout configuration. See :issue:`25`.
- Add an option to test the performance of blob storage. See
:issue:`29`.
- Add support for zapping file storages. See :issue:`43`.
- When zapping, do so right before running the 'add' benchmark. This
ensures that the databases are all the same size even when the same
  underlying storage (e.g., MySQL database) is used multiple times in a
configuration. Previously, the second and further uses of the same
storage would not be zapped and so would grow with the data from the
previous contender tests. See :issue:`42`.
- Add a benchmark for empty transaction commits. This tests the
storage synchronization --- in RelStorage, it tests polling the
RDBMS for invalidations. See :issue:`41`.
- Add support for using `vmprof <https://vmprof.readthedocs.io>`_ to
profile, instead of :mod:`cProfile`. See :issue:`34`.
0.6.0 (2016-12-13)
==================
This is a major release that focuses on providing more options to fine
tune the testing process that are expected to be useful to both
deployers and storage authors.
A second major focus has been on producing more stable numeric
results. As such, the results from this version *are not directly
comparable* to results obtained from a previous version.
Platforms
---------
- Add support for Python 3 (3.4, 3.5 and 3.6) and PyPy. Remove support
for Python 2.6 and below.
- ZODB 4 and above are the officially supported versions. ZODB 3 is no
longer tested but may still work.
Incompatible Changes
--------------------
- Remove support for Python 2.6 and below.
- The old way of specifying concurrency levels with a comma separated
list is no longer supported.
Command Line Tool
-----------------
The help output and command parsing has been much improved.
- To specify multiple concurrency levels, specify the ``-c`` option
multiple times. Similarly, to specify multiple object counts,
specify the ``-n`` option multiple times. (For example, ``-c 1 -c 2 -n 100
-n 200`` would run four comparisons). The old way of separating numbers with
commas is no longer supported.
- Add the ``--log`` option to enable process logging. This is useful
when using zodbshootout to understand changes in a single storage.
- Add ``--zap`` to rebuild RelStorage schemas on startup. Useful when
switching between Python 2 and Python 3.
- The reported numbers should be more stable, thanks to running
individual tests more times (via the ``--test-reps`` option) and
taking the mean instead of the min.
- Add ``--dump-json`` to write a JSON representation of more detailed
data than is present in the default CSV results.
Test Additions
--------------
- Add support for testing with BTrees (``--btrees``). This is
especially helpful for comparing CPython and PyPy, and is also
useful for understanding BTree behaviour.
- Add support for testing using threads instead of multiprocessing
(``--threads``). This is especially helpful on PyPy or when testing
concurrency of a RelStorage database driver and/or gevent. Databases
may be shared or unique for each thread.
- Add support for setting the repetition count (``--test-reps``). This
is especially helpful on PyPy.
- Use randomized data for the objects instead of a constant string.
This lets us more accurately model effects due to compression at the
storage or network layers.
- When gevent is installed, add support for testing with the system
monkey patched (``--gevent``). (Note: This might not be supported by all storages.)
- Add ``--leaks`` to use `objgraph <http://mg.pov.lt/objgraph/>`_ to
show any leaking objects at the end of each test repetition. Most
useful to storage and ZODB developers.
Other
-----
- Enable continuous integration testing on Travis-CI and coveralls.io.
- Properly clear ZEO caches on ZODB5. Thanks to Jim Fulton.
- Improve installation with pip. Extras are provided to make testing
RelStorage as easy as testing FileStorage and ZEO.
- The documentation is now hosted at http://zodbshootout.readthedocs.io/
0.5 (2012-09-08)
================
- Updated to MySQL 5.1.65, PostgreSQL 9.1.5, memcached 1.4.15,
and libmemcached 1.0.10.
- Moved development to github.
0.4 (2011-02-01)
================
- Added the --object-size parameter.
0.3 (2010-06-19)
================
- Updated to memcached 1.4.5, libmemcached 0.40, and pylibmc 1.1+.
- Updated to PostgreSQL 8.4.4.
- Updated to MySQL 5.1.47 and a new download url - the old was giving 401's.
0.2 (2009-11-17)
================
- Buildout now depends on a released version of RelStorage.
0.1 (2009-11-17)
================
- Initial release.
| zodbshootout | /zodbshootout-0.8.0.tar.gz/zodbshootout-0.8.0/CHANGES.rst | CHANGES.rst |
This application measures and compares the performance of various
ZODB storages and configurations. It is derived from the RelStorage
speedtest script, but this version allows arbitrary storage types and
configurations, provides more measurements, and produces numbers that
are easier to interpret.
===============
Documentation
===============
.. image:: https://readthedocs.org/projects/zodbshootout/badge/?version=latest
:target: http://zodbshootout.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
`Documentation`_ including `installation instructions`_ is hosted on `readthedocs`_.
The complete `changelog`_ is also there.
.. _`Documentation`: http://zodbshootout.readthedocs.io/en/latest/
.. _`installation instructions`: http://zodbshootout.readthedocs.io/en/latest/install.html
.. _`readthedocs`: http://zodbshootout.readthedocs.io/en/latest/
.. _`changelog`: http://zodbshootout.readthedocs.io/en/latest/changelog.html
=============
Development
=============
.. image:: https://travis-ci.org/zodb/zodbshootout.png?branch=master
:target: https://travis-ci.org/zodb/zodbshootout
.. image:: https://coveralls.io/repos/zodb/zodbshootout/badge.svg?branch=master&service=github
:target: https://coveralls.io/github/zodb/zodbshootout?branch=master
zodbshootout is hosted at GitHub:
https://github.com/zodb/zodbshootout
| zodbshootout | /zodbshootout-0.8.0.tar.gz/zodbshootout-0.8.0/README.rst | README.rst |
==========================
Running ``zodbshootout``
==========================
Executable
==========
.. highlight:: shell
``zodbshootout`` can be executed in one of two ways. The first and
most common is via the ``zodbshootout`` script created by pip or
buildout::
# In an active environment with zodbshootout on the path
$ zodbshootout ...arguments...
# in a non-active virtual environment
$ path/to/venv/bin/zodbshootout ...arguments...
``zodbshootout`` can also be directly invoked as a module using the
python interpreter where it is installed::
    python -m zodbshootout
This documentation will simply refer to the ``zodbshootout`` script,
but both forms are equivalent.
.. tip::
For the most repeatable, stable results, it is important to choose
a fixed value for the hash seed used for Python's builtin objects
(str, bytes, etc). On CPython 2, this means not passing the ``-R``
argument to the interpreter, and not having the `PYTHONHASHSEED
<https://docs.python.org/2/using/cmdline.html#envvar-PYTHONHASHSEED>`_
environment variable set to ``random``. On CPython 3, this means
    having the `PYTHONHASHSEED
    <https://docs.python.org/3/using/cmdline.html#envvar-PYTHONHASHSEED>`__
    environment variable set to a fixed value. On a Unix-like system,
this invocation will work for both versions::
$ PYTHONHASHSEED=0 zodbshootout ...arguments...
Configuration File
==================
The ``zodbshootout`` script requires the name of a database
configuration file. The configuration file contains a list of
databases to test, in ZConfig format. The script then writes and reads
each of the databases while taking measurements. During this process,
the measured times are output for each test of each database; there
are a number of command-line options to control the output or save it
to files for later analysis. (See the `pyperf user guide
<https://pyperf.readthedocs.io/en/latest/user_guide.html>`_ for
information on configuring the output and adjusting the benchmark
process.)
.. highlight:: xml
An example of a configuration file testing the built-in ZODB file
storage, a few variations of ZEO, and `RelStorage <http://relstorage.readthedocs.io/en/latest/configure-application.html#configuring-repoze-zodbconn>`_
would look like this:
.. literalinclude:: ../samples/fs-sample.conf
:language: guess
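In outline, a single-database file-storage configuration follows roughly this
shape (a sketch only; the section name and path are hypothetical, and the
bundled samples above remain the authoritative reference)::

    <zodb fs>
      <filestorage>
        path /tmp/zodbshootout-sample.fs
      </filestorage>
    </zodb>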
The corresponding ZEO configuration file would look like this:
.. literalinclude:: ../samples/zeo.conf
:language: guess
.. note::
If you'll be using RelStorage, you'll need to have the appropriate
RDBMS processes installed, running, and properly configured. Likewise,
if you'll be using ZEO, you'll need to have the ZEO server running.
For pointers to more information, see :doc:`install`.
Options
=======
.. highlight:: guess
The ``zodbshootout`` script accepts the following options. A
description of each option follows the text output.
.. command-output:: zodbshootout --help
.. versionchanged:: 0.7
You can now specify just a subset of benchmarks to run
by giving their names as extra command line arguments after the
configuration file.
Objects
-------
These options control the objects put in the database.
* ``--object-counts`` specifies how many persistent objects to
write or read per transaction. The default is 1000.
.. versionchanged:: 0.7
The old alias of ``-n`` is no longer accepted; pyperf uses that
to determine the number of loop iterations.
Also, this can now only be used once.
.. versionchanged:: 0.6
Specify this option more than once to run the
tests with different object counts.
* ``--btrees`` causes the data to be stored in the BTrees optimized
for ZODB usage (without this option, a PersistentMapping will be
used). This is an advanced option that may be useful when tuning
particular applications and usage scenarios. This adds additional
objects to manage the buckets that make up the BTree. However, if
IO BTrees are used (the default when this option is specified)
internal storage of keys as integers may reduce pickle times and
sizes (and thus improve cache efficiency). This option can take an
argument of either IO or OO to specify the type of BTree to use.
This option is especially interesting on PyPy or when comparing the
pure-Python implementation of BTrees to the C implementation.
.. versionadded:: 0.6
* ``--zap`` recreates the tables and indexes for a RelStorage database
or a ZODB FileStorage. *This option completely destroys any existing
data.* You will be prompted to confirm that you want to do this for
each database that supports it. This is handy for comparing Python 2
and Python 3 (which can't otherwise use the same database schemas).
.. caution:: This option destroys all data in the relevant database.
.. versionchanged:: 0.7
You can now specify an argument of ``force`` to disable the
prompt and zap all databases. You can also give a comma separated
list of database names to zap; only those databases will be
cleared (without prompting).
.. versionadded:: 0.6
* ``--min-objects`` ensures that at least the specified number of
objects exist in the database independently of the objects being
tested. If the database packs away objects or if ``--zap`` is used,
this option will add back the necessary number of objects. If there
are more objects, nothing will be done. This option is helpful for
testing for scalability issues.
.. versionadded:: 0.7
* ``--blobs`` causes zodbshootout to read and write blobs instead of
simple persistent objects. This can be useful for testing options
like shared blob dirs on network filesystems, or RelStorage's
blob-chunk-size, or for diagnosing performance problems. If objects
have to be added to meet the ``--min-objects`` count, they will also
be blobs. Note that because of the way blobs work, there will be two
times the number of objects stored as specified in
``--object-counts``. Expect this option to cause the test to be much
slower.
.. versionadded:: 0.7
Concurrency
-----------
These options control the concurrency of the testing.
* ``-c`` (``--concurrency``) specifies how many tests to run in
parallel. The default is 2. Each of the concurrent tests runs in a
separate process to prevent contention over the CPython global
interpreter lock. In single-host configurations, the performance
measurements should increase with the concurrency level, up to the
number of CPU cores in the computer. In more complex configurations,
performance will be limited by other factors such as network latency.
.. versionchanged:: 0.7
This option can only be used once.
.. versionchanged:: 0.6
Specify this option more than once to run the
tests with different concurrency levels.
* ``--threads`` uses in-process threads for concurrency instead of
multiprocessing. This can demonstrate how the GIL affects various
database adapters under RelStorage, for instance. It can also have
demonstrate the difference that warmup time makes for things like
PyPy's JIT.
By default or if you give the ``shared`` argument to this option,
all threads will share one ZODB DB object and re-use Connections
from the same pool; most threaded applications will use ZODB in this
manner. If you specify the ``unique`` argument, then each thread
will get its own DB object. In addition to showing how the thread
locking strategy of the underlying storage affects things, this can
also highlight the impact of shared caches.
.. versionadded:: 0.6
* ``--gevent`` monkey-patches the system and uses cooperative greenlet
concurrency in a single process (like ``--threads``, which it
implies; you can specify ``--threads unique`` to change the database
sharing).
This option is only available if gevent is installed.
.. note:: Not all storage types will work properly with this option.
RelStorage will, but make sure you select a
gevent-compatible driver like PyMySQL or pg8000 for best
results. If your driver is not compatible, you may
experience timeouts and failures, including
``UnexpectedChildDeathError``. zodbshootout attempts to
compensate for this, but may not always be successful.
.. versionadded:: 0.6
Repetitions
-----------
These options control how many times tests are repeated.
.. versionchanged:: 0.7
The old ``-r`` and ``--test-reps`` options were removed. Instead,
use the ``--loops``, ``--values`` and ``--processes`` options
provided by pyperf.
Profiling
---------
* ``-p`` (``--profile``) enables the Python profiler while running the
tests and outputs a profile for each test in the specified directory.
Note that the profiler typically reduces the database speed by a lot.
This option is intended to help developers isolate performance
bottlenecks.
.. versionadded:: 0.6
* ``--leaks`` prints a summary of possibly leaking objects after each
test repetition. This is useful for storage and ZODB developers.
.. versionchanged:: 0.7
The old ``-l`` alias is no longer accepted.
.. versionadded:: 0.6
Output
------
These options control the output produced.
.. versionchanged:: 0.7
The ``--dump-json`` argument was removed in favor of pyperf's
native output format, which enables much better analysis using
``pyperf show``.
If the ``-o`` argument is specified, then in addition to creating a
single file containing all the test runs, a file will be created
for each database, allowing for direct comparisons using pyperf's
``compare_to`` command.
* ``--log`` enables logging to the console at the specified level. If
no level is specified but this option is given, then INFO logging
will be enabled. This is useful for details about the workings of a
storage and the effects various options have on it.
.. versionchanged:: 0.8
This option can also take a path to a ZConfig logging
configuration file.
.. versionadded:: 0.6
You should write a configuration file that models your intended
database and network configuration. Running ``zodbshootout`` may reveal
configuration optimizations that would significantly increase your
application's performance.
| zodbshootout | /zodbshootout-0.8.0.tar.gz/zodbshootout-0.8.0/doc/zodbshootout.rst | zodbshootout.rst |