code | package | path | filename |
---|---|---|---|
# import Zope3 interfaces
from zope.security.interfaces import NoInteraction, Unauthorized
# import local interfaces
from hurry.workflow.interfaces import IWorkflowInfo, IWorkflowState, ConditionFailedError
from ztfy.workflow.interfaces import IWorkflow, IWorkflowTarget, ITransition, ITransitionTarget
# import Zope3 packages
from zope.component import adapts, getUtility
from zope.event import notify
from zope.interface import Interface, implements
from zope.lifecycleevent import ObjectModifiedEvent
from zope.security.management import getInteraction
from zope.traversing.browser import absoluteURL
# import local packages
from hurry.workflow.workflow import nullCheckPermission, WorkflowTransitionEvent, WorkflowVersionTransitionEvent
from hurry.workflow.workflow import Workflow as WorkflowBase
from hurry.workflow.workflow import WorkflowInfo as WorkflowInfoBase, WorkflowState as WorkflowStateBase
class WorkflowStateAdapter(WorkflowStateBase):
"""Workflow state adapter implementing ILocation"""
adapts(IWorkflowTarget)
implements(IWorkflowState)
class WorkflowInfoAdapter(WorkflowInfoBase):
"""Enhanced IWorkflowInfo adapter handling several registered workflows"""
adapts(IWorkflowTarget)
implements(IWorkflowInfo)
@property
def name(self):
return IWorkflowTarget(self.context).workflow_name
def fireTransition(self, transition_id, comment=None, side_effect=None,
check_security=True):
state = IWorkflowState(self.context)
wf = getUtility(IWorkflow, self.name)
# this raises InvalidTransitionError if id is invalid for current state
transition = wf.getTransition(state.getState(), transition_id)
# check whether we may execute this workflow transition
try:
interaction = getInteraction()
except NoInteraction:
checkPermission = nullCheckPermission
else:
if check_security:
checkPermission = interaction.checkPermission
else:
checkPermission = nullCheckPermission
if not checkPermission(
transition.permission, self.context):
raise Unauthorized(self.context,
'transition: %s' % transition_id,
transition.permission)
# now make sure transition can still work in this context
if not transition.condition(self, self.context):
raise ConditionFailedError
# perform action, return any result as new version
result = transition.action(self, self.context)
if result is not None:
if transition.source is None:
IWorkflowState(result).initialize()
# stamp it with version
state = IWorkflowState(result)
state.setId(IWorkflowState(self.context).getId())
# execute any side effect:
if side_effect is not None:
side_effect(result)
event = WorkflowVersionTransitionEvent(result, self.context,
transition.source,
transition.destination,
transition, comment)
else:
if transition.source is None:
IWorkflowState(self.context).initialize()
# execute any side effect
if side_effect is not None:
side_effect(self.context)
event = WorkflowTransitionEvent(self.context,
transition.source,
transition.destination,
transition, comment)
# change state of context or new object
state.setState(transition.destination)
notify(event)
# send modified event for original or new object
if result is None:
notify(ObjectModifiedEvent(self.context))
else:
notify(ObjectModifiedEvent(result))
return result
def getFireableTransitionIdsToward(self, state):
wf = getUtility(IWorkflow, self.name)
result = []
for transition_id in self.getFireableTransitionIds():
transition = wf.getTransitionById(transition_id)
if transition.destination == state:
result.append(transition_id)
return result
def _getTransitions(self, trigger):
# retrieve all possible transitions from workflow utility
wf = getUtility(IWorkflow, self.name)
transitions = wf.getTransitions(IWorkflowState(self.context).getState())
# now filter these transitions to retrieve all possible
# transitions in this context, and return their ids
return [transition for transition in transitions
if transition.trigger == trigger]
class Workflow(WorkflowBase):
"""Custom workflow class"""
implements(IWorkflow)
def __init__(self, transitions, states, published_states=()):
super(Workflow, self).__init__(transitions)
self.states = states
self.published_states = published_states
class WorkflowTransitionTargetAdapter(object):
adapts(Interface, Interface, ITransition)
implements(ITransitionTarget)
def __init__(self, context, request, transition):
self.context = context
self.request = request
self.transition = transition
def absoluteURL(self):
return '%s/++wf++%s' % (absoluteURL(self.context, self.request),
self.transition.transition_id) | ztfy.workflow | /ztfy.workflow-0.2.9.tar.gz/ztfy.workflow-0.2.9/src/ztfy/workflow/workflow.py | workflow.py |
__docformat__ = "restructuredtext"
# import standard packages
import transaction
# import Zope3 interfaces
from hurry.workflow.interfaces import IWorkflowState
from zope.app.publication.zopepublication import ZopePublication
from zope.catalog.interfaces import ICatalog
from zope.component.interfaces import IComponentRegistry, ISite
from zope.dublincore.interfaces import IZopeDublinCore
from zope.intid.interfaces import IIntIds
from zope.processlifetime import IDatabaseOpenedWithRoot
# import local interfaces
from ztfy.utils.interfaces import INewSiteManagerEvent
from ztfy.workflow.interfaces import IWorkflowTarget
# import Zope3 packages
from zc.catalog.catalogindex import ValueIndex, DateTimeValueIndex
from zope.catalog.catalog import Catalog
from zope.component import adapter, queryUtility
from zope.intid import IntIds
from zope.location import locate
from zope.site import hooks
# import local packages
from ztfy.utils.site import locateAndRegister
def updateDatabaseIfNeeded(context):
"""Check for missing objects at application startup"""
try:
sm = context.getSiteManager()
except:
return
default = sm['default']
# Check for required IIntIds utility
intids = queryUtility(IIntIds)
if intids is None:
intids = default.get('IntIds')
if intids is None:
intids = IntIds()
locate(intids, default)
default['IntIds'] = intids
IComponentRegistry(sm).registerUtility(intids, IIntIds)
# Check for required catalog and index
catalog = default.get('WorkflowCatalog')
if catalog is None:
catalog = Catalog()
locateAndRegister(catalog, default, 'WorkflowCatalog', intids)
IComponentRegistry(sm).registerUtility(catalog, ICatalog, 'WorkflowCatalog')
if catalog is not None:
if 'wf_id' not in catalog:
index = ValueIndex('getId', IWorkflowState, True)
locateAndRegister(index, catalog, 'wf_id', intids)
if 'wf_state' not in catalog:
index = ValueIndex('getState', IWorkflowState, True)
locateAndRegister(index, catalog, 'wf_state', intids)
if 'wf_name' not in catalog:
index = ValueIndex('workflow_name', IWorkflowTarget, False)
locateAndRegister(index, catalog, 'wf_name', intids)
if 'creation_date' not in catalog:
index = DateTimeValueIndex('created', IZopeDublinCore, False)
locateAndRegister(index, catalog, 'creation_date', intids)
if 'modification_date' not in catalog:
index = DateTimeValueIndex('modified', IZopeDublinCore, False)
locateAndRegister(index, catalog, 'modification_date', intids)
if 'effective_date' not in catalog:
index = DateTimeValueIndex('effective', IZopeDublinCore, False)
locateAndRegister(index, catalog, 'effective_date', intids)
if 'expiration_date' not in catalog:
index = DateTimeValueIndex('expires', IZopeDublinCore, False)
locateAndRegister(index, catalog, 'expiration_date', intids)
@adapter(IDatabaseOpenedWithRoot)
def handleOpenedDatabase(event):
db = event.database
connection = db.open()
root = connection.root()
root_folder = root.get(ZopePublication.root_name, None)
for site in root_folder.values():
if ISite(site, None) is not None:
hooks.setSite(site)
updateDatabaseIfNeeded(site)
transaction.commit()
@adapter(INewSiteManagerEvent)
def handleNewSiteManager(event):
updateDatabaseIfNeeded(event.object) | ztfy.workflow | /ztfy.workflow-0.2.9.tar.gz/ztfy.workflow-0.2.9/src/ztfy/workflow/database.py | database.py |
__docformat__ = "restructuredtext"
# import standard packages
# import Zope3 interfaces
from z3c.language.switch.interfaces import II18n
from zope.dublincore.interfaces import IZopeDublinCore
# import local interfaces
from ztfy.skin.interfaces import IBreadcrumbInfo, IDefaultView
# import Zope3 packages
from zope.component import queryMultiAdapter
from zope.traversing.api import getParents, getName
from zope.traversing.browser import absoluteURL
# import local packages
from ztfy.skin.viewlet import ViewletBase
class BreadcrumbsViewlet(ViewletBase):
viewname = ''
@property
def crumbs(self):
result = []
for parent in reversed([self.context, ] + getParents(self.context)):
info = queryMultiAdapter((parent, self.request, self.__parent__), IBreadcrumbInfo)
if info is not None:
result.append({ 'title': info.title,
'path': info.path })
else:
name = getName(parent)
if name:
name = '[ %s ]' % name
else:
name = '[ root ]'
i18n = II18n(parent, None)
if i18n is not None:
name = i18n.queryAttribute('shortname', request=self.request) or i18n.queryAttribute('title', request=self.request) or name
else:
dc = IZopeDublinCore(parent, None)
if dc is not None:
name = dc.title or name
if name:
adapter = queryMultiAdapter((parent, self.request, self.__parent__), IDefaultView)
if (adapter is not None) and adapter.viewname:
self.viewname = '/' + adapter.viewname
result.append({ 'title': name,
'path': '%s%s' % (absoluteURL(parent, request=self.request),
self.viewname) })
return result | ztfy.zmi | /ztfy.zmi-0.3.1.tar.gz/ztfy.zmi-0.3.1/src/ztfy/zmi/viewlets/header/crumbs.py | crumbs.py |
__docformat__ = "restructuredtext"
# import standard packages
# import Zope3 interfaces
from zope.browsermenu.interfaces import IBrowserMenu, IMenuItemType
from zope.publisher.interfaces.browser import IBrowserSkinType
# import local interfaces
# import Zope3 packages
from zope.component import getAdapters, getUtility, queryUtility
from zope.i18n import translate
# import local packages
from ztfy.skin.menu import MenuItem
from ztfy.skin.viewlet import ViewletBase
from ztfy.zmi import _
class ZmiAccessMenuItem(MenuItem):
"""ZMI access menu item"""
title = _("Management tools...")
@property
def url(self):
host = self.request.get('HTTP_HOST')
return self.request.getURL().replace('//' + host, '//' + host + '/++skin++ZMI')
class ZmiViewsAccessViewlet(ViewletBase):
"""ZMI access menu viewlet"""
def __new__(cls, context, request, view, manager):
skin = queryUtility(IBrowserSkinType, 'ZMI')
if (skin is None) or skin.providedBy(request):
return None
else:
return ViewletBase.__new__(cls, context, request, view, manager)
menu = 'zmi_access'
@property
def title(self):
return translate(_("Management"), context=self.request)
@property
def viewlets(self):
return [ ZmiAccessMenuItem(self.context, self.request, self.__parent__, self) ]
def getMenuItemType(id):
return getUtility(IMenuItemType, id)
def getMenuEntries(menu, object, request):
"""Return menu item entries in a TAL-friendly form."""
items = []
for _name, item in getAdapters((object, request), getMenuItemType(menu.id)):
if item.available():
items.append(item)
result = []
for item in items:
result.append({'title': item.title,
'action': item.action,
'selected': (item.selected() and u'selected') or u'',
'order': item.order })
result.sort(key=lambda x: x['order'])
return result
class ActionMenuItem(MenuItem):
"""ZMI action menu item"""
def __init__(self, context, request, view, manager, menu_entry):
super(ActionMenuItem, self).__init__(context, request, view, manager)
self.menu_entry = menu_entry
@property
def title(self):
return self.menu_entry['title']
@property
def viewURL(self):
return self.menu_entry['action']
class ActionsViewlet(ViewletBase):
"""Actions viewlet"""
@property
def viewlets(self):
menu = getUtility(IBrowserMenu, self.menu)
entries = getMenuEntries(menu, self.context, self.request)
return [ActionMenuItem(self.context, self.request, self.__parent__, self, entry) for entry in entries]
class ZmiViewsMenuViewlet(ActionsViewlet):
"""zmi_views menu viewlet"""
@property
def title(self):
try:
return translate(_("Management"), context=self.request)
except:
return u'Management'
class ZmiActionsMenuViewlet(ActionsViewlet):
"""zmi_actions menu viewlet"""
@property
def title(self):
return translate(_("Console"), context=self.request) | ztfy.zmi | /ztfy.zmi-0.3.1.tar.gz/ztfy.zmi-0.3.1/src/ztfy/zmi/viewlets/actions/__init__.py | __init__.py |
# import standard packages
import multiprocessing
import signal
import sys
import zmq
from zmq.eventloop import ioloop, zmqstream
# import Zope3 interfaces
# import local interfaces
from ztfy.zmq.interfaces import IZMQProcess
# import Zope3 packages
from zope.interface import implements
# import local packages
class ZMQProcess(multiprocessing.Process):
"""
This is the base for all processes and offers utility methods
for setup and creating new streams.
"""
implements(IZMQProcess)
socket_type = zmq.REP
def __init__(self, bind_addr, handler):
super(ZMQProcess, self).__init__()
self.context = None
"""The ØMQ :class:`~zmq.Context` instance."""
self.loop = None
"""PyZMQ's event loop (:class:`~zmq.eventloop.ioloop.IOLoop`)."""
self.bind_addr = bind_addr
self.rep_stream = None
self.handler = handler
def setup(self):
"""Creates a :attr:`context` and an event :attr:`loop` for the process."""
self.context = zmq.Context()
self.loop = ioloop.IOLoop.instance()
self.rep_stream, _ = self.stream(self.socket_type, self.bind_addr, bind=True)
self.initStream()
def initStream(self):
"""Initialize response stream"""
self.rep_stream.on_recv(self.handler(self, self.rep_stream, self.stop))
def run(self):
"""Sets up everything and starts the event loop."""
signal.signal(signal.SIGTERM, self.exit)
self.setup()
self.loop.start()
def stop(self):
"""Stops the event loop."""
if self.loop is not None:
self.loop.stop()
self.loop = None
def exit(self, num, frame):
self.stop()
sys.exit()
def stream(self, sock_type, addr, bind, callback=None, subscribe=b''):
"""
Creates a :class:`~zmq.eventloop.zmqstream.ZMQStream`.
:param sock_type: The ØMQ socket type (e.g. ``zmq.REQ``)
:param addr: Address to bind or connect to formatted as *host:port*,
*(host, port)* or *host* (bind to random port).
If *bind* is ``True``, *host* may be:
- the wild-card ``*``, meaning all available interfaces,
- the primary IPv4 address assigned to the interface, in its
numeric representation or
- the interface name as defined by the operating system.
If *bind* is ``False``, *host* may be:
- the DNS name of the peer or
- the IPv4 address of the peer, in its numeric representation.
If *addr* is just a host name without a port and *bind* is
``True``, the socket will be bound to a random port.
:param bind: Binds to *addr* if ``True`` or tries to connect to it
otherwise.
:param callback: A callback for
:meth:`~zmq.eventloop.zmqstream.ZMQStream.on_recv`, optional
:param subscribe: Subscription pattern for *SUB* sockets, optional,
defaults to ``b''``.
:returns: A tuple containing the stream and the port number.
"""
sock = self.context.socket(sock_type)
# addr may be 'host:port' or ('host', port)
if isinstance(addr, (str, unicode)):
addr = addr.split(':')
host, port = addr if len(addr) == 2 else (addr[0], None)
# Bind/connect the socket
if bind:
if port:
sock.bind('tcp://%s:%s' % (host, port))
else:
port = sock.bind_to_random_port('tcp://%s' % host)
else:
sock.connect('tcp://%s:%s' % (host, port))
# Add a default subscription for SUB sockets
if sock_type == zmq.SUB:
sock.setsockopt(zmq.SUBSCRIBE, subscribe)
# Create the stream and add the callback
stream = zmqstream.ZMQStream(sock, self.loop)
if callback:
stream.on_recv(callback)
return stream, int(port)
def processExitFunc(process=None):
if process is not None:
if process.is_alive():
process.terminate()
process.join() | ztfy.zmq | /ztfy.zmq-0.1.4.tar.gz/ztfy.zmq-0.1.4/src/ztfy/zmq/process.py | process.py |
.. contents::
Introduction
============
ZTFY.zmq is a small ZTFY integration of the ZeroMQ library.
It provides two main classes:
- ZMQProcess, a ZMQ listening process based on the multiprocessing package.
- ZMQMessageHandler, a simple ZMQ message handler which delegates its functionality
to a ZMQ-agnostic handling class.
When creating a new ZMQProcess instance, you only have to specify its listening address and its message
handler.
The default ZMQ process is based on request/response (REQ/REP) messages, but this can easily be overridden in
custom subclasses.
Most of this package's main concepts are based on **Stefan Scherfke**'s work. Thanks to him!
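Example
=======

A minimal usage sketch. The ``EchoHandler`` class below is hypothetical: it only illustrates the handler contract visible in ``ZMQProcess.initStream``, where the handler is instantiated as ``handler(process, stream, stop_callback)`` and the resulting instance is used as the stream's ``on_recv`` callback::

    from ztfy.zmq.process import ZMQProcess

    class EchoHandler(object):
        """Hypothetical handler which echoes every received message back."""

        def __init__(self, process, stream, stop):
            self.stream = stream

        def __call__(self, msg):
            # msg is the list of message frames received by the stream
            self.stream.send_multipart(msg)

    process = ZMQProcess('127.0.0.1:5555', EchoHandler)
    process.start()       # standard multiprocessing.Process API
    # ... later, when shutting down ...
    process.terminate()
    process.join()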
| ztfy.zmq | /ztfy.zmq-0.1.4.tar.gz/ztfy.zmq-0.1.4/docs/README.txt | README.txt |
# zthreading.py
A collection of wrapper classes for event broadcast and task management for python (Python Threads or Asyncio)
# TL;DR
## Events
```python
from zthreading.events import EventHandler

handler = EventHandler()

def handle_test_event(msg: str):
    print("The event message: " + msg)

handler.on("test", handle_test_event)
handler.emit("test", "the message")
```
## Tasks
```python
from zthreading.events import EventHandler
from zthreading.tasks import Task

handler = EventHandler()

def handle_test_event(msg: str):
    print("The event message: " + msg)

def run_in_a_different_thread(msg):
    handler.emit("test", msg)

handler.on("test", handle_test_event)
Task(run_in_a_different_thread).start("A message from a thread").join()
```
## Decorators
```python
from zthreading.decorators import collect_consecutive_calls_async, as_task, catch_signal
from signal import Signals, raise_signal
from zthreading.tasks import Task


@collect_consecutive_calls_async()
def consecutive_calls_action():  # Like save this to file.. for example.
    # should be printed twice, once for the first call, and another for the last call.
    print("consecutive called action")


for i in range(1, 20):
    consecutive_calls_action()


@as_task
def my_method_as_task(a: int, b: str):
    print(b + str(a))


my_task: Task = my_method_as_task(1, "a")
my_task.join()


@catch_signal(Signals.SIGTERM)
def my_signal_ignore_method():
    # will be ignored.
    raise_signal(Signals.SIGTERM)
```
See decorator help for more
# Environment variables
1. TASKS_DEFAULT_TO_ASYNC_LOOP - If set to "true", will default all tasks to use asyncio.
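For example, a minimal sketch (this assumes the variable is read when the task machinery is imported, so it must be set beforehand):

```python
import os

# Make all tasks default to the asyncio backend (hypothetical usage).
os.environ["TASKS_DEFAULT_TO_ASYNC_LOOP"] = "true"

from zthreading.tasks import Task
```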
# Advanced Methods and capabilities
Note: The task object is an EventHandler and has all the capabilities of one.
## Task wait functions (staticmethod)
(Examples in code; a short usage sketch follows this list.)
1. Task.wait_for_all (tasks.... )
1. Task.wait_for_some (tasks... )
1. Task.wait_for_one (tasks...)
1. Task.wait_for_events(tasks, event names....)
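A minimal sketch of the wait helpers (the exact signatures — whether they take a list or varargs, and which keyword options exist — are assumptions here; check `zthreading.tasks` for the authoritative API):

```python
from zthreading.tasks import Task

def work(n: int) -> int:
    return n * 2

t1 = Task(work).start(1)
t2 = Task(work).start(2)

# Assumption: the wait helpers accept a collection of tasks.
Task.wait_for_all([t1, t2])
```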
## Piping events
Transferring events from one handler to another. If a weak reference is used,
then the second handler can be deleted by garbage collection.
```python
from zthreading.events import EventHandler

handler_a = EventHandler()
handler_b = EventHandler()

# Transfer all events to handler_b, as long as the handler_b
# object is in memory. Will not keep handler_b alive in memory.
handler_a.pipe(handler_b, use_weak_reference=True)

def handle_test_event(msg: str):
    print("The event message: " + msg)

handler_b.on("test", handle_test_event)
handler_a.emit("test", "The piped message")
```
## Streaming events and using tasks to do it.
Events can be streamed (as a yield generator):
```python
from random import random
from time import sleep
from zthreading.tasks import Task

# A task is an EventHandler, and has the
# on method as well.
task: Task = None

def invoke_timed_events():
    sleep(1)
    for i in range(1, 10):
        sleep(random() / 10)
        task.emit("test", f"loop index {i}")
    task.stop_all_streams()

task = Task(invoke_timed_events).start()

for ev in task.stream("test"):
    print(f"{ev.name}, {ev.args[0]}")
```
# Install
```shell
pip install zthreading
```
## From the git repo directly
To install from the master branch:
```shell
pip install git+https://github.com/LamaAni/zthreading.py.git@master
```
To install from a release (tag):
```shell
pip install git+https://github.com/LamaAni/zthreading.py.git@[tag]
```
# Contribution
Feel free to ping me in issues or directly on LinkedIn to contribute.
# Licence
Copyright ©
`Zav Shotan` and other [contributors](https://github.com/LamaAni/postgres-xl-helm/graphs/contributors).
It is free software, released under the MIT licence, and may be redistributed under the terms specified in `LICENSE`.
| zthreading | /zthreading-0.1.19.tar.gz/zthreading-0.1.19/README.md | README.md |
__author__ = 'Zhang Fan'
import traceback
import threading
from zqueue import Queue
from zblocker import BLock_more
class ThreadPool():
class _Close_Thread_Pool():
pass
def __init__(self, maxcount=10):
# Initialize the thread pool (max number of threads running concurrently)
assert isinstance(maxcount, int) and maxcount > 0, 'thread count must be an integer greater than 0'
self.__max_thread_count = maxcount # maximum number of threads
self.__thread_count = 0 # number of threads currently running
self.__is_run = True # whether the pool is running
self.__is_close = False #
self.__queue = Queue() # task queue
self.__run_count = 0 # number of running tasks
self.__join_lock = BLock_more() # wait lock
self.__init_threads() # initialize the worker threads
def __init_threads(self):
# Create the worker threads
self.__thread_list = []
for i in range(self.__max_thread_count): # create threads in a loop
th = threading.Thread(target=self.__thread_fun, args=(i,)) # point the thread target at the internal worker loop
th.setDaemon(False)
th.start() # start this thread
self.__thread_list.append(th) # add the thread to the list
def __thread_fun(self, thread_index):
self.__thread_count += 1
while self.__is_run:
try:
task = self.__queue.get() # block waiting for a task
except: # queue closed
break
if task is ThreadPool._Close_Thread_Pool: # received shutdown notification
break
self.__run_task(task, thread_index)
self.__thread_count -= 1
if self.__thread_count == 0: # the last thread has finished
self.__join_lock.unlock()
def __run_task(self, task, thread_index):
call, args, kwargs, errback = task
try:
self._log_info('\033[1;36mThread <%d> is running a new task\033[0m' % thread_index)
call(*args, **kwargs) # invoke the function
self._log_info('\033[1;32mThread <%d> finished the task\033[0m' % thread_index)
except Exception as err:
if errback is False or errback is None:
return
if errback is True:
self._log_warn('\033[1;35mError in thread <{}>\n\033[1;31m{}\033[0m'.format(
thread_index, traceback.format_exc()), end='')
return
try:
errback(thread_index, err)
self._log_info('\033[1;32mThread <%d> finished the task\033[0m' % thread_index)
except Exception as err:
self._log_info('\033[1;35mThread <{}> raised another error while running the error callback\n\033[1;31m{}\033[0m'.format(
thread_index, traceback.format_exc()), end='')
def add_task(self, func, *args, err_callback=True, **kwargs):
'''
Add a task; it will wait until an idle thread executes it
:param func: callback function
:param args: callback positional arguments
:param err_callback: error callback; if True, errors are printed; if False or None, errors are ignored.
May also be set to a callable that accepts two arguments (index of the failing thread, exception object); if that callback itself raises, the error is printed.
:param kwargs: callback keyword arguments
:return:
'''
assert err_callback is True or err_callback is False or err_callback is None or hasattr(err_callback,
'__call__'), \
'err_callback must be True, False, None, or have a __call__ attribute'
assert not self.__is_close, 'the task entry has been closed'
self.__queue.put((func, args, kwargs, err_callback))
def close(self):
# Close the task entry; no more tasks may be added
if not self.__is_close:
self.__is_close = True
for i in range(self.__max_thread_count):
self.__queue.put(ThreadPool._Close_Thread_Pool) # notify shutdown
def join(self):
# Wait for all tasks to finish
assert self.__is_close, 'the task entry must be closed first'
if not self.__queue.is_empty():
self.__join_lock.lock()
def task(self, err_callback=True):
'''
Allows creating tasks via a decorator
:param err_callback: error callback; must accept two arguments (index of the failing thread, exception object)
'''
def _task(func):
def _func(*args, **kwargs):
self.add_task(func, *args, err_callback=err_callback, **kwargs)
return _func
return _task
def _log_info(self, *args, **kw):
print(*args, **kw)
def _log_warn(self, *args, **kw):
print(*args, **kw)
if __name__ == '__main__':
import time
import threading
p = ThreadPool(2)
@p.task()
def fun(a, c):
print(a, 'start', time.strftime(' %H:%M:%S', time.localtime()))
for i in range(c):
time.sleep(0.01)
print(a, ' end', time.strftime(' %H:%M:%S', time.localtime()))
fun('aa', 100)
fun('bb', 100)
fun('cc', 100)
fun('dd', 100)
p.close()
p.join()
print('--end--') | zthreadpool | /zthreadpool-1.0.1.tar.gz/zthreadpool-1.0.1/zthreadpool.py | zthreadpool.py |
__author__ = 'Zhang Fan'
import time
import threading
from zsingleton.singleton_decorator import singleton
from zblocker import BLock_more
class Timer():
'''
Create a timer (time callback, initial wait time, loop wait time)
'''
def __init__(self, callback, loop_wait_time: int or float, loop_count: None or int = None,
close_callback=None, meta=None):
'''
Initialize the timer
:param callback: callback function
:param loop_wait_time: wait time per loop
:param loop_count: number of loops; None means loop forever; the timer closes automatically once the count is exceeded
:param meta: stores a user-defined value; the module only keeps this value for the user and never uses or modifies it
'''
assert hasattr(callback, '__call__'), TypeError("the callback must have a __call__ attribute")
assert loop_wait_time > 0, 'loop wait time must be greater than 0'
assert loop_count is None or loop_count > 0, 'loop count must be None or greater than 0'
if not close_callback is None:
assert hasattr(close_callback, '__call__'), TypeError("the close callback must have a __call__ attribute")
self.callback = callback
self.loop_wait_time = loop_wait_time
self.loop_count = loop_count
self.close_callback = close_callback
self.meta = meta
self.__callback_count = 0 # number of times the callback has fired
self.__interval = 0 # elapsed time
self.__is_run = False
self.__is_close = True
self.__join_lock = BLock_more()
def start(self):
if self.__is_close:
self.__is_close = False
self.__callback_count = 0
self.__interval = 0
self.__tm = Timer_Manager()
self.__tm.add_timer(self)
self.__is_run = True
def pause(self):
self.__is_run = False
def close(self):
'''Close this timer'''
self.pause()
if not self.__is_close:
self.__is_close = True
if not self.close_callback is None:
self.close_callback(self)
self.__join_lock.unlock() # unlock
@property
def is_run(self):
return self.__is_run and not self.__is_close
@property
def is_close(self):
return self.__is_close
@property
def callback_count(self):
return self.__callback_count
def join(self):
'''Block until this timer is closed'''
if not self.__is_close:
self.__join_lock.lock()
def _update(self, interval):
if self.__is_close or not self.__is_run:
return
self.__interval += interval
while self.__interval >= self.loop_wait_time:
self.__interval -= self.loop_wait_time
self.__callback_count += 1
self.callback(self)
if self.loop_count and self.__callback_count >= self.loop_count:
self.close()
return
@singleton
class Timer_Manager():
def __init__(self):
self.__timer_list = [] # list of timers
self.__timer_lock = threading.Lock()
self.__time_interval = 0.01 # tick interval (timer resolution)
self.__is_run = False
self.__main_thread_versions = 0 # main-thread version, prevents accidentally running several __main_thread_fun at once
def close(self):
with self.__timer_lock:
self.__is_run = False
@property
def time_interval(self):
return self.__time_interval
@time_interval.setter
def time_interval(self, interval: int or float):
if interval < 0.001:
interval = 0.001
self.__time_interval = interval
def add_timer(self, timer: Timer):
with self.__timer_lock:
if not self.__is_run:
self.__is_run = True
self.__main_thread_versions += 1
th = threading.Thread(target=self.__main_thread_fun, args=(self.__main_thread_versions,),
name="Timer_Manager_Thread")
th.setDaemon(False)
th.start()
if timer not in self.__timer_list:
self.__timer_list.append(timer)
def remove_timer(self, timer: Timer):
with self.__timer_lock:
if not self.__is_run:
return
if timer in self.__timer_list:
self.__timer_list.remove(timer)
def __main_thread_fun(self, main_thread_versions):
old_time = time.time()
while main_thread_versions == self.__main_thread_versions:
with self.__timer_lock:
time.sleep(self.__time_interval)
now_time = time.time()
interval = now_time - old_time
old_time = now_time
# check the version to avoid accidentally calling __update twice at the same time
if self.__is_run and main_thread_versions == self.__main_thread_versions:
self.__update(interval)
if not self.__timer_list:
self.__is_run = False
# keep __clear_timer_list() inside "with self.__timer_lock:" to avoid calling __clear_timer_list() incorrectly by accident
if not self.__is_run:
self.__clear_timer_list()
return
def __update(self, interval):
for timer in self.__timer_list:
if timer.is_close:
self.__timer_list.remove(timer) # Python allows removing the current element while iterating
else:
timer._update(interval)
def __clear_timer_list(self):
for timer in self.__timer_list:
if not timer.is_close:
timer.close()
self.__timer_list.clear()
def create_timer(self, callback, loop_wait_time: int or float, loop_count=None, close_callback=None, meta=None):
return Timer(callback, loop_wait_time, loop_count, close_callback, meta)
if __name__ == '__main__':
def fun1(t: Timer):
print('callback', t.meta, t.callback_count, time.strftime(' %H:%M:%S', time.localtime()))
def fun_de(t: Timer):
print(t.meta, 'closed')
import time
t1 = Timer(fun1, 1, 5, close_callback=fun_de, meta='timer 1 ')
t1.start()
time.sleep(0.5)
t2 = Timer(fun1, 1, 2, meta=' timer 2 ')
t2.start()
print('waiting for timer 1 to finish')
t1.join()
print('done') | ztimer | /ztimer-1.0.5-py3-none-any.whl/ztimer.py | ztimer.py |
from typing import Optional
from pydantic import BaseModel
from . import fields
class NineNineModel(BaseModel):
code: Optional[str] = fields.code_field
type_one_id: Optional[str] = fields.type_one_id_field
tao_id: Optional[str] = fields.tao_id_field
title: Optional[str] = fields.title_field
jianjie: Optional[str] = fields.jianjie_field
pict_url: Optional[str] = fields.pict_url_field
user_type: Optional[str] = fields.user_type_field
seller_id: Optional[str] = fields.seller_id_field
shop_dsr: Optional[str] = fields.shop_dsr_field
volume: Optional[str] = fields.volume_field
size: Optional[str] = fields.size_field
quanhou_jiage: Optional[str] = fields.quanhou_jiage_field
date_time_yongjin: Optional[str] = fields.date_time_yongjin_field
tkrate3: Optional[str] = fields.tkrate3_field
yongjin_type: Optional[str] = fields.yongjin_type_field
coupon_id: Optional[str] = fields.coupon_id_field
coupon_start_time: Optional[str] = fields.coupon_start_time_field
coupon_end_time: Optional[str] = fields.coupon_end_time_field
coupon_info_money: Optional[str] = fields.coupon_info_money_field
coupon_total_count: Optional[str] = fields.coupon_total_count_field
coupon_remain_count: Optional[str] = fields.coupon_remain_count_field
coupon_info: Optional[str] = fields.coupon_info_field
juhuasuan: Optional[str] = fields.juhuasuan_field
taoqianggou: Optional[str] = fields.taoqianggou_field
haitao: Optional[str] = fields.haitao_field
jiyoujia: Optional[str] = fields.jiyoujia_field
jinpaimaijia: Optional[str] = fields.jinpaimaijia_field
pinpai: Optional[str] = fields.pinpai_field
pinpai_name: Optional[str] = fields.pinpai_name_field
yunfeixian: Optional[str] = fields.yunfeixian_field
nick: Optional[str] = fields.nick_field
small_images: Optional[str] = fields.small_images_field
white_image: Optional[str] = fields.white_image_field
tao_title: Optional[str] = fields.tao_title_field
provcity: Optional[str] = fields.provcity_field
shop_title: Optional[str] = fields.shop_title_field
zhibo_url: Optional[str] = fields.zhibo_url_field
sellCount: Optional[str] = fields.sellCount_field
commentCount: Optional[str] = fields.commentCount_field
favcount: Optional[str] = fields.favcount_field
score1: Optional[str] = fields.score1_field
score2: Optional[str] = fields.score2_field
score3: Optional[str] = fields.score3_field
creditLevel: Optional[str] = fields.creditLevel_field
shopIcon: Optional[str] = fields.shopIcon_field
pcDescContent: Optional[str] = fields.pcDescContent_field
item_url: Optional[str] = fields.item_url_field
category_id: Optional[str] = fields.category_id_field
category_name: Optional[str] = fields.category_name_field
level_one_category_id: Optional[str] = fields.level_one_category_id_field
level_one_category_name: Optional[str] = fields.level_one_category_name_field
tkfee3: Optional[str] = fields.tkfee3_field
biaoqian: Optional[str] = fields.biaoqian_field
tag: Optional[str] = fields.tag_field
presale_discount_fee_text: Optional[str] = fields.presale_discount_fee_text_field
presale_tail_end_time: Optional[str] = fields.presale_tail_end_time_field
presale_tail_start_time: Optional[str] = fields.presale_tail_start_time_field
presale_end_time: Optional[str] = fields.presale_end_time_field
presale_start_time: Optional[str] = fields.presale_start_time_field
presale_deposit: Optional[str] = fields.presale_deposit_field | ztk-api | /ztk_api-0.3.3-py3-none-any.whl/ztk_api/nine_nine_model.py | nine_nine_model.py |
from typing import Optional
from pydantic import BaseModel
from . import fields
class ItemDetailV2Model(BaseModel):
code: Optional[str] = fields.code_field
type_one_id: Optional[str] = fields.type_one_id_field
tao_id: Optional[str] = fields.tao_id_field
title: Optional[str] = fields.title_field
jianjie: Optional[str] = fields.jianjie_field
pict_url: Optional[str] = fields.pict_url_field
user_type: Optional[str] = fields.user_type_field
seller_id: Optional[str] = fields.seller_id_field
shop_dsr: Optional[str] = fields.shop_dsr_field
volume: Optional[str] = fields.volume_field
size: Optional[str] = fields.size_field
quanhou_jiage: Optional[str] = fields.quanhou_jiage_field
date_time_yongjin: Optional[str] = fields.date_time_yongjin_field
tkrate3: Optional[str] = fields.tkrate3_field
yongjin_type: Optional[str] = fields.yongjin_type_field
coupon_id: Optional[str] = fields.coupon_id_field
coupon_start_time: Optional[str] = fields.coupon_start_time_field
coupon_end_time: Optional[str] = fields.coupon_end_time_field
coupon_info_money: Optional[str] = fields.coupon_info_money_field
coupon_total_count: Optional[str] = fields.coupon_total_count_field
coupon_remain_count: Optional[str] = fields.coupon_remain_count_field
coupon_info: Optional[str] = fields.coupon_info_field
juhuasuan: Optional[str] = fields.juhuasuan_field
taoqianggou: Optional[str] = fields.taoqianggou_field
haitao: Optional[str] = fields.haitao_field
jiyoujia: Optional[str] = fields.jiyoujia_field
jinpaimaijia: Optional[str] = fields.jinpaimaijia_field
pinpai: Optional[str] = fields.pinpai_field
pinpai_name: Optional[str] = fields.pinpai_name_field
yunfeixian: Optional[str] = fields.yunfeixian_field
nick: Optional[str] = fields.nick_field
small_images: Optional[str] = fields.small_images_field
white_image: Optional[str] = fields.white_image_field
tao_title: Optional[str] = fields.tao_title_field
provcity: Optional[str] = fields.provcity_field
shop_title: Optional[str] = fields.shop_title_field
zhibo_url: Optional[str] = fields.zhibo_url_field
sellCount: Optional[str] = fields.sellCount_field
commentCount: Optional[str] = fields.commentCount_field
favcount: Optional[str] = fields.favcount_field
score1: Optional[str] = fields.score1_field
score2: Optional[str] = fields.score2_field
score3: Optional[str] = fields.score3_field
creditLevel: Optional[str] = fields.creditLevel_field
shopIcon: Optional[str] = fields.shopIcon_field
pcDescContent: Optional[str] = fields.pcDescContent_field
item_url: Optional[str] = fields.item_url_field
category_id: Optional[str] = fields.category_id_field
category_name: Optional[str] = fields.category_name_field
level_one_category_id: Optional[str] = fields.level_one_category_id_field
level_one_category_name: Optional[str] = fields.level_one_category_name_field
tkfee3: Optional[str] = fields.tkfee3_field
biaoqian: Optional[str] = fields.biaoqian_field
tag: Optional[str] = fields.tag_field
date_time: Optional[str] = fields.date_time_field
presale_discount_fee_text: Optional[str] = fields.presale_discount_fee_text_field
presale_tail_end_time: Optional[str] = fields.presale_tail_end_time_field
presale_tail_start_time: Optional[str] = fields.presale_tail_start_time_field
presale_end_time: Optional[str] = fields.presale_end_time_field
presale_start_time: Optional[str] = fields.presale_start_time_field
presale_deposit: Optional[str] = fields.presale_deposit_field
min_commission_rate: Optional[str] = fields.min_commission_rate_field | ztk-api | /ztk_api-0.3.3-py3-none-any.whl/ztk_api/item_detail_v2_model.py | item_detail_v2_model.py |
from dataclasses import dataclass
from datetime import datetime
from decimal import Decimal
from typing import Optional, Any, List
from dataclasses_json import DataClassJsonMixin
__all__ = ["OrderDetailsResp", "OrderDto", "TkStatusEnum"]
class TkStatusEnum(object):
done = 3 # order settled
paid = 12 # order paid
cancel = 13 # order invalid (cancelled/expired)
success = 14 # receipt confirmed
@dataclass
class OrderDto(DataClassJsonMixin):
# time the order was paid on Taobao
tb_paid_time: Optional[str] = None
# time the order was paid; synced from Taobao, may be slightly later than the buyer's order creation time on Taobao
tk_paid_time: Optional[str] = None
# payment amount when the buyer confirmed receipt (shipping fee excluded)
# this is most likely an empty field; use alipay_total_price instead
pay_price: Optional[str] = None
# estimated settled income = settled amount * commission share.
# estimate of the income you may receive, based on the amount paid when the buyer confirmed receipt.
# may differ from your final income because of buyer refunds, promotion violations, etc.; the final income is what you actually receive after monthly settlement
pub_share_fee: Optional[str] = None
# order number of each item the buyer purchased via the shopping cart; this number is not shown in the buyer's Taobao console
trade_id: Optional[str] = None
# second party: primary owner of the commission income; third party: a promoter sharing commission from other Taobao affiliates
tk_order_role: Optional[int] = None
# time when receipt was confirmed and the seller finished paying the commission
tk_earning_time: Optional[str] = None
# ID of the adzone name under adzone management; also the "3" segment in pid=mm_1_2_3
adzone_id: Optional[str] = None
# share ratio taken from the settled commission
pub_share_rate: Optional[str] = None
# dispute flag: 0 means no dispute, 1 means a dispute (refund claim) order
refund_tag: Optional[int] = None
# subsidy rate given by the platform, e.g. Tmall, Taobao, Juhuasuan
subsidy_rate: Optional[str] = None
# commission share = income rate * split rate; the rate of income actually received
tk_total_rate: Optional[str] = None
# root (level-one) category name the item belongs to
item_category_name: Optional[str] = None
# seller nickname
seller_nick: Optional[str] = None
# promoter's member id
pub_id: Optional[int] = None
# rate of the technical service fee the promoter pays to Alimama after earning commission
alimama_rate: Optional[str] = None
# platform funding party, e.g. Tmall, Taobao, or Juhuasuan
subsidy_type: Optional[str] = None
# item image
item_img: Optional[str] = None
# estimated income on payment = paid amount * commission share.
# estimate of the income you may receive, based on the amount the buyer paid.
# may differ from the estimated settled income because of buyer refunds, etc.
pub_share_pre_fee: Optional[str] = None
# total payment: the amount the buyer paid when placing the order (shipping fee excluded)
alipay_total_price: Optional[str] = None
# item title
item_title: Optional[str] = None
# custom name of the corresponding ID under media management
site_name: Optional[str] = None
# item quantity
item_num: Optional[int] = None
# subsidy amount = settled amount * subsidy rate
subsidy_fee: Optional[str] = None
# technical service fee = settled amount * income rate * technical service fee rate.
# the technical service fee the promoter pays to Alimama after earning commission
alimama_share_fee: Optional[str] = None
# parent trade ID: the order number shown in the buyer's Taobao console
# [the order number the user can see]
trade_parent_id: Optional[str] = None
# platform type of the order: Tmall, Taobao, Juhuasuan, etc.
order_type: Optional[str] = None
# order creation time; synced from Taobao, may be slightly later than the buyer's order creation time on Taobao
tk_create_time: Optional[str] = None
# product (traffic source) type
flow_source: Optional[str] = None
# platform where the deal was made
terminal_type: Optional[str] = None
# click time of reaching the item/shop detail page via the promotion link
click_time: Optional[str] = None
# Taobao affiliate (taoke) status
# Paid: the order has been paid, but receipt has not been confirmed yet
# Received: receipt has been confirmed, but the seller's commission has not been paid
# Settled: receipt has been confirmed and the seller's commission was paid successfully
# Invalid: the order was closed / the order commission is less than 0.01 yuan;
# an order is closed mainly when:
# 1) the buyer did not pay in time;
# 2) the buyer/seller cancelled the order before payment;
# 3) an in-sale refund succeeded after payment;
#
# Values:
# 3: order settled, settlement succeeded [Taobao has already settled this order]
# 12: order paid [the user has paid]
# 13: order invalid [the order was cancelled, etc.]
# 14: order succeeded / receipt confirmed
tk_status: Optional[int] = None
# item unit price
item_price: Optional[str] = None
# item id
item_id: Optional[int] = None
# custom adzone name under adzone management
adzone_name: Optional[str] = None
# commission rate
total_commission_rate: Optional[str] = None
# item URL (item link)
item_link: Optional[str] = None
# ID under media management; also the "2" segment in pid=mm_1_2_3
site_id: Optional[int] = None
# shop name
seller_shop_title: Optional[str] = None
# settled commission rate + platform subsidy rate
income_rate: Optional[str] = None
# commission amount = settled amount * commission rate
total_commission_fee: Optional[str] = None
# estimated content-specific service fee:
# a special technical service fee for the content scenario; content promoters pay it to Alimama when promoting in content scenarios.
# special service fee = paid amount * special service fee rate.
tk_commission_pre_fee_for_media_platform: Optional[str] = None
# settled content-specific service fee:
# a special technical service fee for the content scenario; content promoters pay it to Alimama when promoting in content scenarios.
# special service fee = settled amount * special service fee rate.
tk_commission_fee_for_media_platform: Optional[str] = None
# content-specific service fee rate:
# the special technical service fee rate for the content scenario;
# content promoters pay a percentage of the settled amount to Alimama as the content-scenario special technical service fee,
# which covers product/technical integration with the content platform.
tk_commission_rate_for_media_platform: Optional[str] = None
# member operation id
special_id: Optional[int] = None
# channel relation id
relation_id: Optional[int] = None
# pre-sale period: time the user paid the deposit for the pre-sale item; may be slightly later than the deposit payment time on Taobao
tk_deposit_time: Optional[str] = None
# pre-sale period: time the user paid the deposit for the pre-sale item
tb_deposit_time: Optional[str] = None
# pre-sale period: deposit amount the user paid for the pre-sale item
deposit_price: Optional[str] = None
# appkey the developer used to call the API
app_key: Optional[str] = None
# Koubei child order number
alsc_id: Optional[str] = None
# Koubei parent order number
alsc_pid: Optional[str] = None
# service fee info
service_fee_dto_list: Optional[Any] = None
# rid of the incentive pool
lx_rid: Optional[str] = None
# whether the order is an incentive-pool order: 1 means yes, 0 means no
is_lx: Optional[str] = None
def ali_pay_price(self) -> Optional[Decimal]:
if self.alipay_total_price is None:
return None
return Decimal(self.alipay_total_price)
def income(self) -> Optional[Decimal]:
if self.pub_share_fee is not None:
ret = Decimal(self.pub_share_fee)
if ret >= 0.01:
return ret
if self.pub_share_pre_fee is None:
return Decimal(0)
return Decimal(self.pub_share_pre_fee)
def order_ctime(self) -> Optional[datetime]:
return self._str_to_datetime(self.tk_create_time)
def end_time(self) -> Optional[datetime]:
return self._str_to_datetime(self.tk_earning_time)
def pay_time(self) -> Optional[datetime]:
return self._str_to_datetime(self.tk_paid_time)
@staticmethod
def _str_to_datetime(s: Optional[str]) -> Optional[datetime]:
if s is None:
return None
try:
return datetime.strptime(s, "%Y-%m-%d %H:%M:%S")
except ValueError:
return None
def order_platform(self) -> str:
"""
Get the platform this order belongs to
"""
if self.order_type is None:
return "--"
else:
return self.order_type
def is_order_canceled(self) -> bool:
"""
Order invalid / cancelled
"""
return self.tk_status == TkStatusEnum.cancel
def is_order_paid(self) -> bool:
"""
The order has been paid
"""
return self.tk_status == TkStatusEnum.paid
def is_order_success(self) -> bool:
"""
Receipt confirmed (goods received)
"""
return self.tk_status == TkStatusEnum.success
def is_order_done(self) -> bool:
"""
Order settled
"""
return self.tk_status == TkStatusEnum.done
@dataclass
class OrderDetailsResp(DataClassJsonMixin):
has_next: bool
has_pre: bool
page_no: int
page_size: int
position_index: str
# use get_order_lists 获取订单列表
results: Optional[dict]
@staticmethod
def from_result(j: dict) -> Optional["OrderDetailsResp"]:
resp = j["tbk_sc_order_details_get_response"]
if "data" not in resp:
return None
data = resp["data"]
if not isinstance(data, dict):
return None
if len(data["results"]) == 0:
data["results"] = None
return OrderDetailsResp.from_dict(data)
def get_order_lists(self) -> List[OrderDto]:
if not isinstance(self.results, dict):
return []
order_list = []
if "publisher_order_dto" in self.results:
order_list = self.results["publisher_order_dto"]
return list(map(lambda x: OrderDto.from_dict(x), order_list)) | ztk-api | /ztk_api-0.3.3-py3-none-any.whl/ztk_api/order_details_resp.py | order_details_resp.py |
from typing import Optional
from pydantic import BaseModel
from . import fields
class GaoYongModel(BaseModel):
code: Optional[str] = fields.code_field
type_one_id: Optional[str] = fields.type_one_id_field
tao_id: Optional[str] = fields.tao_id_field
title: Optional[str] = fields.title_field
jianjie: Optional[str] = fields.jianjie_field
pict_url: Optional[str] = fields.pict_url_field
user_type: Optional[str] = fields.user_type_field
seller_id: Optional[str] = fields.seller_id_field
shop_dsr: Optional[str] = fields.shop_dsr_field
volume: Optional[str] = fields.volume_field
size: Optional[str] = fields.size_field
quanhou_jiage: Optional[str] = fields.quanhou_jiage_field
date_time_yongjin: Optional[str] = fields.date_time_yongjin_field
tkrate3: Optional[str] = fields.tkrate3_field
yongjin_type: Optional[str] = fields.yongjin_type_field
coupon_id: Optional[str] = fields.coupon_id_field
coupon_start_time: Optional[str] = fields.coupon_start_time_field
coupon_end_time: Optional[str] = fields.coupon_end_time_field
coupon_info_money: Optional[str] = fields.coupon_info_money_field
coupon_total_count: Optional[str] = fields.coupon_total_count_field
coupon_remain_count: Optional[str] = fields.coupon_remain_count_field
coupon_info: Optional[str] = fields.coupon_info_field
juhuasuan: Optional[str] = fields.juhuasuan_field
taoqianggou: Optional[str] = fields.taoqianggou_field
haitao: Optional[str] = fields.haitao_field
jiyoujia: Optional[str] = fields.jiyoujia_field
jinpaimaijia: Optional[str] = fields.jinpaimaijia_field
pinpai: Optional[str] = fields.pinpai_field
pinpai_name: Optional[str] = fields.pinpai_name_field
yunfeixian: Optional[str] = fields.yunfeixian_field
nick: Optional[str] = fields.nick_field
small_images: Optional[str] = fields.small_images_field
white_image: Optional[str] = fields.white_image_field
tao_title: Optional[str] = fields.tao_title_field
provcity: Optional[str] = fields.provcity_field
shop_title: Optional[str] = fields.shop_title_field
zhibo_url: Optional[str] = fields.zhibo_url_field
sellCount: Optional[str] = fields.sellCount_field
commentCount: Optional[str] = fields.commentCount_field
favcount: Optional[str] = fields.favcount_field
score1: Optional[str] = fields.score1_field
score2: Optional[str] = fields.score2_field
score3: Optional[str] = fields.score3_field
creditLevel: Optional[str] = fields.creditLevel_field
shopIcon: Optional[str] = fields.shopIcon_field
pcDescContent: Optional[str] = fields.pcDescContent_field
taobao_url: Optional[str] = fields.taobao_url_field
category_id: Optional[str] = fields.category_id_field
category_name: Optional[str] = fields.category_name_field
level_one_category_id: Optional[str] = fields.level_one_category_id_field
level_one_category_name: Optional[str] = fields.level_one_category_name_field
tkfee3: Optional[str] = fields.tkfee3_field
biaoqian: Optional[str] = fields.biaoqian_field
tag: Optional[str] = fields.tag_field
date_time: Optional[str] = fields.date_time_field
presale_discount_fee_text: Optional[str] = fields.presale_discount_fee_text_field
presale_tail_end_time: Optional[str] = fields.presale_tail_end_time_field
presale_tail_start_time: Optional[str] = fields.presale_tail_start_time_field
presale_end_time: Optional[str] = fields.presale_end_time_field
presale_start_time: Optional[str] = fields.presale_start_time_field
presale_deposit: Optional[str] = fields.presale_deposit_field
min_commission_rate: Optional[str] = fields.min_commission_rate_field
coupon_click_url: Optional[str] = fields.coupon_click_url_field
item_url: Optional[str] = fields.item_url_field
shorturl: Optional[str] = fields.shorturl_field
tkl: Optional[str] = fields.tkl_field | ztk-api | /ztk_api-0.3.3-py3-none-any.whl/ztk_api/gao_yong_model.py | gao_yong_model.py |
import json
from datetime import datetime
from typing import Optional, List
import aiohttp
import structlog
from tbk_api import TbkItemInfo
from .bang_dan_tui_jian_args import BangDanTuiJianArgs
from .batch_item_args import BatchItemsArgs
from .batch_item_resp import BatchItemResp, BatchItemContentItem
from .guess_you_like_args import GuessYouLikeArgs
from .ju_hua_suan_args import JuHuaSuanArgs
from .keyword_args import KeywordArgs
from .nine_nine_args import NineNineArgs
from .search_args import SearchArgs
from .suggest_args import SuggestArgs
from .tmall_chao_shi_args import TMallChaoShiArgs
from .tmall_shang_pin_args import TMallShangPinArgs
__all__ = ["ZTKStd"]
class ZTKStd(object):
"""
Standard version of the Taobao affiliate (taoke) API
Zhetaoke open platform API
"""
def __init__(self, ztk_sid: str, logger: structlog.stdlib.BoundLogger):
"""
:param logger: logger
"""
self._http: Optional[aiohttp.ClientSession] = None
self._sid = ztk_sid
self._logger = logger
async def bang_dan_tui_jian(
self, args: BangDanTuiJianArgs
) -> Optional[List[TbkItemInfo]]:
"""
Full-day sales ranking API: returns the 24-hour sales ranking list (top 600 items), limited to items with commission ≥ 15% and dynamic description score ≥ 4.6.
"""
url = await args.to_http_url()
ret = await self._do_list_query_help(url)
return ret
async def batch_items(self, args: BatchItemsArgs) -> BatchItemResp:
"""
Fetch Taobao item info in batch
:param args:
:return:
"""
url = await args.to_http_url()
j = await self._do_query(url)
return BatchItemResp.from_dict(j)
async def guess_you_like(
self, args: GuessYouLikeArgs
) -> Optional[List[TbkItemInfo]]:
"""
Guess-you-like (recommendations)
"""
url = await args.to_http_url()
ret = await self._do_list_query_help(url)
return ret
async def ju_hua_suan(self, args: JuHuaSuanArgs) -> Optional[List[TbkItemInfo]]:
"""
Juhuasuan items API: returns the Juhuasuan item list, limited to items with commission ≥ 15% and dynamic description score ≥ 4.6.
"""
url = await args.to_http_url()
ret = await self._do_list_query_help(url)
return ret
async def keyword(self) -> list:
"""
Keyword dictionary API
"""
args = KeywordArgs()
url = await args.to_http_url()
j = await self._do_query(url)
if isinstance(j, dict):
if j["status"] == 200:
content = j["content"]
assert isinstance(content, list)
return list(map(lambda x: x["keywords"], content))
return []
async def nine_nine(self, args: NineNineArgs) -> Optional[List[TbkItemInfo]]:
"""
9.9 yuan items API: returns items priced ≤ 9.9 yuan, limited to items with commission ≥ 15% and dynamic description score ≥ 4.6.
"""
url = await args.to_http_url()
ret = await self._do_list_query_help(url)
return ret
async def search(self, args: SearchArgs) -> Optional[List[TbkItemInfo]]:
"""
Whole-network search
"""
url = await args.to_http_url()
ret = await self._do_list_query_help(url)
return ret
async def suggest(self, args: SuggestArgs) -> Optional[List[str]]:
"""
Search suggestions (related keywords)
"""
url = await args.to_http_url()
j = await self._do_query(url)
if not isinstance(j, dict):
self._logger.warn("request ztk suggest failed")
return None
result = j["result"]
return list(map(lambda item: item[0], result))
async def tmall_chao_shi(
self, args: TMallChaoShiArgs
) -> Optional[List[TbkItemInfo]]:
"""
Tmall Supermarket items API: returns the Tmall Supermarket item list with dynamic description score ≥ 4.6; depending on the parameters it can return single-item free-shipping Tmall Supermarket items.
"""
url = await args.to_http_url()
ret = await self._do_list_query_help(url)
return ret
async def tmall_shang_pin(
self, args: TMallShangPinArgs
) -> Optional[List[TbkItemInfo]]:
"""
Tmall items API: returns the Tmall item list, limited to items with commission ≥ 15% and dynamic description score ≥ 4.6.
"""
url = await args.to_http_url()
ret = await self._do_list_query_help(url)
return ret
async def _do_query(self, url: str) -> dict:
if self._http is None:
self._http = aiohttp.ClientSession()
async with self._http.get(url) as result:
text = await result.text()
return json.loads(text)
async def _ztk_sid(self) -> Optional[str]:
"""
Get the Zhetaoke sid
"""
return self._sid
async def _do_list_query_help(self, url: str) -> Optional[List[TbkItemInfo]]:
ret = await self._do_query(url)
if ret.get("status", None) != 200:
self._logger.error(f"ztk fetch {url=} failed: {ret=}")
return None
content = ret.get("content", [])
if len(content) < 1:
self._logger.warning(f"ztk fetch {url=} without data")
return []
tao_ids = list(map(lambda x: x["tao_id"], content))
return await self._do_fetch_batch_items(tao_ids)
async def _do_fetch_batch_items(
self, tao_ids: List[str]
) -> Optional[List[TbkItemInfo]]:
args = BatchItemsArgs(num_iids=",".join(tao_ids))
ret = await self.batch_items(args)
if ret.status != 200:
self._logger.error(f"ztk fetch batch item {args=} failed: {ret=}")
return None
data_list = map(lambda x: BatchItemContentItem(**x), ret.content)
def convert_item_to_std(item: BatchItemContentItem) -> TbkItemInfo:
#
# http://www.zhetaoke.com/user/extend/extend_lingquan_detail.aspx
#
coupon_recv_num = int(item.coupon_total_count) - int(
item.coupon_remain_count
)
return TbkItemInfo(
tao_id=item.tao_id,
tao_img=item.pict_url,
tao_link=item.item_url,
tao_details=item.pcDescContent.split("|"),
title_short=item.title,
title_long=item.tao_title,
price_origin=float(item.size),
price_actual=float(item.quanhou_jiage),
price_coupon=float(item.coupon_info_money),
seller_id=item.seller_id,
seller_name=item.shop_title,
seller_logo=item.shopIcon,
score_dsr=float(item.score1),
score_ship=float(item.score3),
score_service=float(item.score2),
commission_rate=float(item.tkrate3),
commission_money=float(item.tkfee3),
sale_month=int(item.volume),
sale_day=0,
sale_two_hours=0,
coupon_start_time=datetime.fromisoformat(item.coupon_start_time),
coupon_end_time=datetime.fromisoformat(item.coupon_end_time),
coupon_total_num=int(item.coupon_total_count),
coupon_recv_num=coupon_recv_num,
coupon_link=None,
yun_fei_xian=item.yunfeixian == "1",
)
return list(map(convert_item_to_std, data_list)) | ztk-api | /ztk_api-0.3.3-py3-none-any.whl/ztk_api/ztk_std.py | ztk_std.py |
import json
from typing import Optional, List
import aiohttp
import structlog
from .auth_account_args import AuthAccountArgs
from .bang_dan_tui_jian_args import BangDanTuiJianArgs
from .bang_dan_tui_jian_resp import BangDanTuiJianResp
from .batch_item_args import BatchItemsArgs
from .batch_item_resp import BatchItemResp
from .channel_account_info_args import ChannelAccountInfoArgs
from .channel_id_list_args import ChannelIdListArgs
from .channel_invite_code_args import ChannelInviteCodeArgs
from .channel_save_record_args import ChannelSaveRecordArgs
from .gao_yong_args import GaoYongArgs
from .gao_yong_resp import GaoYongResp
from .guess_you_like_args import GuessYouLikeArgs
from .guess_you_like_resp import GuessYouLikeResp
from .item_detail_resp import ItemDetailResp
from .item_detail_v2_args import ItemDetailV2Args
from .item_detail_v2_resp import ItemDetailV2Resp
from .ju_hua_suan_args import JuHuaSuanArgs
from .ju_hua_suan_resp import JuHuaSuanResp
from .keyword_args import KeywordArgs
from .new_order_args import NewOrderArgs
from .nine_nine_args import NineNineArgs
from .nine_nine_resp import NineNineResp
from .search_args import SearchArgs
from .search_resp import SearchResp
from .suggest_args import SuggestArgs
from .tkl_create_args import TKLCreateArgs
from .tkl_create_resp import TKLCreateResp
from .tmall_chao_shi_args import TMallChaoShiArgs
from .tmall_chao_shi_resp import TmallChaoShiResp
from .tmall_shang_pin_args import TMallShangPinArgs
from .tmall_shang_pin_resp import TmallShangPinResp
__all__ = ["ZTK"]
class ZTK(object):
"""
Zhetaoke open platform API
"""
def __init__(self, ztk_sid: str, logger: structlog.stdlib.BoundLogger):
"""
:param logger: logger
"""
self._http: Optional[aiohttp.ClientSession] = None
self._sid = ztk_sid
self._logger = logger
async def auth_account(self, args: AuthAccountArgs):
"""
API for fetching the list of authorized accounts
"""
args.sid = await self._ztk_sid()
url = await args.to_http_url()
j = await self._do_query(url)
return j
async def bang_dan_tui_jian(self, args: BangDanTuiJianArgs) -> BangDanTuiJianResp:
"""
Full-day sales ranking API: returns the 24-hour sales ranking list (top 600 items), limited to items with commission ≥ 15% and dynamic description score ≥ 4.6.
"""
url = await args.to_http_url()
j = await self._do_query(url)
return BangDanTuiJianResp.from_dict(j)
async def batch_items(self, args: BatchItemsArgs) -> BatchItemResp:
"""
Fetch Taobao item info in batch
:param args:
:return:
"""
url = await args.to_http_url()
j = await self._do_query(url)
return BatchItemResp.from_dict(j)
async def channel_account_info(self, args: ChannelAccountInfoArgs):
args.sid = await self._ztk_sid()
url = await args.to_http_url()
j = await self._do_query(url)
return j
async def channel_id_list(self, args: ChannelIdListArgs):
args.sid = await self._ztk_sid()
url = await args.to_http_url()
j = await self._do_query(url)
return j
async def channel_invite_code(self, args: ChannelInviteCodeArgs):
"""
Taobao affiliate invite code API: generates an invite code so a user can register a channel and obtain a new channel ID
"""
args.sid = await self._ztk_sid()
url = await args.to_http_url()
j = await self._do_query(url)
return j
async def channel_save_record(self, args: ChannelSaveRecordArgs):
"""
Taobao affiliate channel registration API: the user registers a channel and a new channel ID is generated.
"""
url = await args.to_http_url()
j = await self._do_query(url)
return j
async def gao_yong(self, args: GaoYongArgs) -> GaoYongResp:
"""
High-commission link conversion API (by item ID)
"""
args.sid = await self._ztk_sid()
url = await args.to_http_url()
j = await self._do_query(url)
return GaoYongResp.from_dict(j)
async def guess_you_like(self, args: GuessYouLikeArgs) -> GuessYouLikeResp:
"""
Guess-you-like (recommendations)
"""
url = await args.to_http_url()
j = await self._do_query(url)
return GuessYouLikeResp.from_dict(j)
async def item_detail_v2(self, args: ItemDetailV2Args) -> ItemDetailV2Resp:
args.sid = await self._ztk_sid()
url = await args.to_http_url()
j = await self._do_query(url)
return ItemDetailV2Resp.from_dict(j)
async def ju_hua_suan(self, args: JuHuaSuanArgs) -> JuHuaSuanResp:
"""
Juhuasuan items API: returns the Juhuasuan item list, limited to items with commission ≥ 15% and dynamic description score ≥ 4.6.
"""
url = await args.to_http_url()
j = await self._do_query(url)
return JuHuaSuanResp.from_dict(j)
async def keyword(self) -> list:
"""
Keyword dictionary API
"""
args = KeywordArgs()
url = await args.to_http_url()
j = await self._do_query(url)
if isinstance(j, dict):
if j["status"] == 200:
content = j["content"]
assert isinstance(content, list)
return list(map(lambda x: x["keywords"], content))
return []
async def new_order(self, args: NewOrderArgs):
"""
Fetch new orders
"""
args.sid = await self._ztk_sid()
url = await args.to_http_url()
j = await self._do_query(url)
return ItemDetailResp.from_ztk_resp(j)
async def nine_nine(self, args: NineNineArgs):
"""
9.9 yuan items API: returns items priced ≤ 9.9 yuan, limited to items with commission ≥ 15% and dynamic description score ≥ 4.6.
"""
url = await args.to_http_url()
j = await self._do_query(url)
return NineNineResp.from_dict(j)
async def search(self, args: SearchArgs) -> SearchResp:
"""
Whole-network search
"""
url = await args.to_http_url()
j = await self._do_query(url)
return SearchResp.from_dict(j)
async def suggest(self, args: SuggestArgs) -> Optional[List[str]]:
"""
Search suggestions (related keywords)
"""
url = await args.to_http_url()
j = await self._do_query(url)
if not isinstance(j, dict):
self._logger.warn("request ztk suggest failed")
return None
result = j["result"]
return list(map(lambda item: item[0], result))
async def tmall_chao_shi(self, args: TMallChaoShiArgs) -> TmallChaoShiResp:
"""
Tmall Supermarket items API: returns the Tmall Supermarket item list with dynamic description score ≥ 4.6; depending on the parameters it can return single-item free-shipping Tmall Supermarket items.
"""
url = await args.to_http_url()
j = await self._do_query(url)
return TmallChaoShiResp.from_dict(j)
async def tmall_shang_pin(self, args: TMallShangPinArgs) -> TmallShangPinResp:
"""
Tmall items API: returns the Tmall item list, limited to items with commission ≥ 15% and dynamic description score ≥ 4.6.
"""
url = await args.to_http_url()
j = await self._do_query(url)
return TmallShangPinResp.from_dict(j)
async def tkl_create(self, args: TKLCreateArgs) -> TKLCreateResp:
"""
Taobao password (taokouling) generation API: generates a taokouling from various high-commission Taobao links (two-in-one links, long links, short links, etc.)
"""
args.sid = await self._ztk_sid()
url = await args.to_http_url()
j = await self._do_query(url)
return TKLCreateResp.from_dict(j)
async def do_query(self, url: str) -> dict:
"""
This function is intended for external callers
:param url:
:return:
"""
return await self._do_query(url)
async def _do_query(self, url: str) -> dict:
if self._http is None:
self._http = aiohttp.ClientSession()
async with self._http.get(url) as result:
text = await result.text()
return json.loads(text)
async def _ztk_sid(self) -> Optional[str]:
"""
获取 折淘客的 sid
"""
return self._sid
# --- source file: ztk_api/ztk.py (package: ztk-api) ---
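# A minimal usage sketch for the asynchronous client defined above.  The class
# name ``ZTK`` and the ``(ztk_sid, logger)`` constructor are assumptions made by
# analogy with the synchronous ``ZTKSync`` client further below; the sid value
# is a placeholder.
import asyncio

import structlog


async def demo_async_client() -> None:
    logger = structlog.get_logger()
    ztk = ZTK(ztk_sid="your-ztk-sid", logger=logger)  # assumed class name and signature
    keywords = await ztk.keyword()  # keyword() takes no arguments
    print(keywords)


if __name__ == "__main__":
    asyncio.run(demo_async_client())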
from typing import Optional
from pydantic import BaseModel
from . import fields
class JuHuaSuanModel(BaseModel):
code: Optional[str] = fields.code_field
type_one_id: Optional[str] = fields.type_one_id_field
tao_id: Optional[str] = fields.tao_id_field
title: Optional[str] = fields.title_field
jianjie: Optional[str] = fields.jianjie_field
pict_url: Optional[str] = fields.pict_url_field
user_type: Optional[str] = fields.user_type_field
seller_id: Optional[str] = fields.seller_id_field
shop_dsr: Optional[str] = fields.shop_dsr_field
volume: Optional[str] = fields.volume_field
size: Optional[str] = fields.size_field
quanhou_jiage: Optional[str] = fields.quanhou_jiage_field
date_time_yongjin: Optional[str] = fields.date_time_yongjin_field
tkrate3: Optional[str] = fields.tkrate3_field
yongjin_type: Optional[str] = fields.yongjin_type_field
coupon_id: Optional[str] = fields.coupon_id_field
coupon_start_time: Optional[str] = fields.coupon_start_time_field
coupon_end_time: Optional[str] = fields.coupon_end_time_field
coupon_info_money: Optional[str] = fields.coupon_info_money_field
coupon_total_count: Optional[str] = fields.coupon_total_count_field
coupon_remain_count: Optional[str] = fields.coupon_remain_count_field
coupon_info: Optional[str] = fields.coupon_info_field
juhuasuan: Optional[str] = fields.juhuasuan_field
taoqianggou: Optional[str] = fields.taoqianggou_field
haitao: Optional[str] = fields.haitao_field
jiyoujia: Optional[str] = fields.jiyoujia_field
jinpaimaijia: Optional[str] = fields.jinpaimaijia_field
pinpai: Optional[str] = fields.pinpai_field
pinpai_name: Optional[str] = fields.pinpai_name_field
yunfeixian: Optional[str] = fields.yunfeixian_field
nick: Optional[str] = fields.nick_field
small_images: Optional[str] = fields.small_images_field
white_image: Optional[str] = fields.white_image_field
tao_title: Optional[str] = fields.tao_title_field
provcity: Optional[str] = fields.provcity_field
shop_title: Optional[str] = fields.shop_title_field
zhibo_url: Optional[str] = fields.zhibo_url_field
sellCount: Optional[str] = fields.sellCount_field
commentCount: Optional[str] = fields.commentCount_field
favcount: Optional[str] = fields.favcount_field
score1: Optional[str] = fields.score1_field
score2: Optional[str] = fields.score2_field
score3: Optional[str] = fields.score3_field
creditLevel: Optional[str] = fields.creditLevel_field
shopIcon: Optional[str] = fields.shopIcon_field
pcDescContent: Optional[str] = fields.pcDescContent_field
item_url: Optional[str] = fields.item_url_field
category_id: Optional[str] = fields.category_id_field
category_name: Optional[str] = fields.category_name_field
level_one_category_id: Optional[str] = fields.level_one_category_id_field
level_one_category_name: Optional[str] = fields.level_one_category_name_field
tkfee3: Optional[str] = fields.tkfee3_field
biaoqian: Optional[str] = fields.biaoqian_field
tag: Optional[str] = fields.tag_field
presale_discount_fee_text: Optional[str] = fields.presale_discount_fee_text_field
presale_tail_end_time: Optional[str] = fields.presale_tail_end_time_field
presale_tail_start_time: Optional[str] = fields.presale_tail_start_time_field
presale_end_time: Optional[str] = fields.presale_end_time_field
presale_start_time: Optional[str] = fields.presale_start_time_field
presale_deposit: Optional[str] = fields.presale_deposit_field
# --- source file: ztk_api/ju_hua_suan_model.py (package: ztk-api) ---
from typing import Optional
from pydantic import BaseModel
from . import fields
class GuessYouLikeModel(BaseModel):
code: Optional[str] = fields.code_field
type_one_id: Optional[str] = fields.type_one_id_field
tao_id: Optional[str] = fields.tao_id_field
title: Optional[str] = fields.title_field
jianjie: Optional[str] = fields.jianjie_field
pict_url: Optional[str] = fields.pict_url_field
user_type: Optional[str] = fields.user_type_field
seller_id: Optional[str] = fields.seller_id_field
shop_dsr: Optional[str] = fields.shop_dsr_field
volume: Optional[str] = fields.volume_field
size: Optional[str] = fields.size_field
quanhou_jiage: Optional[str] = fields.quanhou_jiage_field
date_time_yongjin: Optional[str] = fields.date_time_yongjin_field
tkrate3: Optional[str] = fields.tkrate3_field
yongjin_type: Optional[str] = fields.yongjin_type_field
coupon_id: Optional[str] = fields.coupon_id_field
coupon_start_time: Optional[str] = fields.coupon_start_time_field
coupon_end_time: Optional[str] = fields.coupon_end_time_field
coupon_info_money: Optional[str] = fields.coupon_info_money_field
coupon_total_count: Optional[str] = fields.coupon_total_count_field
coupon_remain_count: Optional[str] = fields.coupon_remain_count_field
coupon_info: Optional[str] = fields.coupon_info_field
juhuasuan: Optional[str] = fields.juhuasuan_field
taoqianggou: Optional[str] = fields.taoqianggou_field
haitao: Optional[str] = fields.haitao_field
jiyoujia: Optional[str] = fields.jiyoujia_field
jinpaimaijia: Optional[str] = fields.jinpaimaijia_field
pinpai: Optional[str] = fields.pinpai_field
pinpai_name: Optional[str] = fields.pinpai_name_field
yunfeixian: Optional[str] = fields.yunfeixian_field
nick: Optional[str] = fields.nick_field
small_images: Optional[str] = fields.small_images_field
white_image: Optional[str] = fields.white_image_field
tao_title: Optional[str] = fields.tao_title_field
provcity: Optional[str] = fields.provcity_field
shop_title: Optional[str] = fields.shop_title_field
zhibo_url: Optional[str] = fields.zhibo_url_field
sellCount: Optional[str] = fields.sellCount_field
commentCount: Optional[str] = fields.commentCount_field
favcount: Optional[str] = fields.favcount_field
score1: Optional[str] = fields.score1_field
score2: Optional[str] = fields.score2_field
score3: Optional[str] = fields.score3_field
creditLevel: Optional[str] = fields.creditLevel_field
shopIcon: Optional[str] = fields.shopIcon_field
pcDescContent: Optional[str] = fields.pcDescContent_field
item_url: Optional[str] = fields.item_url_field
category_id: Optional[str] = fields.category_id_field
category_name: Optional[str] = fields.category_name_field
level_one_category_id: Optional[str] = fields.level_one_category_id_field
level_one_category_name: Optional[str] = fields.level_one_category_name_field
tkfee3: Optional[str] = fields.tkfee3_field
# --- source file: ztk_api/guess_you_like_model.py (package: ztk-api) ---
from typing import Optional
from pydantic import BaseModel
from . import fields
class TmallShangPinModel(BaseModel):
code: Optional[str] = fields.code_field
type_one_id: Optional[str] = fields.type_one_id_field
tao_id: Optional[str] = fields.tao_id_field
title: Optional[str] = fields.title_field
jianjie: Optional[str] = fields.jianjie_field
pict_url: Optional[str] = fields.pict_url_field
user_type: Optional[str] = fields.user_type_field
seller_id: Optional[str] = fields.seller_id_field
shop_dsr: Optional[str] = fields.shop_dsr_field
volume: Optional[str] = fields.volume_field
size: Optional[str] = fields.size_field
quanhou_jiage: Optional[str] = fields.quanhou_jiage_field
date_time_yongjin: Optional[str] = fields.date_time_yongjin_field
tkrate3: Optional[str] = fields.tkrate3_field
yongjin_type: Optional[str] = fields.yongjin_type_field
coupon_id: Optional[str] = fields.coupon_id_field
coupon_start_time: Optional[str] = fields.coupon_start_time_field
coupon_end_time: Optional[str] = fields.coupon_end_time_field
coupon_info_money: Optional[str] = fields.coupon_info_money_field
coupon_total_count: Optional[str] = fields.coupon_total_count_field
coupon_remain_count: Optional[str] = fields.coupon_remain_count_field
coupon_info: Optional[str] = fields.coupon_info_field
juhuasuan: Optional[str] = fields.juhuasuan_field
taoqianggou: Optional[str] = fields.taoqianggou_field
haitao: Optional[str] = fields.haitao_field
jiyoujia: Optional[str] = fields.jiyoujia_field
jinpaimaijia: Optional[str] = fields.jinpaimaijia_field
pinpai: Optional[str] = fields.pinpai_field
pinpai_name: Optional[str] = fields.pinpai_name_field
yunfeixian: Optional[str] = fields.yunfeixian_field
nick: Optional[str] = fields.nick_field
small_images: Optional[str] = fields.small_images_field
white_image: Optional[str] = fields.white_image_field
tao_title: Optional[str] = fields.tao_title_field
provcity: Optional[str] = fields.provcity_field
shop_title: Optional[str] = fields.shop_title_field
zhibo_url: Optional[str] = fields.zhibo_url_field
sellCount: Optional[str] = fields.sellCount_field
commentCount: Optional[str] = fields.commentCount_field
favcount: Optional[str] = fields.favcount_field
score1: Optional[str] = fields.score1_field
score2: Optional[str] = fields.score2_field
score3: Optional[str] = fields.score3_field
creditLevel: Optional[str] = fields.creditLevel_field
shopIcon: Optional[str] = fields.shopIcon_field
pcDescContent: Optional[str] = fields.pcDescContent_field
item_url: Optional[str] = fields.item_url_field
category_id: Optional[str] = fields.category_id_field
category_name: Optional[str] = fields.category_name_field
level_one_category_id: Optional[str] = fields.level_one_category_id_field
level_one_category_name: Optional[str] = fields.level_one_category_name_field
tkfee3: Optional[str] = fields.tkfee3_field
biaoqian: Optional[str] = fields.biaoqian_field
tag: Optional[str] = fields.tag_field
presale_discount_fee_text: Optional[str] = fields.presale_discount_fee_text_field
presale_tail_end_time: Optional[str] = fields.presale_tail_end_time_field
presale_tail_start_time: Optional[str] = fields.presale_tail_start_time_field
presale_end_time: Optional[str] = fields.presale_end_time_field
presale_start_time: Optional[str] = fields.presale_start_time_field
presale_deposit: Optional[str] = fields.presale_deposit_field
# --- source file: ztk_api/tmall_shang_pin_model.py (package: ztk-api) ---
from typing import Optional
from pydantic import BaseModel
from . import fields
class BangDanTuiJianModel(BaseModel):
code: Optional[str] = fields.code_field
type_one_id: Optional[str] = fields.type_one_id_field
tao_id: Optional[str] = fields.tao_id_field
title: Optional[str] = fields.title_field
jianjie: Optional[str] = fields.jianjie_field
pict_url: Optional[str] = fields.pict_url_field
user_type: Optional[str] = fields.user_type_field
seller_id: Optional[str] = fields.seller_id_field
shop_dsr: Optional[str] = fields.shop_dsr_field
volume: Optional[str] = fields.volume_field
size: Optional[str] = fields.size_field
quanhou_jiage: Optional[str] = fields.quanhou_jiage_field
date_time_yongjin: Optional[str] = fields.date_time_yongjin_field
tkrate3: Optional[str] = fields.tkrate3_field
yongjin_type: Optional[str] = fields.yongjin_type_field
coupon_id: Optional[str] = fields.coupon_id_field
coupon_start_time: Optional[str] = fields.coupon_start_time_field
coupon_end_time: Optional[str] = fields.coupon_end_time_field
coupon_info_money: Optional[str] = fields.coupon_info_money_field
coupon_total_count: Optional[str] = fields.coupon_total_count_field
coupon_remain_count: Optional[str] = fields.coupon_remain_count_field
coupon_info: Optional[str] = fields.coupon_info_field
juhuasuan: Optional[str] = fields.juhuasuan_field
taoqianggou: Optional[str] = fields.taoqianggou_field
haitao: Optional[str] = fields.haitao_field
jiyoujia: Optional[str] = fields.jiyoujia_field
jinpaimaijia: Optional[str] = fields.jinpaimaijia_field
pinpai: Optional[str] = fields.pinpai_field
pinpai_name: Optional[str] = fields.pinpai_name_field
yunfeixian: Optional[str] = fields.yunfeixian_field
nick: Optional[str] = fields.nick_field
small_images: Optional[str] = fields.small_images_field
white_image: Optional[str] = fields.white_image_field
tao_title: Optional[str] = fields.tao_title_field
provcity: Optional[str] = fields.provcity_field
shop_title: Optional[str] = fields.shop_title_field
zhibo_url: Optional[str] = fields.zhibo_url_field
sellCount: Optional[str] = fields.sellCount_field
commentCount: Optional[str] = fields.commentCount_field
favcount: Optional[str] = fields.favcount_field
score1: Optional[str] = fields.score1_field
score2: Optional[str] = fields.score2_field
score3: Optional[str] = fields.score3_field
creditLevel: Optional[str] = fields.creditLevel_field
shopIcon: Optional[str] = fields.shopIcon_field
pcDescContent: Optional[str] = fields.pcDescContent_field
item_url: Optional[str] = fields.item_url_field
category_id: Optional[str] = fields.category_id_field
category_name: Optional[str] = fields.category_name_field
level_one_category_id: Optional[str] = fields.level_one_category_id_field
level_one_category_name: Optional[str] = fields.level_one_category_name_field
volume_shishi: Optional[str] = fields.volume_shishi_field
volume_quantian: Optional[str] = fields.volume_quantian_field
tk_sale_count: Optional[str] = fields.tk_sale_count_field
tkfee3: Optional[str] = fields.tkfee3_field
biaoqian: Optional[str] = fields.biaoqian_field
tag: Optional[str] = fields.tag_field
# --- source file: ztk_api/bang_dan_tui_jian_model.py (package: ztk-api) ---
from typing import Optional
from pydantic import BaseModel
from . import fields
class SearchModel(BaseModel):
code: Optional[str] = fields.code_field
type_one_id: Optional[str] = fields.type_one_id_field
tao_id: Optional[str] = fields.tao_id_field
title: Optional[str] = fields.title_field
jianjie: Optional[str] = fields.jianjie_field
pict_url: Optional[str] = fields.pict_url_field
user_type: Optional[str] = fields.user_type_field
seller_id: Optional[str] = fields.seller_id_field
shop_dsr: Optional[str] = fields.shop_dsr_field
volume: Optional[str] = fields.volume_field
size: Optional[str] = fields.size_field
quanhou_jiage: Optional[str] = fields.quanhou_jiage_field
date_time_yongjin: Optional[str] = fields.date_time_yongjin_field
tkrate3: Optional[str] = fields.tkrate3_field
yongjin_type: Optional[str] = fields.yongjin_type_field
coupon_id: Optional[str] = fields.coupon_id_field
coupon_start_time: Optional[str] = fields.coupon_start_time_field
coupon_end_time: Optional[str] = fields.coupon_end_time_field
coupon_info_money: Optional[str] = fields.coupon_info_money_field
coupon_total_count: Optional[str] = fields.coupon_total_count_field
coupon_remain_count: Optional[str] = fields.coupon_remain_count_field
coupon_info: Optional[str] = fields.coupon_info_field
juhuasuan: Optional[str] = fields.juhuasuan_field
taoqianggou: Optional[str] = fields.taoqianggou_field
haitao: Optional[str] = fields.haitao_field
jiyoujia: Optional[str] = fields.jiyoujia_field
jinpaimaijia: Optional[str] = fields.jinpaimaijia_field
pinpai: Optional[str] = fields.pinpai_field
pinpai_name: Optional[str] = fields.pinpai_name_field
yunfeixian: Optional[str] = fields.yunfeixian_field
nick: Optional[str] = fields.nick_field
small_images: Optional[str] = fields.small_images_field
white_image: Optional[str] = fields.white_image_field
tao_title: Optional[str] = fields.tao_title_field
provcity: Optional[str] = fields.provcity_field
shop_title: Optional[str] = fields.shop_title_field
zhibo_url: Optional[str] = fields.zhibo_url_field
sellCount: Optional[str] = fields.sellCount_field
commentCount: Optional[str] = fields.commentCount_field
favcount: Optional[str] = fields.favcount_field
score1: Optional[str] = fields.score1_field
score2: Optional[str] = fields.score2_field
score3: Optional[str] = fields.score3_field
creditLevel: Optional[str] = fields.creditLevel_field
shopIcon: Optional[str] = fields.shopIcon_field
pcDescContent: Optional[str] = fields.pcDescContent_field
item_url: Optional[str] = fields.item_url_field
category_id: Optional[str] = fields.category_id_field
category_name: Optional[str] = fields.category_name_field
level_one_category_id: Optional[str] = fields.level_one_category_id_field
level_one_category_name: Optional[str] = fields.level_one_category_name_field
tkfee3: Optional[str] = fields.tkfee3_field
biaoqian: Optional[str] = fields.biaoqian_field
tag: Optional[str] = fields.tag_field
presale_discount_fee_text: Optional[str] = fields.presale_discount_fee_text_field
presale_tail_end_time: Optional[str] = fields.presale_tail_end_time_field
presale_tail_start_time: Optional[str] = fields.presale_tail_start_time_field
presale_end_time: Optional[str] = fields.presale_end_time_field
presale_start_time: Optional[str] = fields.presale_start_time_field
presale_deposit: Optional[str] = fields.presale_deposit_field
# --- source file: ztk_api/search_model.py (package: ztk-api) ---
from typing import Optional
from pydantic import BaseModel
from . import fields
class TmallChaoShiModel(BaseModel):
code: Optional[str] = fields.code_field
type_one_id: Optional[str] = fields.type_one_id_field
tao_id: Optional[str] = fields.tao_id_field
title: Optional[str] = fields.title_field
jianjie: Optional[str] = fields.jianjie_field
pict_url: Optional[str] = fields.pict_url_field
user_type: Optional[str] = fields.user_type_field
seller_id: Optional[str] = fields.seller_id_field
shop_dsr: Optional[str] = fields.shop_dsr_field
volume: Optional[str] = fields.volume_field
size: Optional[str] = fields.size_field
quanhou_jiage: Optional[str] = fields.quanhou_jiage_field
date_time_yongjin: Optional[str] = fields.date_time_yongjin_field
tkrate3: Optional[str] = fields.tkrate3_field
yongjin_type: Optional[str] = fields.yongjin_type_field
coupon_id: Optional[str] = fields.coupon_id_field
coupon_start_time: Optional[str] = fields.coupon_start_time_field
coupon_end_time: Optional[str] = fields.coupon_end_time_field
coupon_info_money: Optional[str] = fields.coupon_info_money_field
coupon_total_count: Optional[str] = fields.coupon_total_count_field
coupon_remain_count: Optional[str] = fields.coupon_remain_count_field
coupon_info: Optional[str] = fields.coupon_info_field
juhuasuan: Optional[str] = fields.juhuasuan_field
taoqianggou: Optional[str] = fields.taoqianggou_field
haitao: Optional[str] = fields.haitao_field
jiyoujia: Optional[str] = fields.jiyoujia_field
jinpaimaijia: Optional[str] = fields.jinpaimaijia_field
pinpai: Optional[str] = fields.pinpai_field
pinpai_name: Optional[str] = fields.pinpai_name_field
yunfeixian: Optional[str] = fields.yunfeixian_field
nick: Optional[str] = fields.nick_field
small_images: Optional[str] = fields.small_images_field
white_image: Optional[str] = fields.white_image_field
tao_title: Optional[str] = fields.tao_title_field
provcity: Optional[str] = fields.provcity_field
shop_title: Optional[str] = fields.shop_title_field
zhibo_url: Optional[str] = fields.zhibo_url_field
sellCount: Optional[str] = fields.sellCount_field
commentCount: Optional[str] = fields.commentCount_field
favcount: Optional[str] = fields.favcount_field
score1: Optional[str] = fields.score1_field
score2: Optional[str] = fields.score2_field
score3: Optional[str] = fields.score3_field
creditLevel: Optional[str] = fields.creditLevel_field
shopIcon: Optional[str] = fields.shopIcon_field
pcDescContent: Optional[str] = fields.pcDescContent_field
item_url: Optional[str] = fields.item_url_field
category_id: Optional[str] = fields.category_id_field
category_name: Optional[str] = fields.category_name_field
level_one_category_id: Optional[str] = fields.level_one_category_id_field
level_one_category_name: Optional[str] = fields.level_one_category_name_field
tkfee3: Optional[str] = fields.tkfee3_field
biaoqian: Optional[str] = fields.biaoqian_field
tag: Optional[str] = fields.tag_field
presale_discount_fee_text: Optional[str] = fields.presale_discount_fee_text_field
presale_tail_end_time: Optional[str] = fields.presale_tail_end_time_field
presale_tail_start_time: Optional[str] = fields.presale_tail_start_time_field
presale_end_time: Optional[str] = fields.presale_end_time_field
presale_start_time: Optional[str] = fields.presale_start_time_field
presale_deposit: Optional[str] = fields.presale_deposit_field
# --- source file: ztk_api/tmall_chao_shi_model.py (package: ztk-api) ---
import json
from typing import Optional, List
import requests
import structlog
from .auth_account_args import AuthAccountArgs
from .bang_dan_tui_jian_args import BangDanTuiJianArgs
from .bang_dan_tui_jian_resp import BangDanTuiJianResp
from .channel_account_info_args import ChannelAccountInfoArgs
from .channel_id_list_args import ChannelIdListArgs
from .channel_invite_code_args import ChannelInviteCodeArgs
from .channel_save_record_args import ChannelSaveRecordArgs
from .gao_yong_args import GaoYongArgs
from .guess_you_like_args import GuessYouLikeArgs
from .guess_you_like_resp import GuessYouLikeResp
from .item_detail_args import ItemDetailArgs
from .item_detail_resp import ItemDetailResp
from .item_detail_v2_args import ItemDetailV2Args
from .item_detail_v2_resp import ItemDetailV2Resp
from .ju_hua_suan_args import JuHuaSuanArgs
from .ju_hua_suan_resp import JuHuaSuanResp
from .keyword_args import KeywordArgs
from .new_order_args import NewOrderArgs
from .nine_nine_args import NineNineArgs
from .nine_nine_resp import NineNineResp
from .search_args import SearchArgs
from .search_resp import SearchResp
from .suggest_args import SuggestArgs
from .tkl_create_args import TKLCreateArgs
from .tkl_create_resp import TKLCreateResp
from .tmall_chao_shi_args import TMallChaoShiArgs
from .tmall_chao_shi_resp import TmallChaoShiResp
from .tmall_shang_pin_args import TMallShangPinArgs
from .tmall_shang_pin_resp import TmallShangPinResp
__all__ = ["ZTKSync"]
class ZTKSync(object):
"""
折淘客 开放平台 API
"""
def __init__(self, ztk_sid: str, logger: structlog.stdlib.BoundLogger):
"""
:param logger: 日志记录器
"""
self._http: requests.Session = requests.session()
self._sid = ztk_sid
self._logger = logger
def auth_account(self, args: AuthAccountArgs):
"""
获取账户授权列表API接口
"""
args.sid = self._ztk_sid()
url = args.to_http_url_sync()
j = self._do_query(url)
return j
def bang_dan_tui_jian(self, args: BangDanTuiJianArgs) -> BangDanTuiJianResp:
"""
全天销量榜API:返回24小时内销量榜单商品列表(前600个),返回佣金≥15%,动态描述分≥4.6的商品列表。
"""
url = args.to_http_url_sync()
j = self._do_query(url)
return BangDanTuiJianResp.from_dict(j)
def channel_account_info(self, args: ChannelAccountInfoArgs):
args.sid = self._ztk_sid()
url = args.to_http_url_sync()
j = self._do_query(url)
return j
def channel_id_list(self, args: ChannelIdListArgs):
args.sid = self._ztk_sid()
url = args.to_http_url_sync()
j = self._do_query(url)
return j
def channel_invite_code(self, args: ChannelInviteCodeArgs):
"""
淘宝客邀请码生成API:生成邀请码,然后让用户进行渠道备案,生成新的渠道ID
"""
args.sid = self._ztk_sid()
url = args.to_http_url_sync()
j = self._do_query(url)
return j
def channel_save_record(self, args: ChannelSaveRecordArgs):
"""
淘宝客渠道备案API:用户进行渠道备案,生成新的渠道ID。
"""
url = args.to_http_url_sync()
j = self._do_query(url)
return j
def gao_yong(self, args: GaoYongArgs):
"""
高佣转链API (商品ID)
"""
args.sid = self._ztk_sid()
url = args.to_http_url_sync()
j = self._do_query(url)
return j
def guess_you_like(self, args: GuessYouLikeArgs) -> GuessYouLikeResp:
"""
猜你喜欢
"""
url = args.to_http_url_sync()
j = self._do_query(url)
return GuessYouLikeResp.from_dict(j)
def item_detail(self, args: ItemDetailArgs) -> ItemDetailResp:
"""
折淘客 全网商品详情接口参数
"""
args.sid = self._ztk_sid()
url = args.to_http_url_sync()
j = self._do_query(url)
try:
return ItemDetailResp.from_ztk_resp(j)
except Exception as e:
self._logger.bind(exec=e).error("item detail error")
raise
def item_detail_v2(self, args: ItemDetailV2Args) -> ItemDetailV2Resp:
args.sid = self._ztk_sid()
url = args.to_http_url_sync()
j = self._do_query(url)
return ItemDetailV2Resp.from_dict(j)
def ju_hua_suan(self, args: JuHuaSuanArgs) -> JuHuaSuanResp:
"""
聚划算商品API:返回聚划算商品列表,返回佣金≥15%,动态描述分≥4.6的商品列表。
"""
url = args.to_http_url_sync()
j = self._do_query(url)
return JuHuaSuanResp.from_dict(j)
def keyword(self) -> list:
"""
关键词词典API
"""
args = KeywordArgs()
url = args.to_http_url_sync()
j = self._do_query(url)
if isinstance(j, dict):
if j["status"] == 200:
content = j["content"]
assert isinstance(content, list)
return list(map(lambda x: x["keywords"], content))
return []
def new_order(self, args: NewOrderArgs) -> dict:
"""
新订单获取
"""
args.sid = self._ztk_sid()
url = args.to_http_url_sync()
j = self._do_query(url)
return j
def nine_nine(self, args: NineNineArgs):
"""
9.9元商品API:返回购买价格≤9.9元的商品列表,返回佣金≥15%,动态描述分≥4.6的商品列表。
"""
url = args.to_http_url_sync()
j = self._do_query(url)
return NineNineResp.from_dict(j)
def search(self, args: SearchArgs) -> SearchResp:
"""
全网搜索
"""
url = args.to_http_url_sync()
j = self._do_query(url)
return SearchResp.from_dict(j)
def suggest(self, args: SuggestArgs) -> Optional[List[str]]:
"""
联想词
"""
url = args.to_http_url_sync()
j = self._do_query(url)
if not isinstance(j, dict):
self._logger.warning("request ztk suggest failed")
return None
result = j["result"]
return list(map(lambda item: item[0], result))
def tmall_chao_shi(self, args: TMallChaoShiArgs) -> TmallChaoShiResp:
"""
天猫超市商品API:返回天猫超市商品列表,动态描述分≥4.6的商品列表,根据不同参数可返回天猫超市单件免邮商品。
"""
url = args.to_http_url_sync()
j = self._do_query(url)
return TmallChaoShiResp.from_dict(j)
def tmall_shang_pin(self, args: TMallShangPinArgs) -> TmallShangPinResp:
"""
天猫商品API:返回天猫商品列表,返回佣金≥15%,动态描述分≥4.6的商品列表。
"""
url = args.to_http_url_sync()
j = self._do_query(url)
return TmallShangPinResp.from_dict(j)
def tkl_create(self, args: TKLCreateArgs) -> TKLCreateResp:
"""
淘口令生成API:二合一链接、长链接、短链接等各种淘宝高佣链接,生成淘口令
"""
args.sid = self._ztk_sid()
url = args.to_http_url_sync()
j = self._do_query(url)
return TKLCreateResp.from_dict(j)
def do_query(self, url: str) -> dict:
"""
这是给外部使用的函数
:param url:
:return:
"""
return self._do_query(url)
def _do_query(self, url: str) -> dict:
    ret = self._http.get(url)
    if ret.ok:
        return json.loads(ret.text)
    # 请求失败时记录告警并返回空 dict, 与返回类型注解保持一致
    self._logger.warning("ztk query failed", url=url, status_code=ret.status_code)
    return {}
def _ztk_sid(self) -> Optional[str]:
return self._sid
# --- source file: ztk_api/ztk_sync.py (package: ztk-api) ---
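# A minimal usage sketch for the synchronous client above; the sid value is a
# placeholder and the logger comes from structlog's default configuration.
import structlog

from ztk_api.ztk_sync import ZTKSync


def demo_sync_client() -> None:
    logger = structlog.get_logger()
    ztk = ZTKSync(ztk_sid="your-ztk-sid", logger=logger)
    # keyword() takes no arguments and returns the keyword dictionary as a list
    print(ztk.keyword())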
from pydantic import Field
__all__ = ["page_field", "page_size_field", "cid_field", "sort_fields"]
token = Field(..., title="认证 TOKEN", description="用户用来认证的 token, 登陆的时候获取到的")
token_optional = Field(None, title="认证 TOKEN", description="用户用来认证的 token, 登陆的时候获取到的")
page_field = Field(1, gt=0, title="分页", description="第几页面")
page_size_field = Field(20, ge=10, le=50, title="每页数据条数", description="可自定义 10~50 之间")
cid_field = Field(
None,
title="一级商品分类",
description="""一级商品分类
值为空:全部商品
1:女装
2:母婴
3:美妆
4:居家日用
5:鞋品
6:美食
7:文娱车品
8:数码家电
9:男装
10:内衣
11:箱包
12:配饰
13:户外运动
14:家装家纺
""",
)
sort_fields = Field(
"new",
title="商品排序方式",
description="""
new:按照综合排序\n
total_sale_num_asc:按照总销量从小到大排序\n
total_sale_num_desc:按照总销量从大到小排序\n
sale_num_asc:按照月销量从小到大排序\n
sale_num_desc:按照月销量从大到小排序\n
commission_rate_asc:按照佣金比例从小到大排序\n
commission_rate_desc:按照佣金比例从大到小排序\n
price_asc:按照价格从小到大排序\n
price_desc:按照价格从大到小排序\n
coupon_info_money_asc:按照优惠券金额从小到大排序\n
coupon_info_money_desc:按照优惠券金额从大到小排序\n
shop_level_asc:按照店铺等级从低到高排序\n
shop_level_desc:按照店铺等级从高到低排序\n
tkfee_asc:按照返佣金额从低到高排序\n
tkfee_desc:按照返佣金额从高到低排序\n
code:按照code值从大到小排序\n
date_time:按照更新时间排序\n
random:按照随机排序\n
""",
)
code_field = Field(..., title="折淘客编号")
type_one_id_field = Field(..., title="分类 id, 参见: cid")
tao_id_field = Field(
..., title="淘宝的商品 id", description="都是同一个意思: item_id/num_iid/tao_id"
)
title_field = Field(..., title="商品的标题")
jianjie_field = Field(..., title="商品的简介")
pict_url_field = Field(..., title="商品的主图")
user_type_field = Field(..., title="卖家类型", description="0表示集市,1表示商城")
seller_id_field = Field(..., title="商家的 id")
shop_dsr_field = Field(..., title="店铺 dsr 评分")
volume_field = Field(..., title="30天销量")
size_field = Field(..., title="折扣价")
quanhou_jiage_field = Field(..., title="券后价格")
date_time_yongjin_field = Field(
..., title="数据更新时间", description="例如: 2020/06/25 20:35:46"
)
tkrate3_field = Field(..., title="佣金比例", description="例如: 25.00 表示 25.0%")
yongjin_type_field = Field(..., title="佣金类型", description="例如: MKT")
coupon_id_field = Field(..., title="优惠券ID", description="")
coupon_start_time_field = Field(..., title="优惠券开始时间", description="例如: 2020-10-10")
coupon_end_time_field = Field(..., title="优惠券结束时间", description="例如: 2020-10-10")
coupon_info_money_field = Field(..., title="优惠券金额", description="例如: 70.00 单位: 元")
coupon_total_count_field = Field(..., title="优惠券总量")
coupon_remain_count_field = Field(..., title="优惠券剩余数量")
coupon_info_field = Field(..., title="优惠券信息")
juhuasuan_field = Field(..., title="是否为 聚划算", description="0 否 1 是, 注意: 是字符串")
taoqianggou_field = Field(..., title="是否为 淘抢购", description="0 否 1 是, 注意: 是字符串")
haitao_field = Field(..., title="是否为 海淘", description="0 否 1 是, 注意: 是字符串")
jiyoujia_field = Field(..., title="是否极有家", description="0 否 1 是, 注意: 是字符串")
jinpaimaijia_field = Field(..., title="是否为 金牌卖家", description="0 否 1 是, 注意: 是字符串")
pinpai_field = Field(..., title="是否为 品牌", description="0 否 1 是, 注意: 是字符串")
pinpai_name_field = Field(..., title="品牌名称")
yunfeixian_field = Field(..., title="是否有运费险", description="1有")
nick_field = Field(..., title="店铺名称")
small_images_field = Field(
..., title="商品组图/商品小图列表", description="商品组图, 为多个 url, 使用 | 分割"
)
white_image_field = Field(..., title="商品白底图")
tao_title_field = Field(..., title="商品长标题")
provcity_field = Field(..., title="省份 城市")
shop_title_field = Field(..., title="商家名称")
zhibo_url_field = Field(..., title="直播地址")
sellCount_field = Field(..., title="销售数量")
commentCount_field = Field(..., title="评论数量")
favcount_field = Field(..., title="喜欢数量")
score1_field = Field(..., title="宝贝描述分")
score2_field = Field(..., title="卖家服务分")
score3_field = Field(..., title="物流服务分")
creditLevel_field = Field(
...,
title="店铺等级",
description="""1-20:
一星 二星 三星 四星 五星
一钻 二钻 三钻 四钻 五钻
一皇冠 二皇冠 三皇冠 四皇冠 五皇冠
一金冠 二金冠 三金冠 四金冠 五金冠
""",
)
shopIcon_field = Field(..., title="商家图标 URL")
pcDescContent_field = Field(..., title="内容 url", description="多个 url 地址 使用 | 分隔开")
item_url_field = Field(..., title="商品地址")
category_id_field = Field(..., title="分类 id")
category_name_field = Field(..., title="分类名称")
level_one_category_id_field = Field(..., title="???")
level_one_category_name_field = Field(..., title="???")
tkfee3_field = Field(..., title="???")
volume_shishi_field = Field(..., title="两小时销量")
volume_quantian_field = Field(..., title="全天销量")
tk_sale_count_field = Field(..., title="人气值")
biaoqian_field = Field(..., title="店铺活动", description="满300元,省30元")
tag_field = Field(
...,
title="朋友圈文案",
description="例如: 赶紧拯救你厨房的油腻吧😂植护🌿加厚厨房抽纸高密度纤维纸张,吸油💧很不错5包装|¥14.8元💰✔厨房小帮手,用处多多",
)
presale_discount_fee_text_field = Field(
None, title="预售商品-商品优惠信息", description="例如: 付定金立减20元"
)
presale_tail_end_time_field = Field(None, title="预售商品-付定金结束时间(毫秒)")
presale_tail_start_time_field = Field(None, title="预售商品-付尾款开始时间(毫秒)")
presale_end_time_field = Field(None, title="预售结束时间")
presale_start_time_field = Field(None, title="预售开始时间")
presale_deposit_field = Field(None, title="预售 ???")
cat_leaf_name_field = Field(..., title="叶子类目名称", description="例如: 情趣内衣")
cat_name_field = Field(..., title="一级类目名称", description="例如: 女装")
ju_online_end_time_field = Field(..., title="聚划算信息-聚淘开始时间(毫秒)")
ju_online_start_time_field = Field(..., title="聚划算信息-聚淘结束时间(毫秒)")
ju_pre_show_end_time_field = Field(..., title="聚划算信息-商品预热结束时间(毫秒)")
ju_pre_show_start_time_field = Field(..., title="聚划算信息-商品预热开始时间(毫秒)")
material_lib_type_field = Field(
...,
title="商品库类型",
description="支持多库类型输出,以英文逗号分隔“,”分隔\n1:营销商品主推库\n2. 内容商品库,如果值为空则不属于1,2这两种商品类型",
)
num_iid_field = tao_id_field
reserve_price_field = Field(..., title="商品一口价格")
tmall_play_activity_end_time_field = Field(..., title="天猫限时抢可售 -结束时间(毫秒)")
tmall_play_activity_start_time_field = Field(..., title="天猫限时抢可售 -开始时间(毫秒)")
zk_final_price_field = Field(..., title="折扣价(元)若属于预售商品,付定金时间内,折扣价=预售价")
kuadian_promotion_info_field = Field(
..., title="跨店满减信息", description='例如: ["每100减20","每200减50"]'
)
date_time_field = Field(..., title="数据添加时间", description="")
min_commission_rate_field = Field(..., title="", description="")
order_type = Field(..., title="订单类型", description="")
taobao_url_field = Field(..., title="商品URL")
coupon_click_url_field = Field(..., title="二合一推广链接,已经自动拼接S券")
shorturl_field = Field(..., title="淘宝短链接")
tkl_field = Field(..., title="淘口令")
# --- source file: ztk_api/fields.py (package: ztk-api) ---
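# Several field descriptions above note that multi-value columns such as
# ``small_images`` and ``pcDescContent`` pack multiple URLs into one string
# separated by "|".  A small helper like the sketch below could be used by
# consumers of these models; the function name is illustrative and not part of
# this module.
from typing import List


def split_pipe_separated(value: str) -> List[str]:
    """Split a "|"-separated API string into a list, tolerating empty input."""
    if not value:
        return []
    return [part for part in value.split("|") if part]


# split_pipe_separated("https://a.jpg|https://b.jpg") -> ["https://a.jpg", "https://b.jpg"]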
from typing import Optional
from pydantic import BaseModel
from . import fields
class BatchItemModel(BaseModel):
code: Optional[str] = fields.code_field
type_one_id: Optional[str] = fields.type_one_id_field
tao_id: Optional[str] = fields.tao_id_field
title: Optional[str] = fields.title_field
jianjie: Optional[str] = fields.jianjie_field
pict_url: Optional[str] = fields.pict_url_field
user_type: Optional[str] = fields.user_type_field
seller_id: Optional[str] = fields.seller_id_field
shop_dsr: Optional[str] = fields.shop_dsr_field
volume: Optional[str] = fields.volume_field
size: Optional[str] = fields.size_field
quanhou_jiage: Optional[str] = fields.quanhou_jiage_field
date_time_yongjin: Optional[str] = fields.date_time_yongjin_field
tkrate3: Optional[str] = fields.tkrate3_field
yongjin_type: Optional[str] = fields.yongjin_type_field
coupon_id: Optional[str] = fields.coupon_id_field
coupon_start_time: Optional[str] = fields.coupon_start_time_field
coupon_end_time: Optional[str] = fields.coupon_end_time_field
coupon_info_money: Optional[str] = fields.coupon_info_money_field
coupon_total_count: Optional[str] = fields.coupon_total_count_field
coupon_remain_count: Optional[str] = fields.coupon_remain_count_field
coupon_info: Optional[str] = fields.coupon_info_field
juhuasuan: Optional[str] = fields.juhuasuan_field
taoqianggou: Optional[str] = fields.taoqianggou_field
haitao: Optional[str] = fields.haitao_field
jiyoujia: Optional[str] = fields.jiyoujia_field
jinpaimaijia: Optional[str] = fields.jinpaimaijia_field
pinpai: Optional[str] = fields.pinpai_field
pinpai_name: Optional[str] = fields.pinpai_name_field
yunfeixian: Optional[str] = fields.yunfeixian_field
nick: Optional[str] = fields.nick_field
small_images: Optional[str] = fields.small_images_field
white_image: Optional[str] = fields.white_image_field
tao_title: Optional[str] = fields.tao_title_field
provcity: Optional[str] = fields.provcity_field
shop_title: Optional[str] = fields.shop_title_field
zhibo_url: Optional[str] = fields.zhibo_url_field
sellCount: Optional[str] = fields.sellCount_field
commentCount: Optional[str] = fields.commentCount_field
favcount: Optional[str] = fields.favcount_field
score1: Optional[str] = fields.score1_field
score2: Optional[str] = fields.score2_field
score3: Optional[str] = fields.score3_field
creditLevel: Optional[str] = fields.creditLevel_field
shopIcon: Optional[str] = fields.shopIcon_field
pcDescContent: Optional[str] = fields.pcDescContent_field
item_url: Optional[str] = fields.item_url_field
category_id: Optional[str] = fields.category_id_field
category_name: Optional[str] = fields.category_name_field
level_one_category_id: Optional[str] = fields.level_one_category_id_field
level_one_category_name: Optional[str] = fields.level_one_category_name_field
tkfee3: Optional[str] = fields.tkfee3_field
biaoqian: Optional[str] = fields.biaoqian_field
tag: Optional[str] = fields.tag_field
date_time: Optional[str] = fields.date_time_field
presale_discount_fee_text: Optional[str] = fields.presale_discount_fee_text_field
presale_tail_end_time: Optional[str] = fields.presale_tail_end_time_field
presale_tail_start_time: Optional[str] = fields.presale_tail_start_time_field
presale_end_time: Optional[str] = fields.presale_end_time_field
presale_start_time: Optional[str] = fields.presale_start_time_field
presale_deposit: Optional[str] = fields.presale_deposit_field
min_commission_rate: Optional[str] = fields.min_commission_rate_field
# --- source file: ztk_api/batch_item_model.py (package: ztk-api) ---
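# A hedged sketch of how one record from a ZTK API response could be loaded
# into the model above; ``raw`` is assumed to be a single element of the
# response's ``content`` list carrying every declared field, and the helper
# name is illustrative.
def parse_batch_item(raw: dict) -> BatchItemModel:
    # pydantic validates and coerces each declared field on construction
    return BatchItemModel(**raw)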
===============
ZTM api wrapper
===============
Fetch data from ZTM API
.. image:: https://img.shields.io/pypi/v/ztm.svg
:target: https://pypi.python.org/pypi/ztm
.. image:: https://img.shields.io/travis/wooyek/ztm.svg
:target: https://travis-ci.org/wooyek/ztm
.. image:: https://readthedocs.org/projects/ztm/badge/?version=latest
:target: https://ztm.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
.. image:: https://coveralls.io/repos/github/wooyek/ztm/badge.svg?branch=develop
:target: https://coveralls.io/github/wooyek/ztm?branch=develop
:alt: Coveralls.io coverage
.. image:: https://codecov.io/gh/wooyek/ztm/branch/develop/graph/badge.svg
:target: https://codecov.io/gh/wooyek/ztm
:alt: CodeCov coverage
.. image:: https://api.codeclimate.com/v1/badges/0e7992f6259bc7fd1a1a/maintainability
:target: https://codeclimate.com/github/wooyek/ztm/maintainability
:alt: Maintainability
.. image:: https://img.shields.io/github/license/wooyek/ztm.svg
:target: https://github.com/wooyek/ztm/blob/develop/LICENSE
:alt: License
.. image:: https://img.shields.io/twitter/url/https/github.com/wooyek/ztm.svg?style=social
:target: https://twitter.com/intent/tweet?text=Wow:&url=https://github.com/wooyek/ztm
:alt: Tweet about this project
.. image:: https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg
:target: https://saythanks.io/to/wooyek
Features
--------
* Fetch GPS position and append to CSV file
Quickstart
----------
Install and run ZTM api wrapper::
$ curl https://raw.githubusercontent.com/mitsuhiko/pipsi/master/get-pipsi.py | python
$ pipsi install ztm
$ ztm --apikey <APIKEY> fetch --line 150 --line 250
$ cat ztm.csv
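The collected positions can then be processed with plain Python; the exact columns depend on the lines fetched, so this sketch simply prints each row as-is::

    import csv

    with open('ztm.csv', newline='') as fh:
        for row in csv.reader(fh):
            print(row)  # one GPS position record per row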
Running Tests
-------------
Does the code actually work?::
$ git clone https://github.com/wooyek/ztm.git
$ cd ztm
$ curl https://raw.githubusercontent.com/mitsuhiko/pipsi/master/get-pipsi.py | python
$ pipsi install pew
$ pew new -p python3 -a $(pwd) $(pwd | xargs basename)
$ pip install -r requirements/development.txt
$ pipsi install tox
$ tox
We recommend using pipsi_ for installing pew_ and tox_, but a legacy approach of creating a virtualenv and installing the requirements manually should also work.
Please install `requirements/development.txt` to set up a virtual environment for testing and development.
Credits
-------
This package was created with Cookiecutter_ and the `wooyek/cookiecutter-pylib`_ project template.
.. _Cookiecutter: https://github.com/audreyr/cookiecutter
.. _`wooyek/cookiecutter-pylib`: https://github.com/wooyek/cookiecutter-pylib
.. _`pipsi`: https://github.com/mitsuhiko/pipsi
.. _`pew`: https://github.com/berdario/pew
.. _`tox`: https://tox.readthedocs.io/en/latest/
.. source file: ztm-0.1.12/README.rst (package: ztm)
.. highlight:: shell
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every
little bit helps, and credit will always be given.
You can contribute in many ways:
Types of Contributions
----------------------
Report Bugs
~~~~~~~~~~~
Report bugs at https://github.com/wooyek/ztm/issues.
If you are reporting a bug, please include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Fix Bugs
~~~~~~~~
Look through the GitHub issues for bugs. Anything tagged with "bug"
and "help wanted" is open to whoever wants to implement it.
Implement Features
~~~~~~~~~~~~~~~~~~
Look through the GitHub issues for features. Anything tagged with "enhancement"
and "help wanted" is open to whoever wants to implement it.
Write Documentation
~~~~~~~~~~~~~~~~~~~
ZTM api wrapper could always use more documentation, whether as part of the
official ZTM api wrapper docs, in docstrings, or even on the web in blog posts,
articles, and such.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an issue at https://github.com/wooyek/ztm/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
are welcome :)
Get Started!
------------
Ready to contribute? Here's how to set up `ztm` for local development.
1. Fork the `ztm` repo on github.com
2. Clone your fork locally::
$ git clone [email protected]:your_name_here/ztm.git
3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::
$ mkvirtualenv ztm
$ cd ztm/
$ python setup.py develop
4. Create a branch for local development::
$ git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
5. When you're done making changes, check that your changes pass flake8 and the tests, including testing other Python versions with tox::
$ flake8 src tests
$ tox -e check
$ pytest
$ tox
To get flake8 and tox, just pip install them into your virtualenv.
6. Commit your changes and push your branch to GitHub::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push origin name-of-your-bugfix-or-feature
7. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in README.rst.
3. The pull request should work for Python versions mentioned in tox.ini file. Check
https://travis-ci.org/wooyek/ztm/pull_requests
and make sure that the tests pass for all supported Python versions.
Tips
----
To run a subset of tests::
$ py.test tests/test_*
.. source file: ztm-0.1.12/CONTRIBUTING.rst (package: ztm)
import logging
import os
import shutil
import sys
import webbrowser
from collections import OrderedDict
from itertools import chain
from pathlib import Path
# noinspection PyPackageRequirements
from urllib.request import pathname2url
from invoke import call, task
logging.basicConfig(format='%(asctime)s %(levelname)-7s %(thread)-5d %(filename)s:%(lineno)s | %(funcName)s | %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
# logging.getLogger().setLevel(logging.INFO)
# logging.disable(logging.NOTSET)
logging.debug('Loading %s', __name__)
log = logging.getLogger(__name__)
is_win = sys.platform == 'win32'
ROOT_DIR = Path(__file__).parent.absolute()
def get_current_version():
from configparser import ConfigParser
cfg = ConfigParser()
cfg.read(str(Path(ROOT_DIR) / 'setup.cfg'))
current_version = cfg.get('bumpversion', 'current_version')
return current_version
# noinspection PyUnusedLocal
@task
def version(ctx):
print("Version: " + get_current_version())
@task
def clean(ctx):
"""Remote temporary files"""
for item in chain(Path(ROOT_DIR).rglob("*.pyc"), Path(ROOT_DIR).rglob("*.pyo")):
logging.debug("Deleting: %s", item)
item.unlink()
log.info("Removing __pycache__ in sys.path folders")
for folder in sys.path:
for item in Path(folder).rglob("__pycache__"):
logging.debug("Deleting: %s", item)
shutil.rmtree(str(item), ignore_errors=True)
folders = (
ROOT_DIR / 'build',
ROOT_DIR / 'example_project' / '.eggs',
ROOT_DIR / '.eggs',
ROOT_DIR / '.tox',
ROOT_DIR / '.tmp',
ROOT_DIR / '.coverage',
ROOT_DIR / '.htmlcov',
ROOT_DIR / '.pytest_cache',
ROOT_DIR / '.cache',
)
for folder in folders:
print("Removing folder {}".format(folder))
shutil.rmtree(str(folder), ignore_errors=True)
ctx.run('git checkout -- .tmp')
@task
def check(ctx):
"""Check project codebase cleanness"""
ctx.run("flake8 src tests setup.py manage.py")
ctx.run("isort --check-only --diff --recursive src tests setup.py")
ctx.run("python setup.py check --strict --metadata --restructuredtext")
ctx.run("check-manifest --ignore .idea,.idea/* .")
ctx.run("pytest --cov=src --cov=tests --cov-fail-under=5")
@task
def coverage(ctx):
ctx.run("pytest --cov=src --cov=tests --cov-fail-under=5 --cov-report html")
webbrowser.open("file://" + pathname2url(str(ROOT_DIR / '.tmp' / 'coverage' / 'index.html')))
@task
def isort(ctx):
"""Check project codebase cleanness"""
ctx.run("isort --recursive src tests setup.py")
@task
def detox(ctx):
"""Run detox with a subset of envs and report run separately"""
envs = ctx.run("tox -l").stdout.splitlines()
envs.remove('clean')
envs.remove('report')
envs = [e for e in envs if not e.startswith('py2')]
log.info("Detox a subset of environments: %s", envs)
ctx.run("tox -e clean")
ctx.run("detox --skip-missing-interpreters -e " + ",".join(envs))
ctx.run("tox -e report")
@task
def register_pypi(ctx):
"""Register project on PyPi"""
ctx.run("git checkout master")
ctx.run("python setup.py register -r pypi")
@task
def register_pypi_test(ctx):
"""Register project on TEST PyPi"""
ctx.run("git checkout master")
ctx.run("python setup.py register -r pypitest")
@task
def upload_pypi(ctx):
"""Upload to PyPi"""
ctx.run("python setup.py sdist upload -r pypi")
ctx.run("python setup.py bdist_wheel upload -r pypi")
@task(clean)
def dist(ctx):
"""Build setuptools dist package"""
ctx.run("python setup.py sdist")
ctx.run("python setup.py bdist_wheel")
ctx.run("ls -l dist")
@task(clean)
def install(ctx):
"""Install setuptools dist package"""
ctx.run("python setup.py install")
@task
def sync(ctx):
"""Sync master and develop branches in both directions"""
ctx.run("git checkout develop")
ctx.run("git pull origin develop --verbose")
ctx.run("git checkout master")
ctx.run("git pull origin master --verbose")
ctx.run("git checkout develop")
ctx.run("git merge master --verbose")
ctx.run("git checkout develop")
@task(sync)
def sync_master(ctx):
ctx.run("git checkout master")
ctx.run("git merge develop --verbose")
ctx.run("git checkout develop")
ctx.run("git merge master --verbose")
ctx.run("git push origin develop --verbose")
ctx.run("git push origin master --verbose")
ctx.run("git push --follow-tags")
@task()
def bump(ctx):
"""Increment version number"""
# ctx.run("bumpversion patch --no-tag")
ctx.run("bumpversion patch")
@task()
def pip_compile(ctx):
"""Upgrade frozen requirements to the latest version"""
ctx.run('pip-compile requirements/production.txt -o requirements/lock/production.txt --verbose --upgrade')
ctx.run('sort requirements/lock/production.txt -o requirements/lock/production.txt')
ctx.run('git add requirements/lock/*.txt')
if ctx.run('git diff-index --quiet HEAD', warn=True).exited != 0:
ctx.run('git commit -m "Requirements compiled by pip-compile" --allow-empty')
@task()
def pipenv(ctx):
"""Upgrade frozen requirements to the latest version"""
ctx.run('pipenv install -r requirements/production.txt')
ctx.run('pipenv install --dev -r requirements/development.txt')
ctx.run('pipenv lock --requirements > requirements/lock/production.txt')
ctx.run('pipenv lock --requirements --dev | grep -v "/multiinfo-python" -- > requirements/lock/development.txt')
ctx.run('pipenv graph --reverse -- > requirements/lock/graph.txt')
ctx.run('sort requirements/lock/production.txt -o requirements/lock/production.txt')
ctx.run('sort requirements/lock/development.txt -o requirements/lock/development.txt')
ctx.run('git add Pipfile Pipfile.lock requirements/lock/*.txt')
ctx.run('git commit -m "Requirements locked by pipenv"')
# noinspection PyUnusedLocal
@task(check, sync, detox)
def release_start(ctx):
"""Start a release cycle with publishing a release branch"""
ctx.run("git flow release start v{}-release".format(get_current_version()))
ctx.run("git merge master --verbose")
ctx.run("bumpversion patch --no-tag --verbose ")
ctx.run("git flow release --verbose publish")
# noinspection PyUnusedLocal
@task(check, sync, detox, post=[])
def release_finish(ctx):
"""Finish a release cycle with publishing a release branch"""
ctx.run("git flow release finish --fetch --push")
# noinspection PyUnusedLocal
@task(isort, check, pip_compile, sync, detox, bump, sync_master)
def release(ctx):
"""Build new package version release and sync repo"""
# noinspection PyUnusedLocal
@task(release, post=[upload_pypi])
def publish(ctx):
"""Merge develop, create and upload new version"""
ctx.run("git checkout master")
ctx.run("git merge develop --verbose") | ztm | /ztm-0.1.12.tar.gz/ztm-0.1.12/tasks.py | tasks.py |
.. highlight:: shell
============
Installation
============
Stable release
--------------
To install ZTM api wrapper, run this command in your terminal:
.. code-block:: console
$ pip install ztm
This is the preferred method to install ZTM api wrapper, as it will always install the most recent stable release.
If you don't have `pip`_ installed, this `Python installation guide`_ can guide
you through the process.
.. _pip: https://pip.pypa.io
.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/
From sources
------------
You can either clone the public repository:
.. code-block:: console
$ git clone [email protected]:wooyek/ztm.git
Or download the source from the `project website`_. Once you have a copy of the source, you can install it with:
.. code-block:: console
$ python setup.py install
.. _project website: https://github.com/wooyek/ztm
.. source file: ztm-0.1.12/docs/installation.rst (package: ztm)
==================================================
ztools
==================================================
ToolBox for Python, Easy to Use.
Overview
========
The ztools module packages a collection of tools written in Python.
- Project home: https://github.com/zoumingzhe/ztools
- Downloads: https://pypi.python.org/pypi/ztools
- License: MIT license_ , (C) 2018-2020 ZouMingzhe_ <[email protected]>
Installation
============
Install the ztools module with the pip command:
``pip install ztools``
If the ztools module is already installed, it can be updated with pip:
``pip install --upgrade ztools``
See 安装文档_ for more installation details.
Tools
=====
- timeout: https://github.com/zoumingzhe/ztools/blob/master/ztools/ztools/tool/timeout.py
  Timeout: provides timeout checks.
- progressbar: https://github.com/zoumingzhe/ztools/blob/master/ztools/ztools/tool/progressbar.py
  Progress bar: provides progress-bar display.
- plot: https://github.com/zoumingzhe/ztools/blob/master/ztools/ztools/GUI/plot.py
  Plotting: provides matplotlib-based plotting.
- filebase: https://github.com/zoumingzhe/ztools/blob/master/ztools/ztools/file/filebase.py
  File: provides file access and related operations.
- xls: https://github.com/zoumingzhe/ztools/blob/master/ztools/ztools/file/xls.py
  Excel: provides access to .xls files and related operations.
- MySQL: https://github.com/zoumingzhe/ztools/blob/master/ztools/ztools/db/MySQL.py
  MySQL: provides pymysql-based access to MySQL databases.
Documentation
=============
API documentation, usage notes and examples can be found in the documentation_ directory.
- Files with the .rst suffix can be viewed in any text editor or converted to HTML or PDF with Sphinx_
Examples
========
Examples are stored in the examples_ directory of the project.
Tests
=====
Unit tests are stored in the test_ directory of the project.
Support
=======
This project was created and is maintained by ZouMingzhe_ <[email protected]>.
Contributions that improve the ztools module are welcome at: https://github.com/zoumingzhe/ztools
Languages
=========
简体中文_
English_
.. _ZouMingzhe: https://zoumingzhe.github.io
.. _简体中文: https://github.com/zoumingzhe/ztools/blob/master/README.rst
.. _English: https://github.com/zoumingzhe/ztools/blob/master/documentation/en/README.rst
.. _license: https://github.com/zoumingzhe/ztools/blob/master/LICENSE.txt
.. _安装文档: https://github.com/zoumingzhe/ztools/tree/master/documentation/ztools.rst#installation
.. _documentation: https://github.com/zoumingzhe/ztools/tree/master/documentation
.. _examples: https://github.com/zoumingzhe/ztools/tree/master/examples
.. _test: https://github.com/zoumingzhe/ztools/tree/master/test
.. _Python: http://python.org/
.. _Sphinx: http://sphinx-doc.org/

.. source file: ztools-2.4.7/README.rst (package: ztools)
## Synopsis
Provides Zero Touch Provisioning functions for Fortinet assets using FortiManager.
Initial Setup
#############
ZTPLite requires a system that is running Python 3.
Installing ztplite is simple. All that is required is to run:
.. code-block:: python
pip install ztplite
A good practice is always to perform python package installs from a virtual environment so as not to interfere with the system requirements. See the python docs or other documents about virtual environments (venv or virtualenv) such as `Virtual Environment Docs <https://docs.python.org/3/tutorial/venv.html>`_
Program Usage
*************
To get ZTPLite working, a python module acting as a driver must be called. This driver is minimal and must call the :ref:`Controller's<Controller Module>` *provision_unregistered_devices* function. Once that call is made, the rest of the program runs automatically.
To define the driver (we will call it *ztpl_main.py*), things could not be simpler. As stated before it simply needs to call a function within the :ref:`Controller<Controller Module>` that looks to provision unregistered devices called (drumroll please) *provision_unregistered_devices*. A full driver module is provided here:
.. code-block:: python
#!/usr/bin/env python3
from ztplite import ztplcontroller
def main():
ztplcontroller.provision_unregistered_devices()
if __name__ == "__main__":
main()
In the case that follows the calling module (remember that we named it *ztpl_main.py*) is executed. Shown below is the call and an explanation of the arguments string that would be used for an example run....so:
.. code-block:: python
python ztpl_main.py -s 10.1.1.1 -o /home/fortinet/ztpdemo/ztplite.log -c /home/fortinet/ztpdemo/configyml.yml -i /home/fortinet/ztpdemo/instructions.json --ssl -D
- This would call a driver python module named *ztpl_main.py* with the target FMG at 10.1.1.1
- The information log would be at /home/fortinet/ztpdemo/ztplite.log
- The configuration file is the yaml file at /home/fortinet/ztpdemo/configyml.yml
- The instruction file is a json file at /home/fortinet/ztpdemo/instructions.json
- The conversation with the FMG will utilize ssl
- The code will be running in Debug mode as well so the default debug log will be in affect. If the call needed to place the debug log specifically, the -d flag should have been used as discussed below in the :ref:`Additional Argument Information` section
Clearly the above call would be hosted in a cron job somewhere or within a task that called it from within a virtual environment every hour or day or whatever timeframe the customer found acceptable. Any other task management process (other than cron) could be used to call the program of course.
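If cron is not available, the same invocation can be wrapped in a tiny scheduler of its own. The sketch below simply re-runs the driver once per hour using only the standard library; the command line mirrors the example above and the one-hour interval is an arbitrary choice:

.. code-block:: python

    import subprocess
    import time

    CMD = [
        "python", "ztpl_main.py",
        "-s", "10.1.1.1",
        "-c", "/home/fortinet/ztpdemo/configyml.yml",
        "-i", "/home/fortinet/ztpdemo/instructions.json",
        "--ssl",
    ]

    while True:
        subprocess.run(CMD)   # one provisioning pass
        time.sleep(3600)      # wait an hour before the next pass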
Additional Argument Information
*******************************
ztpl_main.py [-h] [-i INSTRUCTION_FILE] [-c CONFIG_FILE]
[-s FMG_ADDRESS] [-u FMG_UNAME] [-p FMG_PWORD]
[-o LOG_LOCATION] [-d DEBUG_LOG_LOCATION]
[--use_syslog] [--syslog_addr SYSLOG_ADDR] [--syslog_port SYSLOG_PORT] [--syslog_fac SYSLOG_FAC]
[--new_pass NEW_PASS] [-v V] [-D] [--run_test] [--ssl]
- Configuration:
-i, --instruction_file INSTRUCTION_FILE_PATH: Instruction file path providing text to code structure. Defaults to ztpltexttocode.json in the directory where the main driver module is called
-c, --config_file CONFIG_FILE: Config file path providing a location where the configuration file is located. No default is provided and this is a requirement
-s, --fmg_address FMG_ADDR_STRING: FMG address or FQDN to FMG performing the ZTP operations
- Authentication:
-u, --fmg_uname FMG_UNAME: FMG username used for authentication. Default is *admin*
-p, --fmg_pword FMG_PWORD: FMG password used for authentication. Default is a blank password
- Local Logging:
-o, --log_location LOG_LOCATION: Standard log location. Defaults to a log in the local directory named *ztplite.log*
-d, --debug_log_location DEBUG_LOG_LOCATION: Debug Log location, default is the current directory with a filename of *debug.log*
- Remote Logging:
--use_syslog: Sets requirement for syslog logging. Default is *false*
--syslog_addr SYSLOG_ADDR: IP address of listening syslog server. Defaults to */dev/log* for local host logging.
--syslog_port SYSLOG_PORT: Port used for syslog server. Default is *514*
--syslog_fac SYSLOG_FAC: Facility for syslog server logging. Default is *user*
- Optional Arguments:
-h, --help: Display this help message and exit
--new_pass NEW_PASSWORD: New Password for all FortiGates during this process
-v: Runs code in verbose mode. Append multiple letter v for verbosity (i.e. -vvvv)
-D, --debug: Run in debug mode. Enables debug logging and console debugging
--ssl: Connect to FMG using SSL. Default is *false*
--run_test: Run a test to determine if the instruction file has correct requirements in the DATA-REQ list. No actual modifications will take place if run_test is set
<!-- source file: ztplite-0.2.1/README.md (package: ztplite) -->
[Build Status](https://travis-ci.org/arista-eosplus/ztpserver)
Quick Overview
=====================
ZTPServer provides a bootstrap environment for Arista EOS-based products. ZTPServer interacts with the Zero Touch Provisioning (ZTP) mode of Arista EOS. The default ZTP start-up mode triggers an unprovisioned Arista EOS node to enter a bootstrap-ready state if a valid configuration file is not already present on the internal flash storage.
ZTPServer provides a number of configurable bootstrap operation workflows that extend beyond simply loading a configuration and boot image. It provides the ability to define the target node through the introduction of definitions and templates that call pre-built actions and statically defined or dynamically generated attributes. The attributes and actions can also be extended to provide custom functionality that is specific to a given implementation. ZTPServer also provides a topology validation engine with a simple syntax to express LLDP neighbor adjacencies. It is written mostly in Python and leverages standard protocols like DHCP and DHCP options for boot functions, HTTP for bi-directional transport, and XMPP and syslog for logging. Most of the files that the user interacts with are YAML-based.
ZTPServer Features
==================
* Automated configuration file generation and application
* Image and file system validation and standardization
* Connectivity validation and topology based auto-provisioning
* Config and device templates with resource allocation for dynamic deployments
* Zero touch replacement and upgrade capabilities
* User extensible actions
* Email, XMPP, syslog based logging and accounting of all processes
Docs
====
[ZTPServer official documentation](http://ztpserver.readthedocs.org/) is built and hosted at (http://ReadTheDocs.org/).
Contributing
============
Please see the [CONTRIBUTING.md](CONTRIBUTING.md) file for additional information.
Support
=======
* [Mailing List](https://groups.google.com/forum/#!forum/eosplus)
* [email protected]
* IRC: irc.freenode.net#arista
Dependencies
============
Server
======
* Python 3.7 or later (https://www.python.org/downloads)
* routes 2.5 or later (https://pypi.python.org/pypi/Routes)
* webob 1.8 or later (http://webob.org/)
* PyYaml 6.0 or later (http://pyyaml.org/)
Client
======
* Arista EOS 4.12.0 or later
License
=======
BSD-3, See LICENSE file
| ztpserver | /ztpserver-2.0.0.tar.gz/ztpserver-2.0.0/README.md | README.md |
Contributing to ZTPServer
=========================
Arista EOS+ ZTPServer provides a community based implementation of a bootstrap solution for working with Arista EOS switches. This document provides different options for contributing to this project.
Contributing Code
=================
Arista EOS+ ZTPServer makes all of its source available to anyone on GitHub at github.com/arista-eosplus/ztpserver. The project is freely available for anyone to fork and use in their own implementations. The Arista EOS+ community gladly accepts pull requests that add new features, enhance existing features or fix bugs.
All contributed code should be done using pull requests. Once a pull request is initiated, a member of the Arista EOS+ community will review the code and either accept it as is or provide feedback on things to change.
As a procedural note, all pull requests that add new features and/or enhance the operation of ZTPServer are expected to have corresponding test cases with them. Pull requests will not be accepted without them.
Feature Requests
================
Another way to contribute to this project is to submit feature requests. Feature requests should be submitted through GitHub issues for this project and tagged as enhancement plus server, client, or action. All enhancement requests are included as part of the planning process and will be considered for implementation.
Bugs
====
If you happen to find a bug with ZTPServer, please open an issue and flag it as a bug. In the issue description please provide details about what you were trying to do, any errors and/or tracebacks and any other information necessary to replicate the bug.
Contact
=======
If you have any additional questions about ZTPServer please send an email to [email protected]
| ztpserver | /ztpserver-2.0.0.tar.gz/ztpserver-2.0.0/CONTRIBUTING.md | CONTRIBUTING.md |
Support
=======
.. contents:: :local:
Contact
~~~~~~~
ZTPServer is an Arista-led open source community project. Users and developers are encouraged to contribute to the project. See `CONTRIBUTING <https://github.com/arista-eosplus/ztpserver/blob/develop/CONTRIBUTING.md>`_ for more details.
Before requesting support, please collect the necessary data to include. See :ref:`before-requesting-support`.
Commercial support may be purchased through your Arista account team.
Community-based support is available through:
* `eosplus forum <https://groups.google.com/forum/#!forum/eosplus>`_
* [email protected].
* IRC: irc.freenode.net#arista
Customization and integration services are available through the EOS+ Consulting Services team at `Arista Networks, Inc <http://arista.com/>`_. Contact [email protected] or your account team for details.
Known caveats
~~~~~~~~~~~~~
.. contents:: :local:
The authoritative state for any known issue can be found in `GitHub issues <https://github.com/arista-eosplus/ztpserver/issues>`_.
* Only a single entry in a file-based resource pool may be allocated to a node (using the ``allocate(resource_pool)`` plugin).
* Users MUST be aware of the required EOS version for various hardware components (including transceivers). Neighbor (LLDP) validation may fail if a node boots with an EOS version that does not support the installed hardware. Moreover, some EOS features configured via ZTPServer might be unsupported. Please refer to the Release Notes for more compatibility information and to the `Transceiver Guide <http://www.arista.com/assets/data/pdf/Transceiver-Guide.pdf>`_.
* If many nodes are booted at the same time and they all share the same file-based resource files (using the ``allocate(resource_pool)`` plugin), retrieving the definition for each might be slow (5s or longer) if the resource files are very large. The workaround is to use another plugin or custom actions and allocate the resources from alternative sources (other than shared files) - e.g. SQL
Releases
~~~~~~~~
The authoritative state for any known issue can be found in `GitHub issues <https://github.com/arista-eosplus/ztpserver/issues>`_.
.. toctree::
:maxdepth: 2
:titlesonly:
ReleaseNotes2.0.0
ReleaseNotes1.6.0
ReleaseNotes1.5.0
ReleaseNotes1.4.1
ReleaseNotes1.4
ReleaseNotes1.3.2
ReleaseNotes1.3.1
ReleaseNotes1.3
ReleaseNotes1.2
ReleaseNotes1.1
Roadmap highlights
~~~~~~~~~~~~~~~~~~
The authoritative state, including the intended release, for any known issue can be found in `GitHub issues <https://github.com/arista-eosplus/ztpserver/issues>`_. The information provided here is current at the time of publishing but is subject to change. Please refer to the latest information in GitHub issues by filtering on the desired `milestone <https://github.com/arista-eosplus/ztpserver/milestones>`_.
Release 1.5
-----------
Target: January 2016
* topology-based ZTR (`103 <https://github.com/arista-eosplus/ztpserver/pull/103>`_)
* ZTPServer Cookbook - advanced topics (`289 <https://github.com/arista-eosplus/ztpserver/pull/289>`_)
* benchmark scale tests (`261 <https://github.com/arista-eosplus/ztpserver/pull/261>`_)
Release 2.0
-----------
Target: March 2016
* configure HTTP timeout in bootstrap.conf (`246 <https://github.com/arista-eosplus/ztpserver/pull/246>`_)
* all requests from the client should contain the unique identifier of the node (`188 <https://github.com/arista-eosplus/ztpserver/pull/188>`_)
* dual-sup support for install_extension action (`180 <https://github.com/arista-eosplus/ztpserver/pull/180>`_)
* dual-sup support for install_cli_plugin action (`179 <https://github.com/arista-eosplus/ztpserver/pull/179>`_)
* dual-sup support for copy_file action (`178 <https://github.com/arista-eosplus/ztpserver/pull/178>`_)
* action for arbitrating between MLAG peers (`141 <https://github.com/arista-eosplus/ztpserver/pull/141>`_)
* plugin infrastructure for resource pool allocation (`121 <https://github.com/arista-eosplus/ztpserver/pull/121>`_)
* md5sum checks for all downloaded resources (`107 <https://github.com/arista-eosplus/ztpserver/pull/107>`_)
* topology-based ZTR (`103 <https://github.com/arista-eosplus/ztpserver/pull/103>`_)
Tutorial
~~~~~~~~~~~~~~
See https://eos.arista.com/quick-and-easy-veos-lab-setup/.
Other Resources
~~~~~~~~~~~~~~~
ZTPServer documentation and other reference materials are below:
* `GitHub ZTPServer Repository <https://github.com/arista-eosplus/ztpserver>`_
* ZTPServer `wiki <https://github.com/arista-eosplus/ztpserver/wiki>`_
* `Packer VM <https://github.com/arista-eosplus/packer-ztpserver>`_ build process
* `ZTPServer <https://pypi.python.org/pypi/ztpserver>`_ Python (PyPI) package
* `YAML Code Validator <http://yamllint.com/>`_
* `ZTPServer WSGI Benchmarking <https://eos.arista.com/ztpserver-benchmarking-the-webserver-gateway-interface>`_
| ztpserver | /ztpserver-2.0.0.tar.gz/ztpserver-2.0.0/docs/support.rst | support.rst |
Release 1.3.2
-------------
(Published March, 2015)
The authoritative state for any known issue can be found in `GitHub issues <https://github.com/arista-eosplus/ztpserver/issues>`_.
Bug fixes
^^^^^^^^^
* Prevented .node file from becoming corrupted on the server (`298 <https://github.com/arista-eosplus/ztpserver/issues/298>`_)
.. comment
* Added .node filename to server-side logs (`297 <https://github.com/arista-eosplus/ztpserver/issues/297>`_)
.. comment
* Change ``refresh_ztps`` script default to "master"
Refresh_ztps will, by default, update the installation to the latest released version. Previously, the default was to the development branch which may still be accomplished with ``refresh_ztps --branch develop``.
* Fixes to RPM packaging:
- Quieted chcon during install (`295 <https://github.com/arista-eosplus/ztpserver/issues/295>`_)
.. comment
- Fixed issue where config files may not be kept during upgrade (`296 <https://github.com/arista-eosplus/ztpserver/issues/296>`_)
.. comment
- Fixed issue with native rpmbuild due to changes in handling VERSION (`294 <https://github.com/arista-eosplus/ztpserver/issues/294>`_)
.. comment
* Documentation updates:
- Troubleshooting chapter (`272 <https://github.com/arista-eosplus/ztpserver/issues/272>`_)
.. comment
- Additional content in the ZTP Server Cookbook (`289 <https://github.com/arista-eosplus/ztpserver/issues/289>`_)
.. comment
- ZTP Server benchmarking results
.. comment
| ztpserver | /ztpserver-2.0.0.tar.gz/ztpserver-2.0.0/docs/ReleaseNotes1.3.2.rst | ReleaseNotes1.3.2.rst |
Release 1.2
-----------
(Published December, 2014)
The authoritative state for any known issue can be found in `GitHub issues <https://github.com/arista-eosplus/ztpserver/issues>`_.
Enhancements
^^^^^^^^^^^^
* Enhance neighbordb documentation (`255 <https://github.com/arista-eosplus/ztpserver/issues/255>`_)
.. comment
* In case of failure, bootstrap cleanup removes temporary files that were copied onto switch during provisioning (`253 <https://github.com/arista-eosplus/ztpserver/issues/253>`_)
.. comment
* "ERROR: unable to disable COPP" should be a warning on old EOS platforms (`242 <https://github.com/arista-eosplus/ztpserver/issues/242>`_)
A detailed warning will be displayed if disabling COPP fails (instead of an error).
* Enhance documentation for open patterns(`239 <https://github.com/arista-eosplus/ztpserver/issues/239>`_)
.. comment
* Document guidelines on how to test ZTPS (`235 <https://github.com/arista-eosplus/ztpserver/issues/235>`_)
.. comment
* Document http://www.yamllint.com/ as a great resource for checking YAML files syntax (`234 <https://github.com/arista-eosplus/ztpserver/issues/234>`_)
.. comment
* Make "name" an optional attribute in local pattern files (`233 <https://github.com/arista-eosplus/ztpserver/issues/233>`_)
The node pattern file can now contain only the interfaces directive
e.g.
::
interfaces:
- any:
device: any
port: any
* Documentation should clarify that users must be aware of the EOS version in which certain transceivers were introduced (`232 <https://github.com/arista-eosplus/ztpserver/issues/232>`_)
.. comment
* Enhance the Apache documentation (`231 <https://github.com/arista-eosplus/ztpserver/issues/231>`_)
.. comment
* Enhance documentation related to config files (`229 <https://github.com/arista-eosplus/ztpserver/issues/229>`_)
.. comment
* Disable meta information checks for remote URLs (`224 <https://github.com/arista-eosplus/ztpserver/issues/224>`_)
- if URL points to ZTP server and destination is on flash, use metadata request to compute disk space (other metadata could be added here in the future)
- if URL points to a remote server and destination is on flash, use 'content-length' to compute disk space - this will skip the metadata request
* Assume port 514 for remote syslog, if missing from bootstrap.conf (`218 <https://github.com/arista-eosplus/ztpserver/issues/218>`_)
When configuring remote syslog destinations in bootstrap.conf, the port number is not mandatory anymore (if missing, a default value of 514 is assumed).
e.g.
::
logging:
- destination: pcknapweed
level: DEBUG
* Deal more gracefully with YAML errors in neighbordb (`216 <https://github.com/arista-eosplus/ztpserver/issues/216>`_)
YAML serialization errors are now exposed in ZTPS logs:
::
DEBUG: [controller:170] JPE14140273: running post_node
ERROR: [topology:83] JPE14140273: failed to load file: /usr/share/ztpserver/neighbordb
ERROR: [topology:116] JPE14140273: failed to load neighbordb:
expected a single document in the stream
in "<string>", line 26, column 1:
patterns:
^
but found another document
in "<string>", line 35, column 1:
---
^
DEBUG: [controller:182] JPE14140273: response to post_node: {'status': 400, 'body': '', 'content_type': 'text/html'}
s7056.lab.local - - [03/Nov/2014 21:05:33] "POST /nodes HTTP/1.1" 400 0
* Deal more gracefully with DNS/connectivity errors while trying to access remote syslog servers (`215 <https://github.com/arista-eosplus/ztpserver/issues/215>`_)
Logging errors (e.g. bogus destination) will not be automatically logged by the bootstrap script. In order to debug logging issues, simply uncomment the following lines in the bootstrap script:
::
#---------------------------------SYSLOG----------------------
# Comment out this section in order to enable syslog debug
# logging
logging.raiseExceptions = False
#---------------------------------XMPP------------------------
Example of output which is suppressed by default:
::
Traceback (most recent call last):
File "/usr/lib/python2.7/logging/handlers.py", line 806, in emit
self.socket.sendto(msg, self.address)
gaierror: [Errno -2] Name or service not known
Logged from file bootstrap, line 163
* Make "name" an optional attribute in node definitions (`214 <https://github.com/arista-eosplus/ztpserver/issues/214>`_)
Definitions under /nodes/<NODE> do not have to have a 'name' attribute.
* Increase HTTP timeout in bootstrap script (`212 <https://github.com/arista-eosplus/ztpserver/issues/212>`_)
HTTP timeout in bootstrap script is now 30s. https://github.com/arista-eosplus/ztpserver/issues/246 tracks making that configurable via bootstrap.conf. In the meantime, the workaround for changing it is manually editing the bootstrap file.
* Remove fake prefixes from client and actions function names in docs (`204 <https://github.com/arista-eosplus/ztpserver/issues/204>`_)
.. comment
* Tips and tricks - clarify vEOS version for both ways to set system MAC (`203 <https://github.com/arista-eosplus/ztpserver/issues/203>`_)
.. comment
* Enhance logging for "copy_file" action (`187 <https://github.com/arista-eosplus/ztpserver/issues/187>`_)
* Local interface pattern specification should also allow management interfaces (`185 <https://github.com/arista-eosplus/ztpserver/issues/185>`_)
Local interface allows for:
- management interface or interface range, using either mXX, maXX, MXX, MaXX, ManagementXX (where XX is the range)
- management + ethernet specification on the same line: Management1-3,Ethernet3,5,6/7
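An illustrative pattern interface entry combining both forms (the neighbor values are placeholders):

::

  interfaces:
    - Management1-3,Ethernet3,5,6/7: any:any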
* Bootstrap script should cleanup on failure (`176 <https://github.com/arista-eosplus/ztpserver/issues/176>`_)
::
$ python bootstrap --help
usage: bootstrap [options]
optional arguments:
-h, --help show this help message and exit
--no-flash-factory-restore, -n
Do NOT restore flash config to factory defaul
Added extra command-line option for the bootstrap script for testing.
Default behaviour:
- clear rc.eos, startup-config, boot-extensions (+folder) at the beginning of the process
- in case of failure, delete all new files added to flash
'-n' behaviour:
- leave rc.eos, startup-config, boot-extensions (+folder) untouched
- instead, bootstrap will create the new files corresponding to the above, with the ".ztp" suffix
- never remove any files from flash at the end of the process, regardless of the outcome
* Allow posting the startup-config to a node's folder, even if no startup-config is already present (`169 <https://github.com/arista-eosplus/ztpserver/issues/169>`_)
.. comment
* Remove definition line from auto-generated pattern (`102 <https://github.com/arista-eosplus/ztpserver/issues/102>`_)
When writing the pattern file in the node's folder (after a neighbordb match):
- 'definition' line is removed
- 'variables' and 'node' are only written if non-empty
- 'name' (that's the pattern's name) and 'interfaces' are always written
Fixed
^^^^^
* server_url requires trailing slash "/" when adding subdirectory (`244 <https://github.com/arista-eosplus/ztpserver/issues/244>`_)
.. comment
* Error when doing static node provisioning using replace_config (`241 <https://github.com/arista-eosplus/ztpserver/issues/241>`_)
.. comment
* XMPP messages are missing the system ID (`236 <https://github.com/arista-eosplus/ztpserver/issues/236>`_)
XMPP messages now contain the serial number of the switch sending the message. 'N/A' is shown if the serial number is not available or empty.
* Fix "node:" directive behaviour in neighbordb (`230 <https://github.com/arista-eosplus/ztpserver/issues/230>`_)
The following 'patterns' are now valid in neighbordb:
- name, definition, node [,variables]
- name, definition, interfaces [,variables]
- name, definition, node, interfaces [,variables]
* node.retrieve_resource should be a no-op if the file is already on the disk (`225 <https://github.com/arista-eosplus/ztpserver/issues/225>`_)
When computing the available disk space on flash for saving a file, the length of the file which is about to be overwritten is also considered.
* Ignore content-type when retrieving a resource from a remote server or improve on the error message (`222 <https://github.com/arista-eosplus/ztpserver/issues/222>`_)
If a resource is retrieved from some other server (which is NOT the ZTPServer itself), then we allow any content-type.
* ztpserver.wsgi is not installed by setup.py (`220 <https://github.com/arista-eosplus/ztpserver/issues/220>`_)
.. comment
* ztps --validate broken in 1.1 (`217 <https://github.com/arista-eosplus/ztpserver/issues/217>`_)
::
ztps --validate PATH_TO_NEIGHBORDB
can be used in order to validate the syntax of a neighbordb file.
* install_extension action copies the file to the switch but doesn't install it (`206 <https://github.com/arista-eosplus/ztpserver/issues/206>`_)
.. comment
* Bootstrap XMPP logging - client fails to create the specified MUC room (`148 <https://github.com/arista-eosplus/ztpserver/issues/148>`_)
In order for XMPP logging to work, a non-EOS user needs to be connected to the room specified in bootstrap.conf before the ZTP process starts. The room has to be created (by the non-EOS user) before the bootstrap client starts logging the ZTP process via XMPP.
* ZTPS server fails to write .node because lack of permissions (`126 <https://github.com/arista-eosplus/ztpserver/issues/126>`_)
.. comment
| ztpserver | /ztpserver-2.0.0.tar.gz/ztpserver-2.0.0/docs/ReleaseNotes1.2.rst | ReleaseNotes1.2.rst |
Installation
============
.. contents:: :local:
Requirements
`````````````
**Server:**
* Python 3.7 or later (https://www.python.org/download/releases)
* routes 2.5 or later (https://pypi.python.org/pypi/Routes)
* webob 1.8 or later (http://webob.org/)
* PyYaml 6.0 or later (http://pyyaml.org/)
**Client:**
* `EOS <http://eos.arista.com>`_ 4.12.0 or later (ZTPServer 1.1+)
* `EOS <http://eos.arista.com>`_ 4.13.3 or later (ZTPServer 1.0)
.. NOTE:: We recommend using a Linux distribution which has Python 2.7 as its standard Python install (e.g. yum in CentOS requires Python 2.6, and a dual Python install can be fairly tricky and buggy). This guide was written based on ZTPServer v1.1.0 installed on Fedora 20.
Installation Options
````````````````````
* :ref:`packer_install`
* :ref:`pypi_install`
* :ref:`manual_install`
.. _packer_install:
Turn-key VM Creation (deprecated)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The turn-key VM option leverages `Packer <http://www.packer.io/>`_ to auto generate a VM on your local system. Packer.io automates the creation of the ZTPServer VM. All of the required packages and dependencies are installed and configured. The current Packer configuration allows you to choose between VirtualBox or VMWare as your hypervisor and each can support Fedora 20 or Ubuntu Server 12.04.
VM Specification:
* 7GB Hard Drive
* 2GB RAM
* Hostname ztps.ztps-test.com
* eth0 (NAT) DHCP
* eth1 (hostonly) 172.16.130.10
* Firewalld/UFW disabled
* Users
* root/eosplus
* ztpsadmin/eosplus
* Python 2.7.5 with PIP
* DHCP installed with Option 67 configured (eth1 only)
* BIND DNS server installed with zone ztps-test.com
* wildcard forwarding rule passing all other queries to 8.8.8.8
* SRV RR for im.ztps-test.com
* rsyslog-ng installed; Listening on UDP and TCP (port 514)
* ejabberd (XMPP server) configured for im.ztps-test.com
* XMPP admin user: ztpsadmin/eosplus
* httpd installed and configured for ZTPServer (mod_wsgi)
* ZTPServer installed
* ztpserver-demo repo files pre-loaded
See the Packer VM `code and documentation <https://github.com/arista-eosplus/packer-ztpserver>`_ as well as the `ZTPServer demo files <https://github.com/arista-eosplus/ztpserver-demo>`_ for the Packer VM.
.. _pypi_install:
PyPI Package (pip install)
~~~~~~~~~~~~~~~~~~~~~~~~~~
`ZTPServer <https://pypi.python.org/pypi/ztpserver>`_ may be installed as a `PyPI <https://pypi.python.org/pypi/ztpserver>`_ package.
This option assumes you have a server with Python and pip pre-installed. See `installing pip <https://pip.pypa.io/en/latest/installing.html>`_.
Once pip is installed, type:
.. code-block:: console
bash-3.2$ pip install ztpserver
The pip install process will install all dependencies and run the install script, leaving you with a ZTPServer instance ready to configure.
.. _manual_install:
Manual installation
~~~~~~~~~~~~~~~~~~~
Download source:
* |git release|_ on GitHub
* |prev releases|_
* Active Stable: (|git master|_) (|zip master|_) (|tar master|_)
* Development: (|git dev|_) (|zip dev|_) (|tar dev|_)
.. |git release| replace:: Latest Release
.. _git release: https://github.com/arista-eosplus/ztpserver/releases/latest
.. |prev releases| replace:: Previous releases
.. _prev releases: https://github.com/arista-eosplus/ztpserver/releases/
.. |git master| replace:: GitHub
.. _git master: https://github.com/arista-eosplus/ztpserver/tree/master
.. |zip master| replace:: ZIP
.. _zip master: https://github.com/arista-eosplus/ztpserver/zipball/master
.. |tar master| replace:: TAR
.. _tar master: https://github.com/arista-eosplus/ztpserver/tarball/master
.. |git dev| replace:: GitHub
.. _git dev: https://github.com/arista-eosplus/ztpserver/tree/develop
.. |zip dev| replace:: ZIP
.. _zip dev: https://github.com/arista-eosplus/ztpserver/zipball/develop
.. |tar dev| replace:: TAR
.. _tar dev: https://github.com/arista-eosplus/ztpserver/tarball/develop
Once the above system requirements are met, you can use the following git command to pull the develop branch into a local directory on the server where you want to install ZTPServer:
.. code-block:: console
bash-3.2$ git clone https://github.com/arista-eosplus/ztpserver.git
Or, you may download the zip or tar archive and expand it.
.. code-block:: console
bash-3.2$ wget https://github.com/arista-eosplus/ztpserver/tarball/master
bash-3.2$ tar xvf <filename>
or
bash-3.2$ unzip <filename>
Change into the ztpserver directory, then check out the desired release:
.. code-block:: console
bash-3.2$ cd ztpserver
bash-3.2$ git checkout v1.1.0
Execute ``setup.py`` to build and then install ZTPServer:
.. code-block:: console
[user@localhost ztpserver]$ sudo python setup.py build
running build
running build_py
...
[root@localhost ztpserver]# sudo python setup.py install
running install
running build
running build_py
running install_lib
...
.. _upgrade:
Upgrading
`````````
Upgrading ZTP Server is based on the method of installation:
* PyPI (pip)::
sudo pip install --upgrade ztpserver
* Manual, Packer-VM, GitHub installs::
cd ztpserver/
sudo ./utils/refresh_ztps -b <branch>
The ztpserver/ directory, above, should be a git repository (where the files were checked out). The ``branch`` identifier may be any version identifier (1.3.2, 1.1), or an actual branch on github such as ``master`` (released), or ``develop`` (development).
* RPM::
sudo rpm -Uvh ztpserver-<version>.rpm
.. _server_config:
Additional services
```````````````````
.. NOTE:: If using the :ref:`packer_install`, all of the steps below will have been completed; please reference the VM documentation.
Allow ZTPServer Connections In Through The Firewall
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Be sure your host firewall allows incoming connections to ZTPServer. The standalone server runs on port TCP/8080 by default.
**Firewalld** examples:
* Open TCP/<port> through firewalld
``bash-3.2$ firewall-cmd --zone=public --add-port=<port>/tcp [--permanent]``
* Stop firewalld
``bash-3.2$ systemctl stop firewalld``
* Disable firewalld
``bash-3.2$ systemctl disable firewalld``
.. NOTE:: If using the :ref:`packer_install`, all of the steps below will have been completed automatically.
Configure the DHCP Service
~~~~~~~~~~~~~~~~~~~~~~~~~~
Set up your DHCP infrastructure to serve the full path to the ZTPServer bootstrap file via option 67. This can be performed on any DHCP server. The example below shows how to do this with ISC dhcpd.
Get dhcpd:
RedHat:
``bash-3.2$ sudo yum install dhcp``
Ubuntu:
``bash-3.2$ sudo apt-get install isc-dhcp-server``
Add a network (in this case 192.168.100.0/24) for servicing DHCP requests for ZTPServer::
subnet 192.168.100.0 netmask 255.255.255.0 {
range 192.168.100.200 192.168.100.205;
option routers 192.168.100.1;
option domain-name-servers <ipaddr>;
option domain-name "<org>";
# Only return the bootfile-name to Arista devices
class "Arista" {
match if substring(option vendor-class-identifier, 0, 6) = "Arista";
# Interesting bits:
# Relay agent IP address
# Option-82: Agent Information
# Suboption 1: Circuit ID
# Ex: 45:74:68:65:72:6e:65:74:31 ==> Ethernet1
option bootfile-name "http://<ztp_hostname_or_ip>:<port>/bootstrap";
}
}
Enable and start the dhcpd service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
RedHat (and derivative Linux implementations)
``bash-3.2# sudo /usr/bin/systemctl enable dhcpd.service``
``bash-3.2# sudo /usr/bin/systemctl start dhcpd.service``
Ubuntu (and derivative Linux implementations)
``bash-3.2# sudo /usr/sbin/service isc-dhcp-server start``
Check that /etc/init/isc-dhcp-server.conf is configured for automatic startup on boot.
Edit the global configuration file located at ``/etc/ztpserver/ztpserver.conf`` (if needed). See the :ref:`global_configuration` options for more information.
| ztpserver | /ztpserver-2.0.0.tar.gz/ztpserver-2.0.0/docs/install.rst | install.rst |
Actions
=======
.. contents:: :local:
:mod:`add_config`
-----------------
.. automodule:: actions.add_config
:members:
:mod:`configure_ansible_client`
-------------------------------
.. automodule:: actions.configure_ansible_client
:members:
:mod:`copy_file`
----------------
.. automodule:: actions.copy_file
:members:
:mod:`install_cli_plugin`
-------------------------
.. automodule:: actions.install_cli_plugin
:members:
:mod:`install_extension`
------------------------
.. automodule:: actions.install_extension
:members:
:mod:`install_image`
--------------------
.. automodule:: actions.install_image
:members:
:mod:`replace_config`
---------------------
.. automodule:: actions.replace_config
:members:
:mod:`run_bash_script`
----------------------
.. automodule:: actions.run_bash_script
:members:
:mod:`run_cli_commands`
-----------------------
.. automodule:: actions.run_cli_commands
:members:
:mod:`send_email`
-----------------
.. automodule:: actions.send_email
:members:
..
#:mod:`template`
#---------------
#
#.. automodule:: template
# :members:
| ztpserver | /ztpserver-2.0.0.tar.gz/ztpserver-2.0.0/docs/actions.rst | actions.rst |
Release 1.3
-----------
(Published February, 2015)
The authoritative state for any known issue can be found in `GitHub issues <https://github.com/arista-eosplus/ztpserver/issues>`_.
Enhancements
^^^^^^^^^^^^
* ``ztps --validate`` validates:
- neighbordb syntax and patterns
- resource files syntax
- definition files syntax
- pattern files syntax
::
$ ztps --validate
Validating neighbordb ('/usr/share/ztpserver/neighbordb')...
2015-01-13 18:03:55,006:ERROR:[validators:111] N/A: PatternValidator validation error: missing attribute: definition
2015-01-13 18:03:55,006:ERROR:[validators:111] N/A: NeighbordbValidator validation error: invalid patterns: set([(0, 's7151')])
ERROR: Failed to validate neighbordb patterns
Invalid Patterns (count: 1)
---------------------------
[0] s7151
Validating definitions...
Validating /usr/share/ztpserver/definitions/leaf.definition... Ok!
Validating /usr/share/ztpserver/definitions/leaf-no_vars.definition... Ok!
Validating resources...
Validating /usr/share/ztpserver/resources/leaf_man_ip... Ok!
Validating /usr/share/ztpserver/resources/leaf_spine_ip...
ERROR: Failed to validate /usr/share/ztpserver/resources/leaf_spine_ip
validator: unable to deserialize YAML data:
10.0.0.51/24: null
10.0.0.53/24: null
dfdsf dsfsd
10.0.0.54/24: JPE14140273
Error:
while scanning a simple key
in "<string>", line 3, column 1:
dfdsf dsfsd
could not find expected ':'
in "<string>", line 5, column 1:
10.0.0.54/24: JPE14140273
^
Validating nodes...
Validating /usr/share/ztpserver/nodes/JAS12170010/definition... Ok!
Validating /usr/share/ztpserver/nodes/JAS12170010/pattern... Ok!
* *run_bash_script* action allows users to run bash scripts during the bootstrap process
* *run_cli_commands* action allows users to run CLI commands during the bootstrap process
* *config-handlers* can be used in order to trigger scripts on the server on PUT startup-config request completion
* The auto **replace_config** action which is added to the definition whenever a startup-config file is present in a node's folder is now the first action in the definition which is sent to the client. This enables performing configuration updates during ZTR (Zero Touch Replacement) via 'always_execute' *add_config* actions in the definition file. One particularly interesting use-case is replacing one node with another one of a different model.
* ``ztps --clear-resources`` clears all resource allocations
* server-side logs are timestamped by default
* ZTP Server shows running version on-startup
::
# ztps
2015-02-09 16:50:35,922:INFO:[app:121] Starting ZTPServer v1.3.0...
...
Bug fixes
^^^^^^^^^
* upgrades/downgrades to/from v1.3+ will preserve the configuration files
- *ztpserver.conf*, *ztpserver.wsgi*, *bootstrap.conf* and *neighbordb* are preserved (new default files are installed under *<filename>*.new)
- all definitions, config-handlers, files, node folders and resources are preserved
- *bootstrap* file, actions and libraries are always overwritten
* *bootstrap.conf* now supports specifying empty config sections:
::
logging:
...
xmpp:
::
logging:
xmpp:
...
| ztpserver | /ztpserver-2.0.0.tar.gz/ztpserver-2.0.0/docs/ReleaseNotes1.3.rst | ReleaseNotes1.3.rst |
Release 1.5
-----------
New Modules
^^^^^^^^^^^
Enhancements
^^^^^^^^^^^^
* Bootstrap client - use unix:sockets by default on images with support (`344 <https://github.com/arista-eosplus/ztpserver/pull/344>`_) [`jerearista <https://github.com/jerearista>`_]
.. comment
* Action install_image - support for disabling downgrade (`343 <https://github.com/arista-eosplus/ztpserver/pull/343>`_) [`jerearista <https://github.com/jerearista>`_]
.. comment
* Resolve file download issues - disable gzip, deflate in HTTP requests (`342 <https://github.com/arista-eosplus/ztpserver/pull/342>`_) [`jerearista <https://github.com/jerearista>`_]
.. comment
Fixed
^^^^^
* Fix "resource file missing" error on validate (`339 <https://github.com/arista-eosplus/ztpserver/issues/339>`_) [`urvishpanchal <https://github.com/urvishpanchal>`_]
.. comment
* Docs - Fix setup.py install paths when on ReadTheDocs (`328 <https://github.com/arista-eosplus/ztpserver/pull/328>`_) [`jerearista <https://github.com/jerearista>`_]
.. comment
Known Caveats
^^^^^^^^^^^^^
| ztpserver | /ztpserver-2.0.0.tar.gz/ztpserver-2.0.0/docs/ReleaseNotes1.5.0.rst | ReleaseNotes1.5.0.rst |
Glossary of terms
=================
.. glossary:: :sorted:
node
a node is an EOS instance which is provisioned via ZTPServer. A node is uniquely identified by its unique_id (serial number or system MAC address) and/or unique position in the network.
action
an action is a Python script which is executed during the bootstrap process.
attribute
an attribute is a variable that holds a value. attributes are used in order to customise the behaviour of actions which are executed during the bootstrap process.
definition
a definition is a YAML file that contains a collection of all actions (and associated attributes) which need to run during the bootstrap process in order to fully provision a node
pattern
a pattern is a YAML file which describes a node in terms of its unique_id (serial number or system MAC) and/or location in the network (neighbors)
neighbordb
neighbordb is a YAML file which contains a collection of patterns which can be used in order to map nodes to definitions
resource pool
a resource pool is a set of resources which can be allocated on the server for the clients. For example, a YAML file can provide a mapping between a set of resources and the nodes to which some of the resources have been allocated (the nodes are uniquely identified via their system MAC).
unique_id
the unique identifier for a node. This can be configured, globally, to be the serial number (default) or system MAC address in the ztpserver.conf file
| ztpserver | /ztpserver-2.0.0.tar.gz/ztpserver-2.0.0/docs/glossary.rst | glossary.rst |
Troubleshooting
===============
.. contents:: :local:
Basics
``````
When the ZTP process isn't behaving as expected, there are some basics that
should be checked regularly.
Updating to the latest Release is strongly encouraged
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
ZTP Server is continually being enhanced and improved, and it's entirely possible that
the issue you've encountered has already been addressed, either in the documentation
such as :doc:`tips`, or in the code, itself. Therefore, we strongly encourage anyone
experiencing difficulty to reproduce the issue on the latest release version before
opening an issue or requesting support. See :ref:`upgrade`.
If the switch is not attempting Zero Touch Provisioning
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Check whether ZTP has been disabled on the switch::
Arista#show zerotouch
Validate the ZTP Server configuration syntax
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Many errors are simply due to typos or other syntax issues in config files.
It is good practice to use the ``--validate-config`` option to ztps and to paste configs
into `http://yamllint.com/` to ensure they are well-formed YAML::
[user@ztpserver]$ ztps --validate-config
Other troubleshooting steps
^^^^^^^^^^^^^^^^^^^^^^^^^^^
A number of other troubleshooting steps including how to specify the separate
apache log files just for ZTP Server, and how to do a test run of ztpserver
without reloading a switch are located on the :doc:`tips` page.
.. _before-requesting-support:
Before Requesting Support
`````````````````````````
Before requesting support, it is important to perform the following steps to
collect sufficient data to reduce information requests and enable timely resolution.
Version and Install method
^^^^^^^^^^^^^^^^^^^^^^^^^^
If not already recorded in the logs, please execute ``ztps --version`` and
specify whether your installation was from source (github), pip, RPM, or
a packer-ztpserver canned VM.
Server-side logs
^^^^^^^^^^^^^^^^
The location of server-side logs may vary depending on your specific environment.
* If running ZTP Server via Apache, check the VirtualHost definition for
CustomLog and ErrorLog entries, otherwise, look in the default Apache logs.
On Fedora, those will be in /var/log/httpd/
* If running the standalone ``ztps`` binary (a good choice for debugging), please
include the ``--debug`` option. Using ``ztps --debug 2>&1 | tee ztpserver.log`` will log
the output to both the screen and a file.
Client-side logs
^^^^^^^^^^^^^^^^
Ensure the bootstrap client is configured to log to syslog or XMPP via
/usr/share/ztpserver/bootstrap/bootstrap.conf and include that output. Attempting to
collect client side logs from the console frequently results in missing information
due to scroll buffers or line length.
Configuration Files
^^^^^^^^^^^^^^^^^^^
Please, also, include the files in /etc/ztpserver/ and /usr/share/ztpserver/
directories. ``tar czvf my_ztpserver_config.tgz /etc/ztpserver/ /usr/share/ztpserver/``
| ztpserver | /ztpserver-2.0.0.tar.gz/ztpserver-2.0.0/docs/troubleshooting.rst | troubleshooting.rst |
.. ZTPServer documentation master file, created by
sphinx-quickstart on Tue Feb 18 16:40:25 2014.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
ZTPServer Overview
========================
ZTPServer provides a bootstrap environment for Arista EOS based products. It is written mostly in Python and leverages standard protocols like DHCP (for boot functions), HTTP (for bi-directional transport), XMPP and syslog (for logging). Most of the configuration files are YAML based.
This open source project is maintained by the `Arista Networks <http://arista.com/>`_ EOS+ services organization.
Highlights
``````````
* Extends the basic capability of EOS's zero-touch provisioning feature in order to allow more robust provisioning activities
* Is extensible, for easy integration into various network environments
* Can be run natively in EOS or any Linux server
* Arista EOS+ led community open source project
Features
````````
* Dynamic startup-config generation and automatic install
* Image and file system validation and standardization
* Connectivity validation and topology based auto-provisioning
* Config and device templates with dynamic resource allocation
* Zero-touch replacement and upgrade capabilities
* User extensible actions
* Email, XMPP, syslog based logging
.. _an_introduction:
.. toctree::
:maxdepth: 1
overview
install
startup
config
examples
cookbook
tips
internals
glossary
support
troubleshooting
license
| ztpserver | /ztpserver-2.0.0.tar.gz/ztpserver-2.0.0/docs/index.rst | index.rst |
ZTPServer Documentation
========================
[ZTPServer official documentation](http://ztpserver.readthedocs.org/) is built and hosted at (http://ReadTheDocs.org/).
Contributing
------------
See CONTRIBUTING.md for information on maintaining documentation.
Building / Publishing Docs locally
----------------------------------
* `make` \(default make target is now `make html`\)
* Open file://_build/html/index.html in your browser to view.
* Publish by copying `docs/_build/html/*` to the `gh-pages` branch
Documenting REST APIs
---------------------
REST APIs are documented via the [httpdomain](https://pythonhosted.org/sphinxcontrib-httpdomain/) plugin for sphinx.
| ztpserver | /ztpserver-2.0.0.tar.gz/ztpserver-2.0.0/docs/README.md | README.md |
Contributing to ZTP Server Docs
===============================
Documenting REST APIs
---------------------
REST APIs are documented via the [httpdomain](https://pythonhosted.org/sphinxcontrib-httpdomain/) plugin for sphinx.
Docstrings in the code
----------------------
Other documentation extracted from the code should be in the [Google](http://google-styleguide.googlecode.com/svn/trunk/pyguide.html#Comments) format readable by the [Napoleon](http://sphinxcontrib-napoleon.readthedocs.org/en/latest/) module.
Editing RST documents
---------------------
Documentation is maintained in .rst files in the docs/ directory and in Google-style or sphinxcontrib-httpdomain docstrings within the code. Images and diagrams may be included in the _static/ directory. Please be sure you have built and viewed your work prior to submitting a pull-request back to the 'develop' branch.
Building and testing documentation generation
---------------------------------------------
To test contributions to the documentation, ensure you have the prerequisites, below, then follow the build and test instructions before submitting a pull request.
prerequisites
``````````````
sudo pip install sphinx_rtd_theme
sudo pip install sphinxcontrib-napoleon
# httpdomain (for REST APIs)
sudo pip install sphinxcontrib-httpdomain
**OR**
hg clone https://bitbucket.org/birkenfeld/sphinx-contrib
cd sphinx-contrib/httpdomain/
sudo python setup.py install
# If you wish to test building epub or PDF documents, you will need latex.
# latexpdf may require the following
https://tug.org/mactex/ OR
texlive-latex-recommended
texlive-latex-extra
texlive-fonts-recommended
sudo tlmgr option repository ftp://ftp.tug.org/historic/systems/texlive/2013/tlnet-final
sudo tlmgr --force install titlesec
sudo tlmgr install framed
sudo tlmgr install threeparttable
sudo tlmgr install wrapfig
sudo tlmgr install multirow
sudo tlmgr install collection-fontsrecommended
Building / Publishing Docs locally
``````````````````````````````````
* `make` \(default make target is now `make html`\)
* Open file://_build/html/index.html in your browser to view.
* Publish by copying `docs/_build/html/*` to the `gh-pages` branch
| ztpserver | /ztpserver-2.0.0.tar.gz/ztpserver-2.0.0/docs/CONTRIBUTING.md | CONTRIBUTING.md |
Release 1.4
-----------
(Published August, 2015)
The authoritative state for any known issue can be found in `GitHub issues <https://github.com/arista-eosplus/ztpserver/issues>`_.
Enhancements
^^^^^^^^^^^^
* Plugin infrastructure for resource pool allocation (`121 <https://github.com/arista-eosplus/ztpserver/issues/121>`_)
* Use the order of entries in the file for allocating resources from a file via the ``allocate`` plugin (`319 <https://github.com/arista-eosplus/ztpserver/issues/319>`_)
* Documentation updates:
- Plugin infrastructure for resource pool allocation (`121 <https://github.com/arista-eosplus/ztpserver/issues/121>`_)
Bug fixes
^^^^^^^^^
* Starting ZTPServer fails because ``pkg_resources.DistributionNotFound: mock`` (`318 <https://github.com/arista-eosplus/ztpserver/issues/318>`_)
* Bootstrap file cannot be read by server (`308 <https://github.com/arista-eosplus/ztpserver/issues/308>`_)
* Bootstrap script fails because of broken pipe in EOS-4.14.5+ (`312 <https://github.com/arista-eosplus/ztpserver/issues/312>`_)
| ztpserver | /ztpserver-2.0.0.tar.gz/ztpserver-2.0.0/docs/ReleaseNotes1.4.rst | ReleaseNotes1.4.rst |
Client - Server API
-------------------
.. The RESTful API is documented using sphinxcontrib-httpdomain. See
http://pythonhosted.org/sphinxcontrib-httpdomain/
.. Verify sync with ztpserver.controller.py using the following:
(PYTHONPATH=.; python)
my_map = controller.Router()
print(my_map.map)
.. contents:: :local:
URL Endpoints
~~~~~~~~~~~~~
+---------------+-----------------------------------------+
| HTTP Method | URI |
+===============+=========================================+
| GET | /bootstrap |
+---------------+-----------------------------------------+
| GET | /bootstrap/config |
+---------------+-----------------------------------------+
| POST | /nodes |
+---------------+-----------------------------------------+
| GET | /nodes/{id} |
+---------------+-----------------------------------------+
| PUT | /nodes/{id}/startup-config |
+---------------+-----------------------------------------+
| GET | /nodes/{id}/startup-config |
+---------------+-----------------------------------------+
| GET | /actions/{name} |
+---------------+-----------------------------------------+
| GET | /files/{filepath} |
+---------------+-----------------------------------------+
| GET | /meta/{actions|files|nodes}/{PATH_INFO} |
+---------------+-----------------------------------------+
GET bootstrap script
^^^^^^^^^^^^^^^^^^^^
.. http:get:: /bootstrap
Returns the default bootstrap script
**Request**
.. sourcecode:: http
GET /bootstrap HTTP/1.1
**Response**
.. code-block:: http
Content-Type: text/x-python
<contents of bootstrap client script>
:resheader Content-Type: text/x-python
:statuscode 200: OK
.. note::
For every request, the bootstrap controller on the
ZTPServer will attempt to perform the following string replacement
in the bootstrap script: **“$SERVER” ---> the value of the
“server\_url” variable in the server’s global configuration file**. This
string replacement will point the bootstrap client back to the
server in order to enable the client to make additional requests for
further resources on the server.
- if the ``server_url`` variable is missing from the server’s global
configuration file, 'http://ztpserver:8080' is used by default
- if the ``$SERVER`` string is missing from the bootstrap script, the
controller will log a warning message and continue
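As a quick sanity check, this endpoint can be exercised with curl (the server name and port are illustrative and should match your ``server_url``):

.. code-block:: console

   $ curl http://ztpserver:8080/bootstrap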
GET bootstrap logging configuration
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. http:get:: /bootstrap/config
Returns the logging configuration from the server.
**Request**
.. sourcecode:: http
GET /bootstrap/config HTTP/1.1
**Response**
.. sourcecode:: http
Content-Type: application/json
{
    “logging”*: [ {
        “destination”: “file:/<PATH>” | “<HOSTNAME OR IP>:<PORT>”,   // localhost enabled by default
        “level”*: <DEBUG | CRITICAL | ...>,
    } ],
    “xmpp”*: {
        “server”: <IP or HOSTNAME>,
        “port”: <PORT>,               // Optional, default 5222
        “username”*: <USERNAME>,
        “domain”*: <DOMAIN>,
        “password”*: <PASSWORD>,
        “nickname”: <NICKNAME>,       // Optional, default ‘username’
        “rooms”*: [ <ROOM>, … ]
    }
}
**Note**: \* Items are mandatory (even if value is empty list/dict)
:resheader Content-Type: application/json
:statuscode 200: OK
POST node details
^^^^^^^^^^^^^^^^^
Send node information to the server in order to check whether it can be
provisioned.
.. http:post:: /nodes
**Request**
.. sourcecode:: http
Content-Type: application/json
{
“model”*: <MODEL_NAME>,
“serialnumber”*: <SERIAL_NUMBER>,
“systemmac”*: <SYSTEM_MAC>,
“version”*: <INTERNAL_VERSION>,
“neighbors”*: {
<INTERFACE_NAME(LOCAL)>: [ {
'device': <DEVICE_NAME>,
'remote_interface': <INTERFACE_NAME(REMOTE)>
} ]
},
}
**Note**: \* Items are mandatory (even if value is empty list/dict)
**Response**
Status: 201 Created OR 409 Conflict will both return:
.. sourcecode:: http
Content-Type: text/html
Location: <url>
:statuscode 201: Created
:statuscode 409: Conflict
:statuscode 400: Bad Request
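For illustration, a node could be registered manually with curl (the server URL and all field values are placeholders):

.. code-block:: console

   $ curl -X POST -H "Content-Type: application/json" \
         -d '{"model": "vEOS", "serialnumber": "ABC12345678", "systemmac": "001c73aabbcc", "version": "4.14.5F", "neighbors": {}}' \
         http://ztpserver:8080/nodes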
GET node definition
^^^^^^^^^^^^^^^^^^^
Request definition from the server.
.. http:get:: /nodes/(ID)
**Request**
.. sourcecode:: http
GET /nodes/{ID} HTTP/1.1
Accept: application/json
**Response**
.. sourcecode:: http
Content-Type: application/json
{
“name”*: <DEFINITION_NAME>,
“actions”*: [{ “action”*: <NAME>*,
“description”: <DESCRIPTION>,
“onstart”: <MESSAGE>,
“onsuccess”: <MESSAGE>,
“onfailure”: <MESSAGE>,
“always_execute”: [True, False],
“attributes”: { <KEY>: <VALUE>,
<KEY>: { <KEY> : <VALUE>},
<KEY>: [ <VALUE>, <VALUE> ]
}
},...]
}
**Note**: \* Items are mandatory (even if value is empty list/dict)
:resheader Content-Type: application/json
:statuscode 200: OK
:statuscode 400: Bad Request
:statuscode 404: Not Found
PUT node startup-config
^^^^^^^^^^^^^^^^^^^^^^^
This is used to back up the startup-config from a node to the server.
.. http:put:: /nodes/(ID)/startup-config
**Request**
.. sourcecode:: http
Content-Type: text/plain
<startup-config contents>
:statuscode 201: Created
:statuscode 400: Bad Request
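A hypothetical backup of a node's startup-config with curl (the node ID and file path are placeholders):

.. code-block:: console

   $ curl -X PUT -H "Content-Type: text/plain" \
         --data-binary @startup-config \
         http://ztpserver:8080/nodes/ABC12345678/startup-config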
GET node startup-config
^^^^^^^^^^^^^^^^^^^^^^^
This is used to retrieve the startup-config that was backed-up from a node to the server.
.. http:get:: /nodes/(ID)/startup-config
**Request**
.. sourcecode:: http
Content-Type: text/plain
**Response**
A successful request returns the previously backed-up configuration:
.. sourcecode:: http
Content-Type: text/plain
<startup-config contents>
:resheader Content-Type: text/plain
:statuscode 200: OK
:statuscode 400: Bad Request
GET actions/(NAME)
^^^^^^^^^^^^^^^^^^
.. http:get:: /actions/(NAME)
Request action from the server.
**Request Example**
.. sourcecode:: http
GET /actions/add_config HTTP/1.1
**Response**
.. sourcecode:: http
Content-Type: text/x-python
<raw action content>
:resheader Content-Type: text/x-python
:statuscode 200: OK
:statuscode 404: Not Found
GET resource files
^^^^^^^^^^^^^^^^^^
.. http:get:: /files/(RESOURCE_PATH)
Request action from the server.
**Request Examples**
.. sourcecode:: http
GET /files/images/vEOS.swi HTTP/1.1
GET /files/templates/ma1.template HTTP/1.1
**Response**
.. sourcecode:: http
<raw resource contents>
:resheader Content-Type:text/plain
:statuscode 200: OK
:statuscode 404: Not Found
GET meta data for a resource or file
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. http:get:: /meta/(actions|files|nodes)/(PATH_INFO)
Request meta-data on a file.
**Example Requests**
.. sourcecode:: http
GET /meta/actions/add_config HTTP/1.1
GET /meta/files/images/EOS-4.14.5F.swi HTTP/1.1
GET /meta/nodes/001122334455/.node HTTP/1.1
**Response**
.. sourcecode:: http
{
sha1: "d3852470a7328a4aad54ce030c543fdac0baa475"
size: 160
}
:resheader Content-Type:application/json
:statuscode 200: OK
:statuscode 500: Server Error
| ztpserver | /ztpserver-2.0.0.tar.gz/ztpserver-2.0.0/docs/api.rst | api.rst |
Startup
=======
.. contents:: :local:
**HTTP Server Deployment Options**
ZTPServer is a Python WSGI compliant application that can be deployed behind any WSGI web server or run as a standalone application.
After initial startup, any change to ``ztpserver.conf`` will require a server restart. However, all other files are read on-demand, therefore no server restart is required to pick up changes in definitions, neighbordb, resources, etc.
.. note:: The ``ztps`` standalone server executable is for demo and testing use ONLY. It is NOT recommended for production use!
Apache (mod_wsgi)
`````````````````
If using Apache, this section provides instructions for setting up ZTPServer using mod_wsgi. This section assumes the reader is familiar with Apache and has already installed mod_wsgi. For details on how to install mod_wsgi, please see the `modwsgi Quick Installation Guide <https://code.google.com/p/modwsgi/wiki/QuickInstallationGuide>`_.
To enable ZTPServer for an Apache server, add the following WSGI configuration to the Apache config. A good location might be ``/etc/httpd/conf.d/ztpserver.conf`` or ``/etc/apache2/sites-enabled/ztpserver.conf``:
.. code-block:: apacheconf
LoadModule wsgi_module modules/mod_wsgi.so
Listen 8080
<VirtualHost *:8080>
WSGIDaemonProcess ztpserver user=www-data group=www-data threads=50
WSGIScriptAlias / /etc/ztpserver/ztpserver.wsgi
# Required for RHEL
#WSGISocketPrefix /var/run/wsgi
<Location />
WSGIProcessGroup ztpserver
WSGIApplicationGroup %{GLOBAL}
# For Apache <= 2.2, use Order and Allow
Order deny,allow
Allow from all
# For Apache >= 2.4, Allow is replaced by Require
Require all granted
</Location>
# Override default logging locations for Apache
#ErrorLog /path/to/ztpserver_error.log
#CustomLog /path/to/ztpserver_access.log
</VirtualHost>
WSGIScriptAlias should point to the ztpserver.wsgi file which is installed by default under /etc/ztpserver/ztpserver.wsgi. You will notice that the ``<Location />`` directive is set to the root directory. This will enable ZTPServer to listen at the base server URL:
``http://<host_ip>:8080/bootstrap``
If you would like to run the ZTPServer under a subdirectory, leave the Apache configuration as it is listed above and modify the ZTPServer configuration to include the URL path prefix (``/ztpserver`` in this example).
For example, edit the default configuration file found at ``/etc/ztpserver/ztpserver.conf`` by modifying or adding the following line under the [default] section:
``server_url = http://<host_ip>:8080/ztpserver/``
where ``/ztpserver/`` is the subdirectory on which you would like the WSGI application to listen. Once completed, restart Apache and you should be able to access your ZTPServer at the specified URL. To test, simply use curl - for example:
``curl http://<host_ip>:8080/ztpserver/bootstrap``
If everything is configured properly, curl should be able to retrieve the bootstrap script. If there is a problem, all of the ZTPServer log messages should be available under the Apache server error logs. See the ``ErrorLog`` directive in your Apache configuration to determine the location of the error log.
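For example, on a Fedora/RHEL host using the default Apache logging locations (the path shown is typical; adjust it to match your ``ErrorLog`` setting), the following can be used to watch for errors while a node bootstraps:

.. code-block:: console

   $ sudo tail -f /var/log/httpd/error_log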
.. note:: File Permissions - Apache mod_wsgi will run ztpserver.wsgi as the specified system user in your Apache config. This user must be able to read/write the files in ``/usr/share/ztpserver`` (or wherever you created your data_root).
.. note:: SELinux - Apache will need to read and write to files in ``/usr/share/ztpserver``. Therefore, you might need to update/assign an SELinux user/role/type to these files. You can do something like ``chcon -R -h system_u:object_r:httpd_sys_script_rw_t /usr/share/ztpserver`` to accomplish that.
Standalone debug server
```````````````````````
.. note:: ZTPServer ships with a single-threaded server that is sufficient for testing or demonstration, only. It is not recommended for use with more than 10 nodes.
To start the standalone ZTPServer, exec the ztps binary:
.. code-block:: console
[root@ztpserver ztpserver]# ztps
INFO: [app:115] Logging started for ztpserver
INFO: [app:116] Using repository /usr/share/ztpserver
Starting server on http://<ip_address>:<port>
The following options may be specified when starting the ztps binary:
.. code-block:: console
-h, --help show this help message and exit
--version, -v Displays the version information
--conf CONF, -c CONF Specifies the configuration file to use
--validate-config, -V
Validates config files
--debug Enables debug output to the STDOUT
--clear-resources, -r
Clears all resource files
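For example, to run the standalone server in the foreground with debug output and an explicit configuration file (the path shown is the default install location):

.. code-block:: console

   [root@ztpserver ~]# ztps --debug --conf /etc/ztpserver/ztpserver.conf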
Assuming that the DHCP server is serving DHCP offers which include the path to the ZTPServer bootstrap script in option 67, and that the EOS nodes can access the bootstrap file over the network, the provisioning process should now start automatically for all nodes with no startup configuration.
| ztpserver | /ztpserver-2.0.0.tar.gz/ztpserver-2.0.0/docs/startup.rst | startup.rst |
Release 1.6.0
-------------
New Modules
^^^^^^^^^^^
Enhancements
^^^^^^^^^^^^
* Add Ansible action docs (`352 <https://github.com/arista-eosplus/ztpserver/pull/352>`_) [`jerearista <https://github.com/jerearista>`_]
* Update Bootstrap regex used to detect old EOS versions that do not support unix sockets (`354 <https://github.com/arista-eosplus/ztpserver/pull/354>`_) [`jerearista <https://github.com/jerearista>`_]
* Add Docker (`362 <https://github.com/arista-eosplus/ztpserver/pull/362>`_) [`jerearista <https://github.com/jerearista>`_]
* Added support for streaming large EOS images to device for smaller switches (`376 <https://github.com/arista-eosplus/ztpserver/pull/376>`_) [`mhartista <https://github.com/mharista>`_]
Fixed
^^^^^
* Fix RPM builds (`351 <https://github.com/arista-eosplus/ztpserver/pull/351>`_) [`jerearista <https://github.com/jerearista>`_]
* Do not add blank line at the start of a file (`355 <https://github.com/arista-eosplus/ztpserver/pull/355>`_) [`jerearista <https://github.com/jerearista>`_]
* Fix interface matcher for remote_interface to properly process regex() values (`360 <https://github.com/arista-eosplus/ztpserver/pull/360>`_) [`jerearista <https://github.com/jerearista>`_]
* Fix intf pattern range (`361 <https://github.com/arista-eosplus/ztpserver/pull/361>`_) [`jerearista <https://github.com/jerearista>`_]
Known Caveats
^^^^^^^^^^^^^
| ztpserver | /ztpserver-2.0.0.tar.gz/ztpserver-2.0.0/docs/ReleaseNotes1.6.0.rst | ReleaseNotes1.6.0.rst |
Examples
========
.. contents:: :local:
.. _global_config:
Global configuration file
`````````````````````````
.. code-block:: ini
[default]
# Location of all ztps boostrap process data files
data_root = /usr/share/ztpserver
# UID used in the /nodes structure (serialnumber or systemmac)
identifier = serialnumber
# Server URL to-be-advertised to clients (via POST replies) during the bootstrap process
server_url = http://172.16.130.10:8080
# Enable local logging
logging = True
# Enable console logging
console_logging = True
# Console logging format
console_logging_format = %(asctime)s:%(levelname)s:[%(module)s:%(lineno)d] %(message)s
# Globally disable topology validation in the bootstrap process
disable_topology_validation = False
[server]
# Note: this section only applies to using the standalone server. If
# running under a WSGI server, these values are ignored
# Interface to which the server will bind to (0:0:0:0 will bind to
# all available IPv4 addresses on the local machine)
interface = 172.16.130.10
# TCP listening port
port = 8080
[bootstrap]
# Bootstrap filename (file located in <data_root>/bootstrap)
filename = bootstrap
[neighbordb]
# Neighbordb filename (file located in <data_root>)
filename = neighbordb
.. _dynamic_neighbordb_example:
Dynamic neighbordb or pattern file
``````````````````````````````````
.. code-block:: yaml
---
patterns:
#dynamic sample
- name: dynamic_sample
definition: tor1
interfaces:
- Ethernet1: spine1:Ethernet1
- Ethernet2: spine2:Ethernet1
- any: ztpserver:any
- name: dynamic_sample2
definition: tor2
interfaces:
- Ethernet1: spine1:Ethernet2
- Ethernet2: spine2:Ethernet2
- any: ztpserver:any
.. _static_neighbordb_example:
Static neighbordb and /node/<unique-id>/pattern file
````````````````````````````````````````````````````
.. code-block:: yaml
---
patterns:
#static sample
- name: static_node
node: 000c29f3a39g
interfaces:
- any: any:any
.. _dynamic_definition_example:
Sample dynamic definition file
``````````````````````````````
.. code-block:: yaml
---
actions:
-
action: install_image
always_execute: true
attributes:
url: files/images/vEOS.swi
version: 4.13.5F
name: "validate image"
-
action: add_config
attributes:
url: files/templates/ma1.template
variables:
ipaddress: allocate('mgmt_subnet')
name: "configure ma1"
-
action: add_config
attributes:
url: files/templates/system.template
variables:
hostname: allocate('tor_hostnames')
name: "configure global system"
-
action: add_config
attributes:
url: files/templates/login.template
name: "configure auth"
-
action: add_config
attributes:
url: files/templates/ztpprep.template
name: "configure ztpprep alias"
-
action: add_config
attributes:
url: files/templates/snmp.template
variables: $variables
name: "configure snmpserver"
-
action: add_config
attributes:
url: files/templates/configpush.template
variables: $variables
name: "configure config push to server"
-
action: copy_file
always_execute: true
attributes:
dst_url: /mnt/flash/
mode: 777
overwrite: if-missing
src_url: files/automate/ztpprep
name: "automate reload"
attributes:
variables:
ztpserver: 172.16.130.10
name: tora
.. _template_example:
Sample templates
````````````````
.. code-block:: yaml
#login.template
#::::::::::::::
username admin priv 15 secret admin
.. code-block:: yaml
#ma1.template
#::::::::::::::
interface Management1
ip address $ipaddress
no shutdown
.. code-block:: yaml
#hostname.template
#::::::::::::::
hostname $hostname
.. _resources_example:
Sample resources
````````````````
::
#mgmt_subnet
#::::::::::::::
192.168.100.210/24: null
192.168.100.211/24: null
192.168.100.212/24: null
192.168.100.213/24: null
192.168.100.214/24: null
::
#tor_hostnames
#::::::::::::::
veos-dc1-pod1-tor1: null
veos-dc1-pod1-tor2: null
veos-dc1-pod1-tor3: null
veos-dc1-pod1-tor4: null
veos-dc1-pod1-tor5: null
.. _mode_examples:
Neighbordb pattern examples
```````````````````````````
Example #1
''''''''''
.. code-block:: yaml
---
- name: standard leaf definition
definition: leaf_template
node: ABC12345678
interfaces:
- Ethernet49: pod1-spine1:Ethernet1/1
- Ethernet50:
device: pod1-spine2
port: Ethernet1/1
In example #1, the topology map would only apply to a node with system ID
equal to **ABC12345678**. The following interface map rules apply:
- Interface Ethernet49 must be connected to node pod1-spine1 on port
Ethernet1/1
- Interface Ethernet50 must be connected to node pod1-spine2 on port
Ethernet1/1
Example #2
''''''''''
.. code-block:: yaml
---
- name: standard leaf definition
definition: leaf_template
node: 001c73aabbcc
interfaces:
- any: regex('pod\d+-spine\d+'):Ethernet1/$
- any:
device: regex('pod\d+-spine1')
port: Ethernet2/3
In this example, the topology map would only apply to the node with
system ID equal to **001c73aabbcc**. The following interface
map rules apply:
- At least one interface must be connected to a node that matches the regular
  expression 'pod\d+-spine\d+' on port Ethernet1/$ (any port on module 1)
- At least one other interface (not the interface which matched in the previous step)
  must be connected to a node that matches the regular expression
  'pod\d+-spine1' on port Ethernet2/3
Example #3
''''''''''
.. code-block:: yaml
---
- name: standard leaf definition
definition: dc-1/pod-1/leaf_template
variables:
- not_spine: excludes('spine')
- any_spine: regex('spine\d+')
- any_pod: includes('pod')
interfaces:
- Ethernet1: $any_spine:Ethernet1/$
- Ethernet2: $pod1-spine2:any
- any: excludes('spine1'):Ethernet49
- any: excludes('spine2'):Ethernet49
- Ethernet49:
device: $not_spine
port: Ethernet49
- Ethernet50:
device: excludes('spine')
port: Ethernet50
This example pattern could apply to any node that matches the interface
map. It includes the use of variables for cleaner implementation and
pattern re-use.
- Variable not\_spine matches any node name where 'spine' doesn't
appear in the string
- Variable any\_spine matches any node name where the regular
expression 'spine\d+' matches the name
- Variable any\_pod matches any node name where that includes the name
'pod' in it
- **Variable any\_pod\_spine combines variables any\_spine and any\_pod
into a complex variable that includes any name that matches the
regular expression 'spine\d+' and the name includes 'pod' (not yet
supported)**
- Interface Ethernet1 must be connected to a node that matches the
any\_spine pattern and is connected on Ethernet1/$ (any port on
module 1)
- Interface Ethernet2 must be connected to node 'pod1-spine2' on any
Ethernet port
- Interface any must be connected to any node that doesn't have
'spine1' in the name and is connected on Ethernet49
- Interface any must be connected to any node that doesn't have
'spine2' in the name and wasn't already used and is connected to
Ethernet49
- Interface Ethernet49 matches if it is connected to any node that
matches the not\_spine pattern and is connected on port 49
- Interface Ethernet50 matches if the node is connected to port
Ethernet50 on any node whose name does not contain ‘spine’
Example #4
''''''''''
.. code-block:: yaml
---
- name: sample mlag definition
definition: mlag_leaf_template
variables:
any_spine: includes('spine')
not_spine: excludes('spine')
interfaces:
- Ethernet1: $any_spine:Ethernet1/$
- Ethernet2: $any_spine:any
- Ethernet3: none
- Ethernet4: any
- Ethernet5:
device: includes('oob')
port: any
- Ethernet49: $not_spine:Ethernet49
- Ethernet50: $not_spine:Ethernet50
This is a similar example to #3 that demonstrates how an MLAG pattern
might work.
- Variable any\_spine defines a pattern that includes the word 'spine'
in the name
- Variable not\_spine defines a pattern that matches the inverse of
any\_spine
- Interface Ethernet1 matches if it is connected to any\_spine on port
Ethernet1/$ (any port on module 1)
- Interface Ethernet2 matches if it is connected to any\_spine on any
port
- Interface 3 matches so long as there is nothing attached to it
- Interface 4 matches so long as something is attached to it
- Interface 5 matches if the node contains 'oob' in the name and is
connected on any port
- Interface Ethernet49 matches if it is connected to any device that doesn't
  have 'spine' in the name and is connected on port Ethernet49
- Interface Ethernet50 matches if it is connected to any device that doesn't
  have 'spine' in the name and is connected on port Ethernet50
Example #5
''''''''''
.. code-block:: yaml
---
- name: Connected to Spine 2
definition: spine2
variables:
any_spine: includes('spine')
interfaces:
- any: $any_spine:regex('Ethernet[45]/\d+\1')
In this case, the pattern matches if `any` local interface is connected to a
device with `spine` in the hostname and to the 4th or 5th slot in the chassis.
More examples
`````````````
Additional ZTPServer file examples are available on GitHub at the `ZTPServer Demo <https://github.com/arista-eosplus/ztpserver-demo>`_.
Configuration
=============
.. contents:: :local:
Overview
~~~~~~~~
The ZTPServer uses a series of YAML files for its configuration and
databases. The YAML format makes the files easier to read and makes
adding or updating entries more intuitive than other file formats such
as JSON or binary formats such as SQL.
The ZTPServer components are housed in a single directory defined by the ``data_root`` variable in the global configuration file. The directory location will vary depending on the configuration in ``/etc/ztpserver/ztpserver.conf``.
The following directory structure is normally used:
.. code-block:: ini
[data_root]
bootstrap/
bootstrap
bootstrap.conf
nodes/
<unique_id>/
startup-config
definition
pattern
config-handler
.node
attributes
actions/
files/
definitions/
resources/
neighbordb
All configuration files can be validated using:
::
(bash)# ztps --validate
.. _global_configuration:
Global configuration file
~~~~~~~~~~~~~~~~~~~~~~~~~
The global ZTPServer configuration file can be found at ``/etc/ztpserver/ztpserver.conf``. It uses the INI format (for details, see top section of `Python configparser <https://docs.python.org/2/library/configparser.html>`_).
An alternative location for the global configuration file may be specified by using the ``--conf`` command line option:
e.g.
::
(bash)# ztps --help
usage: ztpserver [options]
optional arguments:
-h, --help show this help message and exit
--version, -v Displays the version information
**--conf CONF, -c CONF Specifies the configuration file to use**
--validate-config, -V
Validates config files
--debug Enables debug output to the STDOUT
--clear-resources, -r
Clears all resource files
(bash)# ztps --conf /var/ztps.conf
If the global configuration file is updated, the server must be restarted in order to pick up the new configuration.
.. code-block:: ini
[default]
# Location of all ztps bootstrap process data files
# default= /usr/share/ztpserver
data_root=<PATH>
# UID used in the /nodes structure
# default=serialnumber
identifier=<serialnumber | systemmac>
# Server URL to-be-advertised to clients (via POST replies) during the bootstrap process
# default=http://ztpserver:8080
server_url=<URL>
# Enable local logging
# default=True
logging=<True | False>
# Enable console logging
# default=True
console_logging=<True | False>
# Console logging format
# default=%(asctime)-15s:%(levelname)s:[%(module)s:%(lineno)d] %(message)s
console_logging_format=<(Python)logging format>
# Globally disable topology validation in the bootstrap process
# default=False
disable_topology_validation=<True | False>
[server]
# Note: this section only applies to using the standalone server. If
# running under a WSGI server, these values are ignored
# Interface to which the server will bind (0:0:0:0 will bind to
# all available IPv4 addresses on the local machine)
# default=0.0.0.0
interface=<IP addr>
# TCP listening port
# default=8080
port=<TCP port>
[bootstrap]
# Bootstrap filename (file located in <data_root>/bootstrap)
# default=bootstrap
filename=<name>
[neighbordb]
# Neighbordb filename (file located in <data_root>)
# default=neighbordb
filename=<name>
.. note::
Configuration values may be overridden by setting environment variables, if the configuration attribute supports it. This is mainly used for testing and should not be used in production deployments.
Configuration values that support environment overrides use the ``environ`` keyword, as shown below:
.. code-block:: python
runtime.add_attribute(StrAttr(
name='data_root',
default='/usr/share/ztpserver',
environ='ZTPS_DEFAULT_DATAROOT'
))
In the above example, the ``data_root`` value is normally configured in the [default] section as ``data_root``; however, if the environment variable ``ZTPS_DEFAULT_DATAROOT`` is defined, it will take precedence.
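For example, to point a standalone server at an alternate data directory during testing, the override could be exported in the shell before launching the server (the path below is only an illustration):

.. code-block:: console

   (bash)# export ZTPS_DEFAULT_DATAROOT=/tmp/ztps-test
   (bash)# ztps --debug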
.. _bootstrap_config:
Bootstrap configuration
~~~~~~~~~~~~~~~~~~~~~~~~
``[data_root]/bootstrap/`` contains files that control the bootstrap process of a node.
- **bootstrap** is the base bootstrap script which is going to be served to all clients in order to control the bootstrap process. Before serving the script to the clients, the server replaces any references to $SERVER with the value of ``server_url`` in the global configuration file.
- **bootstrap.conf** is a configuration file which defines the local logging configuration on the nodes (during the bootstrap process). The file is loaded on-demand.
e.g.
.. code-block:: yaml
---
logging:
-
destination: "ztps.ztps-test.com:514"
level: DEBUG
- destination: file:/tmp/ztps-log
level: DEBUG
- destination: ztps-server:1234
level: CRITICAL
- destination: 10.0.1.1:9000
level: CRITICAL
xmpp:
domain: im.ztps-test.com
username: bootstrap
password: eosplus
rooms:
- ztps
- ztps-room2
.. note::
In order for XMPP logging to work, a non-EOS user needs to be connected to the room specified in bootstrap.conf before the ZTP process starts. The room has to be created (by the non-EOS user) before the bootstrap client starts logging the ZTP process via XMPP.
.. _static_provisioning:
Static provisioning - overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A node can be statically configured on the server as follows:
* create a new directory under ``[data_root]/nodes``, using the system's unique_id as the name
* create/symlink a *startup-config* or *definition* file in the newly-created folder
* if topology validation is enabled, also create/symlink a *pattern* file
* optionally, create *config-handler* script which is run whenever a PUT startup-config request succeeds
Static provisioning - startup_config
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
``startup-config`` provides a static startup-configuration for the node. If this file is present in a node’s folder, when the node sends a GET request to ``/nodes/<unique_id>``, the server will respond with a static definition that includes:
- a **replace\_config** action which will install the configuration file on the switch (see `actions <#actions>`__ section below for more on this). This action will be placed **first** in the definition.
- all the **actions** from the local **definition** file (see definition section below for more on this) which have the ``always_execute`` attribute set to ``True``
.. _definition:
Static provisioning - definition
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The **definition** file contains the set of actions which are going to be
performed during the bootstrap process for a node. The definition file
can be either: **manually created** OR **auto-generated by the server**
when the node matches one of the patterns in **neighbordb** (in this case the
definition file is generated based on the definition file associated
with the matching pattern in **neighbordb**).
.. code-block:: yaml
name: <system name>
actions:
-
action: <action name>
attributes: # attributes at action scope
always_execute: True # optional, default False
<key>: <value>
<key>: <value>
onstart: <msg> # message to log before action is executed
onsuccess: <msg> # message to log if action execution succeeds
onfailure: <msg> # message to log if action execution fails
...
attributes: # attributes at global scope
<key>: <value>
<key>: <value>
<key>: <value>
Static provisioning - attributes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Attributes are either key/value pairs, key/dictionary pairs, key/list pairs or key/reference pairs. They are all sent to the client in order to be passed in as arguments to actions.
Here are a few examples:
- key/value:
.. code-block:: yaml
attributes:
my_attribute : my_value
- key/dictionary
.. code-block:: yaml
attributes:
my_dict_attribute:
key1: value1
key2: value2
- key/list:
.. code-block:: yaml
attributes:
list_name:
- my_value1
- my_value2
- my_valueN
- key/reference:
.. code-block:: yaml
attributes:
my_attribute : $my_other_attribute
**key/reference** attributes are identified by the fact that the value starts with the ‘$’ sign, followed by the name of another attribute. They are evaluated before being sent to the client.
Example:
.. code-block:: yaml
attributes:
my_other_attribute: dummy
my_attribute : $my_other_attribute
will be evaluated to:
.. code-block:: yaml
attributes:
my_other_attribute: dummy
my_attribute : dummy
If a reference points to a non-existing attribute, then the variable
substitution will result in a value of *None*.
.. note::
Only **one level of indirection** is
allowed - if multiple levels of indirection are used, then the data
sent to the client will contain unevaluated key/reference pairs in
the attributes list (which might lead to failures or unexpected
results in the client).
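As a hypothetical illustration, in the attributes below ``my_other_attribute`` uses a single level of indirection and evaluates cleanly, while ``my_attribute`` uses two levels and may reach the client unevaluated:

.. code-block:: yaml

   attributes:
     my_third_attribute: dummy
     my_other_attribute: $my_third_attribute   # one level - evaluates to 'dummy'
     my_attribute: $my_other_attribute         # two levels - may be sent to the client unevaluated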
The values of the attributes can be either strings, numbers, lists, dictionaries, or references to other attributes or plugin references for allocating resources.
Plugins can be used to allocate resources on the server side and then pass the result of the allocation back to the client via the definition. The supported plugins are:
- **allocate(resource\_pool)** - allocates an available resource from a file-based resource pool
- **sqlite(resource\_pool)** - allocates an available resource from a sqlite database
.. note::
Plugins can only be referenced with strings as arguments,
currently. See section on `add\_config <#actions>`__ action for
examples.
Attributes can be defined in three places:
- in the definition, at action scope
- in the definition, at global scope
- in the node’s attributes file (see below)
``attributes`` is a file which can be used in order to store attributes
associated with the node’s definition. This is especially useful
whenever multiple nodes share the same definition - in that case,
instead of having to edit each node’s definition in order to add the
attributes (at the global or action scope), all nodes can share the same
definition (which might be symlinked to their individual node folder)
and the user only has to create the attributes file for each node. The
``attributes`` file should be a valid key/value YAML file.
.. code-block:: yaml
<key>: <value>
<key>: <value>
...
For key/value, key/list and key/reference attributes, in case of
conflicts between the three scopes, the following precedence rules are
applied to determine the final value to send to the client:
1. action scope in the definition takes precedence
2. attributes file comes next
3. global scope in the definition comes last
For key/dict attributes, in case of conflicts between the scopes, the
dictionaries are merged. In the event of dictionary key conflicts, the same
precedence rules from above apply.
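As a hypothetical illustration of these precedence rules, if the same key were defined at all three scopes as sketched below, the client would receive ``ntp_server: 10.0.0.1``, because the action-scope value wins over the attributes file and the global scope:

.. code-block:: yaml

   # definition - action scope (highest precedence)
   actions:
     -
       action: add_config
       attributes:
         ntp_server: 10.0.0.1
       name: "configure ntp"

   # the node's separate attributes file (considered next) would contain:
   #   ntp_server: 10.0.0.2

   # definition - global scope (considered last)
   attributes:
     ntp_server: 10.0.0.3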
Static provisioning - pattern
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The ``pattern`` file provides a way to validate the node's topology during the bootstrap process (if topology validation is enabled). The pattern file can be either:
- manually created
- auto-generated by the server, when the node matches one of the patterns in ``neighbordb`` (the pattern that is matched in ``neighbordb`` is, then, written to this file and used for topology validation in subsequent re-runs of the bootstrap process)
The format of a pattern is very similar to the format of ``neighbordb``
(see `neighbordb <#neighbordb>`__ section below):
.. code-block:: yaml
variables:
<variable_name>: <function>
...
name: <single line description of pattern> # optional
interfaces:
- <port_name>:<system_name>:<neighbor_port_name>
- <port_name>:
device: <system_name>
port: <neighbor_port_name>
...
If the pattern file is missing when the node makes a GET request for its definition, the server will log a message and return either:
- 400 (BAD\_REQUEST) if topology validation is enabled
- 200 (OK) if topology validation is disabled
If topology validation is enabled globally, the following patterns can be used in order to disable it for a particular node:
- match **any** node which has at least one LLDP-capable neighbor:
.. code-block:: yaml
name: <pattern name>
interfaces:
- any: any:any
OR
- match **any** node which has no LLDP-capable neighbors:
.. code-block:: yaml
name: <pattern name>
interfaces:
- none: none:none
Static provisioning - config-handler
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The ``config-handler`` file can be any script which can be executed
on the server. The script will be executed every time a PUT startup-config
request succeeds for the node.
The script can be used for raising alarms, performing checks, submitting
the startup-config file to a revision control system, etc.
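The details of how the handler is invoked are not covered here, so the sketch below deliberately ignores any arguments and simply archives the nodes directory in a local git repository each time it runs (paths are hypothetical):

.. code-block:: console

   #!/bin/bash
   # Hypothetical config-handler: commit the latest startup-configs to git
   cd /usr/share/ztpserver/nodes || exit 1
   git add -A
   git commit -m "startup-config updated via ZTPServer on $(date +%F_%T)"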
Static provisioning - log
~~~~~~~~~~~~~~~~~~~~~~~~~
The ``.node`` file contains a cached copy of the node’s details that were
received during the POST request the node makes to the ``/nodes`` URI.
This cache is used to validate the node’s neighbors against the
``pattern`` file, if topology validation is enabled (during the GET
request the node makes in order to retrieve its definition).
The ``.node`` file is created automatically by the server and should not be edited manually.
Example .node file:
.. code-block:: json
{"neighbors": {"Management1": [{"device": "mgmt-server",
"port": "0050.569b.ad8d"},
{"device": "veos-leaf3.ztps-test.com",
"port": "Management1"},
{"device": "veos-spine2.ztps-test.com",
"port": "Management1"}
],
"Ethernet1": [{"device": "veos-leaf3.ztps-test.com",
"port": "Ethernet1"}
],
"Ethernet3": [{"device": "veos-spine2.ztps-test.com",
"port": "Ethernet3"}
]
},
"model": "vEOS",
"version": "4.15.1F",
"systemmac": "005056600663"
}
.. _dynamic_provisioning:
Dynamic provisioning - overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A node can be dynamically provisioned by creating a matching ``neighbordb`` (``[data_root]/neighbordb``) entry which maps to a definition. The entry can potentially match multiple nodes.
The associated definition should be created in [data_root]/definitions/.
.. _neighbordb:
Dynamic provisioning - neighbordb
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The ``neighbordb`` YAML file defines mappings between patterns
and definitions. If a node is not already configured via a static entry,
then the node’s topology details are attempted to be matched against
the patterns in ``neighbordb``. If a match is successful, then a node
definition will be automatically generated for the node (based on the
mapping in neighbordb).
There are 2 types of patterns supported in neighbordb:
node-specific (containing the **node** attribute, which refers to the
unique_id of the node) and global patterns.
Rules:
- if multiple node-specific entries reference the same unique_id, only the first will be in effect - all others will be ignored
- if both the **node** and **interfaces** attributes are specified and a node's unique_id is a match, but the topology information is not, then the overall match will fail and the global patterns will not be considered
- if there is no matching node-specific pattern for a node's unique_id, then the server will attempt to match the node against the global patterns (in the order they are specified in ``neighbordb``)
- if a node-specific pattern matches, the server will automatically generate an open pattern in the node's folder. This pattern will match any device with at least one LLDP-capable neighbor. Example: ``any: any:any``
.. code-block:: yaml
---
variables:
variable_name: function
...
patterns:
- name: <single line description of pattern>
definition: <definition_url>
node: <unique_id>
config-handler: <config-handler>
variables:
<variable_name>: <function>
interfaces:
- <port_name>: <system_name>:<neighbor_port_name>
- <port_name>:
device: <system_name>
port: <neighbor_port_name>
...
.. note::
Mandatory attributes: **name**, **definition**, and either **node**, **interfaces** or both.
Optional attributes: **variables**, **config-handler**.
variables
'''''''''
The variables can be used to match the remote device and/or port name (``<system_name>``, ``<neighbor_port_name>`` above) for a neighbor. The supported values are:
**string**
same as exact(string) from below
exact (pattern)
defines a pattern that must be matched exactly (Note: this is the default function if another function is not specified)
regex (pattern)
defines a regex pattern to match the node name against
includes (string)
defines a string that must be present in system/port name
excludes (string)
defines a string that must not be present in system/port name
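A hypothetical pattern combining several of these functions might look like the following, matching a node whose Ethernet1 connects to any device with 'spine' in its name and whose Ethernet2 connects to any non-spine device on an Ethernet port:

.. code-block:: yaml

   ---
   variables:
     any_spine: includes('spine')
     not_spine: excludes('spine')
   patterns:
     - name: hypothetical leaf
       definition: leaf_template
       interfaces:
         - Ethernet1: $any_spine:any
         - Ethernet2: $not_spine:regex('Ethernet\d+')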
node: unique_id
'''''''''''''''
Serial number or MAC address, depending on the global 'identifier' attribute in **ztpserver.conf**.
interfaces: port\_name
''''''''''''''''''''''
Local interface name - supported values:
- **Any interface**
- any
- **No interface**
- none
- **Explicit interface**
- Ethernet1
- Ethernet2/4
- Management1
- **Interface list/range**
- Ethernet1-2
- Ethernet1,3
- Ethernet1-2,3/4
- Ethernet1-2,4
- Ethernet1-2,4,6
- Ethernet1-2,4,6,8-9
- Ethernet4,6,8-9
- Ethernet10-20
- Ethernet1/3-1/32
system\_name:neighbor\_port\_name
'''''''''''''''''''''''''''''''''
Remote system and interface name - supported values (STRING = any string
which does not contain any white spaces):
- ``any``: interface is connected
- ``none``: interface is NOT connected
- ``<STRING>:<STRING>``: interface is connected to specific
device/interface
- ``<STRING>`` (Note: if only the device is configured, then ‘any’ is
implied for the interface. This is equal to ``<DEVICE>:any``):
interface is connected to device
- ``<DEVICE>:any``: interface is connected to device
- ``<DEVICE>:none``: interface is NOT connected to device (might be
connected or not to some other device)
- ``$<VARIABLE>:<STRING>``: interface is connected to specific
device/interface
- ``<STRING>:<$VARIABLE>``: interface is connected to specific
device/interface
- ``$<VARIABLE>:<$VARIABLE>``: interface is connected to specific
device/interface
- ``$<VARIABLE>`` (‘any’ is implied for the interface. This is equal to
``$<VARIABLE>:any``): interface is connected to device
- ``$<VARIABLE>:any``: interface is connected to device
- ``$<VARIABLE>:none``: interface is NOT connected to device (might be
connected or not to some other device)
port\_name: system\_name:neighbor\_port\_name
'''''''''''''''''''''''''''''''''''''''''''''
Negative constraints
1. ``any: DEVICE:none``: no port is connected to DEVICE
2. ``none: DEVICE:any``: same as above
3. ``none: DEVICE:none``: same as above
4. ``none: any:PORT``: no device is connected to PORT on any device
5. ``none: DEVICE:PORT``: no device is connected to DEVICE:PORT
6. ``INTERFACES: any:none``: interfaces not connected
7. ``INTERFACES: none:any``: same as above
8. ``INTERFACES: none:none``: same as above
9. ``INTERFACES: none:PORT``: interfaces not connected to PORT on any
device
10. ``INTERFACES: DEVICE:none``: interfaces not connected to DEVICE
11. ``any: any:none``: bogus, will prevent pattern from matching
anything
12. ``any: none:none``: bogus, will prevent pattern from matching
anything
13. ``any: none:any``: bogus, will prevent pattern from matching
anything
14. ``any: none:PORT``: bogus, will prevent pattern from matching
anything
15. ``none: any:any``: bogus, will prevent pattern from matching
anything
16. ``none: any:none``: bogus, will prevent pattern from matching
anything
17. ``none: none:any``: bogus, will prevent pattern from matching
anything
18. ``none: none:none``: bogus, will prevent pattern from matching
anything
19. ``none: none:PORT``: bogus, will prevent pattern from matching
anything
Positive constraints
1. ``any: any:any``: "Open pattern" matches anything
2. ``any: any:PORT``: matches any interface connected to any device’s
PORT
3. ``any: DEVICE:any``: matches any interface connected to DEVICE
4. ``any: DEVICE:PORT``: matches any interface connected to DEVICE:PORT
5. ``INTERFACES: any:any``: matches if local interfaces is one of
INTERFACES
6. ``INTERFACES: any:PORT``: matches if one of INTERFACES is connected
to any device’s PORT
7. ``INTERFACES: DEVICE:any``: matches if one of INTERFACES is connected
to DEVICE
8. ``INTERFACES: DEVICE:PORT``: matches if one of INTERFACES is
connected to DEVICE:PORT
Definitions
~~~~~~~~~~~
``[data_root]/definitions/`` contains a set of shared definition files
which can be associated with patterns in ``neighbordb`` (see the :ref:`neighbordb`
section above) or added to/symlink-ed from nodes’ folders.
See :ref:`definition` for more.
Actions
~~~~~~~
``[data_root]/actions/`` contains the set of all actions available for use in
definitions.
New custom actions to-be referenced from definitions can be added to
``[data_root]/actions/``. These will be loaded on-demand and do not require
a restart of the ZTPServer. See ``[data_root]/actions`` for examples.
+---------------------------------+-----------------------------------------------------------+----------------------------------------+
| Action                          | Description                                               | Required Attributes                    |
+=================================+===========================================================+========================================+
| :mod:`add_config`               | Adds a block of configuration to the final startup-config | url                                    |
|                                 | file                                                      |                                        |
+---------------------------------+-----------------------------------------------------------+----------------------------------------+
| :mod:`configure_ansible_client` | Create user and configure keys for Ansible deployment     | user, passwd, group, root, key         |
+---------------------------------+-----------------------------------------------------------+----------------------------------------+
| :mod:`copy_file`                | Copies a file from the server to the destination node     | src\_url, dst\_url, overwrite, mode    |
+---------------------------------+-----------------------------------------------------------+----------------------------------------+
| :mod:`install_cli_plugin`       | Installs a new EOS CLI plugin and configures rc.eos       | url                                    |
+---------------------------------+-----------------------------------------------------------+----------------------------------------+
| :mod:`install_extension`        | Installs a new EOS extension                              | extension\_url, autoload, force        |
+---------------------------------+-----------------------------------------------------------+----------------------------------------+
| :mod:`install_image`            | Validates and installs a specific version of EOS          | url, version, downgrade                |
+---------------------------------+-----------------------------------------------------------+----------------------------------------+
| :mod:`replace_config`           | Sends an entire startup-config to the node (overrides     | url                                    |
|                                 | add\_config)                                              |                                        |
+---------------------------------+-----------------------------------------------------------+----------------------------------------+
| :mod:`run_bash_script`          | Run bash script during bootstrap.                         | url                                    |
+---------------------------------+-----------------------------------------------------------+----------------------------------------+
| :mod:`run_cli_commands`         | Run CLI commands during bootstrap.                        | url                                    |
+---------------------------------+-----------------------------------------------------------+----------------------------------------+
| :mod:`send_email`               | Sends an email to a set of recipients routed              | smarthost, sender, receivers, subject, |
|                                 | through a relay host. Can include file attachments        | body, attachments, commands            |
+---------------------------------+-----------------------------------------------------------+----------------------------------------+
Additional details on each action are available in the :doc:`actions` module docs.
e.g.
Assume that we have a block of configuration that adds a list of
NTP servers to the startup configuration. The action would be
constructed as such:
.. code-block:: yaml
actions:
- name: configure NTP
action: add_config
attributes:
url: /files/templates/ntp.template
The above action would reference the ``ntp.template`` file which would contain configuration commands to
configure NTP. The template file could look like the following:
.. code-block:: console
ntp server 0.north-america.pool.ntp.org
ntp server 1.north-america.pool.ntp.org
ntp server 2.north-america.pool.ntp.org
ntp server 3.north-america.pool.ntp.org
When this action is called, the configuration snippet above will be
appended to the ``startup-config`` file.
The configuration templates can also contain **variables**, which are
automatically substituted during the action’s execution. A variable is
marked in the template via the '$' symbol.
e.g.
Let’s assume a need for a more generalized template that only needs
node specific values changed (such as a hostname and management IP
address). In this case, we’ll build an action that allows for **variable
substitution** as follows.
.. code-block:: yaml
actions:
- name: configure system
action: add_config
attributes:
url: /files/templates/system.template
variables:
hostname: veos01
ipaddress: 192.168.1.16/24
The corresponding template file ``system.template`` will provide the
configuration block:
.. code-block:: yaml
hostname $hostname
!
interface Management1
description OOB interface
ip address $ipaddress
no shutdown
This will result in the following configuration being added to the
``startup-config``:
.. code-block:: console
hostname veos01
!
interface Management1
description OOB interface
ip address 192.168.1.16/24
no shutdown
Note that in each of the examples above, the template files are
just standard EOS configuration blocks.
Plugins for allocating resources
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Plugins for allocating resources from resource pools
are located in ``[data_root]/plugins/`` and are referenced
by ``<filename>(<resource_pool>)``.
Each plugin contains a ``main`` function with the following signature:

.. code-block:: python

   def main(node_id, pool):
       ...
where:
- ``node_id`` is the unique_id of the node being provisioned
- ``pool`` is the name of the resource pool from which an attribute is being allocated
New custom plugins to-be referenced from definitions can be added to
``[data_root]/plugins/``. These will be loaded on-demand and do not require
a restart of the ZTPServer. See ``[data_root]/plugins/test`` for a very basic
example.
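As an illustration only (this is not the bundled ``test`` plugin), a file-backed plugin that mimics the behaviour of ``allocate`` could look roughly like the sketch below; it assumes the pool is a YAML file in the resources directory, with ``null`` marking free entries:

.. code-block:: python

   import os
   import yaml

   POOL_DIR = '/usr/share/ztpserver/resources'   # assumed location of the pool files

   def main(node_id, pool):
       """Return the value already assigned to node_id, or claim a free one."""
       path = os.path.join(POOL_DIR, pool)
       with open(path) as handle:
           data = yaml.safe_load(handle)

       # Reuse a value previously allocated to this node, if one exists
       for key, value in data.items():
           if value == node_id:
               return key

       # Otherwise claim the first free ('null') entry and save the pool
       for key, value in data.items():
           if value is None:
               data[key] = node_id
               with open(path, 'w') as handle:
                   yaml.safe_dump(data, handle, default_flow_style=False)
               return key

       raise RuntimeError('no free entries left in resource pool %s' % pool)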
**allocate(resource_pool)**
``[data_root]/resources/`` contains global resource pools from which
attributes in definitions can be allocated.
The resource pools provide a way to dynamically allocate a resource to a
node when the node definition is created. The resource pools are
key/value YAML files that contain a set of resources to be allocated to
a node.
.. code-block:: console
<value1>: <"null"|node_identifier>
<value2>: <"null"|node_identifier>
In the example below, a resource pool contains a series of 8 IP
addresses to be allocated. Entries which are not yet allocated to a node
are marked using the ``null`` descriptor.
.. code-block:: console
192.168.1.1/24: null
192.168.1.2/24: null
192.168.1.3/24: null
192.168.1.4/24: null
192.168.1.5/24: null
192.168.1.6/24: null
192.168.1.7/24: null
192.168.1.8/24: null
When a resource is allocated to a node’s definition, the first available
null value will be replaced by the node’s unique_id. Here is an
example:
.. code-block:: console
192.168.1.1/24: 001c731a2b3c
192.168.1.2/24: null
192.168.1.3/24: null
192.168.1.4/24: null
192.168.1.5/24: null
192.168.1.6/24: null
192.168.1.7/24: null
192.168.1.8/24: null
On subsequent attempts to allocate the resource to the same node, ZTPS
will first check to see whether the node has already been allocated a
resource from the pool. If it has, it will reuse the resource instead of
allocating a new one.
In order to free a resource from a pool, simply turn the value
associated with it back to ``null`` by editing the resource file.
Alternatively, ``$ztps --clear-resources`` can be used in order to free
all resources in all file-based resource files.
**sqlite(resource_pool)**
Allocates a resource from a pre-filled sqlite database. The database
is defined by the global variable 'DB_URL' within the plugin. The database
can include multiple tables, but the value passed into the
'sqlite(resource_pool)' function will be used to look for an available resource.
Table structure should be as follows with the exact column names:
=============== ========
node_id key
=============== ========
NULL 1.1.1.1
NULL 1.1.1.2
NULL 1.1.1.3
=============== ========
Which can be created with statements like:
.. code-block:: mysql
CREATE TABLE `mgmt_subnet`(key TEXT, node_id TEXT)
and add entries with:
.. code-block:: mysql
INSERT INTO `mgmt_subnet` VALUES('1.1.1.1', NULL)
When a resource is allocated, the node_id column of that row is updated
with the system ID of the switch.
=============== ========
node_id key
=============== ========
001122334455 1.1.1.1
NULL 1.1.1.2
NULL 1.1.1.3
=============== ========
On subsequent attempts to allocate the resource to the same node,
ztpserver will first check to see whether the node has already been
allocated a resource from the pool. If it has, it will reuse the
resource instead of allocating a new one.
Definition example:
.. code-block:: yaml
actions:
-
action: add_config
attributes:
url: files/templates/ma1.templates
variables:
ipaddress: sqlite('mgmt_subnet')
name: "configure ma1"
.. tip::
Check out `create_db.py <https://raw.githubusercontent.com/arista-eosplus/ztpserver/develop/utils/create_db.py>`_ for an example script to create a sqlite database.
Config-handlers
~~~~~~~~~~~~~~~
``[data_root]/config-handlers/`` contains config-handlers which can be
associated with nodes via *neighbordb*. A config-handler script is executed
every time a PUT startup-config request succeeds for a node which is
associated to it.
Other files
~~~~~~~~~~~
``[data_root]/files/`` contains the files that actions might request
from the server. For example, ``[data_root]/files/images/`` could contain
all EOS SWI files.
Tips and tricks
===============
.. contents:: :local:
How do I update my local copy of ZTPServer from GitHub?
````````````````````````````````````````````````````````
Automatically
^^^^^^^^^^^^^
Go to the ZTPServer directory where you previously cloned the GitHub repository and execute:
``./utils/refresh_ztps [-b <branch>] [-f <path>]``
* <branch> can be any branch name in the Git repo. Typically this will be one of:
* "master" - for the latest release version
* "vX.Y.Z-rc" - for beta testing the next X.Y.Z release-candidate
* "develop" (DEFAULT) - for the latest bleeding-edge development branch
* <path> is the base directory of the ztpserver installation.
* /usr/share/ztpserver (DEFAULT)
Manually
^^^^^^^^
Remove the existing ZTPServer files:
.. code-block:: console
rm -rf /usr/share/ztpserver/actions/*
rm -rf /usr/share/ztpserver/bootstrap/*
rm -rf /usr/lib/python2.7/site-packages/ztpserver*
rm -rf /bin/ztps*
rm -rf /home/ztpuser/ztpserver/ztpserver.egg-info/
rm -rf /home/ztpuser/ztpserver/build/*
Go to the ZTPServer directory where you previously cloned the GitHub repository, update it, then build and install the server:
.. code-block:: console
bash-3.2$ git pull
bash-3.2$ python setup.py build
bash-3.2$ python setup.py install
My server keeps failing to load my resource files. What’s going on?
````````````````````````````````````````````````````````````````````
Did you know?
.. code-block:: yaml
a:b is INVALID YAML
a: b is VALID YAML syntax
Check out `YAML syntax checker <http://yamllint.com/>`_ for more.
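If in doubt, the file can also be checked locally with PyYAML (which ZTPServer already requires); a parse error will point at the offending line:

.. code-block:: console

   python -c "import yaml; yaml.safe_load(open('/usr/share/ztpserver/resources/mgmt_subnet'))"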
How do I validate the format of my config files?
````````````````````````````````````````````````
To validate config files use ``ztps --validate``:
.. code-block:: console
[ztpsadmin@ztps ~]$ ztps --validate
Validating neighbordb ('/usr/share/ztpserver/neighbordb')... Ok!
Validating definitions...
Validating /usr/share/ztpserver/definitions/torb-withImageUpgrade... Ok!
Validating /usr/share/ztpserver/definitions/torb... Ok!
Validating /usr/share/ztpserver/definitions/tora-withImageUpgrade... Ok!
Validating /usr/share/ztpserver/definitions/tora... Ok!
Validating resources...
Validating /usr/share/ztpserver/resources/tor_hostnames... Ok!
Validating /usr/share/ztpserver/resources/ip_loopback... Ok!
Validating /usr/share/ztpserver/resources/ip_vlan100... Ok!
Validating /usr/share/ztpserver/resources/mgmt_subnet... Ok!
Validating nodes...
Validating /usr/share/ztpserver/nodes/001122334456/pattern... Ok!
Validating /usr/share/ztpserver/nodes/001122334456/definition... Ok!
Validating /usr/share/ztpserver/nodes/001122334455/pattern... Ok!
Validating /usr/share/ztpserver/nodes/001122334455/definition... Ok!
Validating /usr/share/ztpserver/nodes/001122334457/pattern... Ok!
Validating /usr/share/ztpserver/nodes/001122334457/definition... Ok!
How do I debug the ZTP Server provisioning process?
```````````````````````````````````````````````````
* If ZTP Server is running via wsgi, Check the Apache log files. Separate log files can be designated for ZTP Server's wsgi with the following::
<VirtualHost *:8080>
CustomLog logs/ztpserver-access_log common
ErrorLog logs/ztpserver-error_log
...
</VirtualHost>
* Run the standalone ZTP Server binary in debug mode and log the output to a file:
``ztps --debug 2>&1 | tee ztps.log``
* After changing configuration directives in neighbordb, a definition, etc, you may need to remove the node directory of the node-under-test before retrying ZTP on the node. This will ensure that ZTP Server matches the node against neighbordb instead of ``nodes/<serialnum>/pattern``.
* The ``bootstrap`` script may be manually run from a switch instead of going through an entire reload/ZTP cycle. To do this, download the script to the switch, then run it locally::
switch# bash wget http://ztpserver:8080/bootstrap
switch# bash chmod +x bootstrap
switch# bash sudo ./bootstrap
* On the client side, make sure you use XMPP (best) or remote syslog (second best) logging - you can configure that in *bootstrap.conf*.
* When requesting support, please include the output from the server (running in debug mode) and the console/log output from the switch.
How do I disable / enable ZTP mode on a switch
``````````````````````````````````````````````
By default, any switch that does not have a ``startup-config`` will enter ZTP mode to attempt to retrieve one. This feature was introduced in EOS 3.7 for fixed devices and EOS 4.10 for modular ones. In ZTP mode, the switch sends out DHCP requests on all interfaces and **will not forward traffic** until it reboots with a config.
To cancel ZTP mode, log in as admin and type ``zerotouch cancel``. **This will trigger an immediate reload** of the switch, after which the switch will be ready to configure manually. At this point, if you ever erase the startup-config and reload, the switch will end up in ZTP mode again.
To completely disable ZTP mode, log in as admin and type ``zerotouch disable``. **This will trigger an immediate reload** of the switch, after which the switch will be ready to configure manually. If you wish to re-enable ZTP, go to configure mode and run ``zerotouch enable``. The next time you erase the startup-config and reload the switch, the switch will end up in ZTP mode again.
.. note:: vEOS instances come with a minimal startup-config so they do not boot into ZTP mode by default. In order to use vEOS to test ZTP, enter ``erase startup-config`` and reload.
How can I test ZTPServer without having to reboot the switch every time?
````````````````````````````````````````````````````````````````````````
From a bash shell on the switch:
.. code-block:: console
# retrieve the bootstrap file from server
wget http://<ZTP_SERVER>:<PORT>/bootstrap
# make file executable
sudo chmod 777 bootstrap
# execute file
sudo ./bootstrap
What is the recommended test environment for ZTPServer?
```````````````````````````````````````````````````````
The best way to learn about and test a ZTPServer environment is to build the server and virtual (vEOS) nodes with Packer. See https://github.com/arista-eosplus/packer-ztpserver for directions.
If you set up your own environment, the following recommendations should assist greatly in visualizing the workflow and troubleshooting any issues which may arise. The development team strongly encourages these steps as Best Practices for testing your environment, and most of these recommendations are also Best Practices for a full deployment.
* During testing only, run the standalone server in debug mode (``ztps --debug``) in a buffered shell. NOTE: do NOT use the standalone server in production except in the smallest environments (approximately 10 nodes or fewer, provisioned consecutively).
* Do not attempt any detailed debugging from a virtual or serial console. Due to the quantity of information and frequent lack of copy/paste access, this is often painful. Both suggested logging methods, below, can be configured in the :ref:`bootstrap_config`.
* (BEST) Setup XMPP logging. There are many XMPP services available, including `ejabberd <https://www.ejabberd.im/>`_, and even more clients, such as `Adium <https://adium.im/>`_. This will give you a single pane view of what is happening on all of your test switches. Our demo includes ejabberd with the following configuration:
* Server: im.ztps-test.com (or your ZTPServer IP)
* XMPP admin user: [email protected], passwd eosplus
* (Second) In place of XMPP, specify a central syslog server in the bootstrap config.
How do I override the default system-mac in vEOS?
``````````````````````````````````````````````````
Add the desired MAC address to the first line of the file /mnt/flash/system_mac_address, then reboot (Feature added in 3.13.0F)
.. code-block:: console
[admin@localhost ~]$ echo 1122.3344.5566 > /mnt/flash/system_mac_address
How do I override the default serial number or system-mac in vEOS?
``````````````````````````````````````````````````````````````````
As of vEOS 4.14.0, the serial number and system mac address can be configured with a file in /mnt/flash/veos-config. After modifying SERIALNUMBER or SYSTEMMACADDR, a reboot is required for the changes to take effect.
.. code-block:: console
SERIALNUMBER=ABC12345678
SYSTEMMACADDR=1122.3344.5566
Overview
========
ZTPServer provides a robust server which enables comprehensive bootstrap solutions for Arista network elements. ZTPServer takes advantage of the ZeroTouch Provisioning (ZTP) feature in Arista's EOS (Extensible Operating System) which enables a node to connect to a provisioning server whenever a valid configuration file is missing from the internal flash storage.
ZTPServer provides a number of features that extend beyond simply loading a configuration file and a boot image on a node, including:
* sending an advanced bootstrap client to the node
* mapping each node to an individual definition which describes the bootstrap steps specific to that node
* defining configuration templates and actions which can be shared by multiple nodes - the actions can be customised using statically-defined or dynamically-generated attributes
* implementing environment-specific actions which integrate with external/internal management systems
* topology validation using a simple syntax for expressing LLDP neighbor adjacencies
* enabling Zero Touch Replacement, as well as configuration backup and management
ZTPServer is written in Python and leverages standard protocols like DHCP (DHCP options for boot functions), HTTP(S) (for bi-directional transport), XMPP and syslog (for logging). Most of the configuration files are YAML-based.
**Highlights:**
* extends the basic capability of ZTP (in EOS) to allow more robust provisioning activities
* is extensible and easy to integrate into any operational environment
* can be run natively in EOS or on a separate server
* is developed by a community led by Arista's EOS+ team as an open-source project
**Features:**
* automated configuration file generation
* image and file system validation and standardization
* cable and connectivity validation
* topology-based auto-provisioning
* configuration templating with resource allocation (for dynamic deployments)
* Zero Touch Replacement and software upgrade capabilities
* user extensible actions
* XMPP and syslog-based logging and accounting
ZTP Intro
`````````
`Zero Touch Provisioning (ZTP) <http://www.arista.com/en/products/eos/automation/articletabs/0>`_ is a feature in Arista EOS which, in the absence of a valid startup-config file, enables nodes to be configured over the network.
The basic flow is as follows:
* check for startup-config, if absent, enter ZTP mode
* send DHCP requests on all connected interfaces
* if a DHCP response is received with Option 67 defined (bootfile-name), retrieve that file
* if that file is a startup-config, then save it to startup-config and reboot
* if that file is an executable, then execute it. Common actions executed this way include upgrading the EOS image, downloading extension packages, and dynamically building a startup-config file. (**ZTPServer's bootstrap script is launched this way**)
* reboot with the new configuration
See the `ZTP Tech Bulletin <https://www.arista.com/assets/data/pdf/TechBulletins/Tech_bulletin_ZTP.pdf>`_ and the `Press Release <http://www.arista.com/en/company/news/press-release/345-pr-20110215-01>`_ for more information on ZTP.
Architecture
````````````
There are 2 primary components of the ZTPServer implementation:
* the **server** or ZTPServer instance **AND**
* the **client** or bootstrap (a process running on each node, which connects back to the server in order to provision the node)
Server
``````
.. image:: _static/Components.png
:width: 353px
:align: right
The server can run on any standard x86 server. Currently the only operating systems tested are Linux and macOS, but theoretically any system that supports Python could run ZTPServer. The server provides a Python WSGI-compliant interface, along with a standalone HTTP server. The built-in HTTP server runs by default on port 8080 and provides bidirectional file transport and communication for the bootstrap process.
The primary methods of provisioning a node are:
* **statically** via mappings between node IDs (serial number or system MAC address) and configuration definitions OR
* **dynamically** via mapping between topology information (LLDP neighbors) and configuration definitions
The definitions associated with the nodes contain a set of actions that can perform a variety of functions that ultimately lead to a final device configuration. Actions can use statically configured attributes or leverage configuration templates and dynamically allocated resources (via resource pools) in order to generate the system configuration. Definitions, actions, attributes, templates, and resources are all defined in YAML files.
Client
``````
.. image:: _static/AttrsActions.png
:width: 353px
:align: right
The client or **bootstrap file** is retrieved by the node via an HTTP GET request made to the ZTPServer (the URL of the file is retrieved via DHCP option 67). This file executes locally and gathers system and LLDP information from the node and sends it back to the ZTPServer. Once the ZTPServer processes the information and confirms that it can provision the node, the client makes a request to the server for a definition file - this file will contain the list of all actions which need to be executed by the node in order to provision itself.
Throughout the provisioning process the bootstrap client can log all steps via both local and remote syslogs, as well as XMPP.
.. _message_flows:
ZTP Client-Server Message Flows
```````````````````````````````
The following diagram show the flow of information during the bootstrap process. The lines in **red** correspond to the ZTP feature in EOS, while the lines in **blue** highlight the ZTPServer operation:
(Red indicates Arista EOS flows. Blue indicates the bootstrap client.)
.. image:: _static/ztpserver-seqdiag.png
:alt: Message Flow Diagram
Topology Validation
```````````````````
.. image:: _static/LeafDefn.png
:width: 353px
:align: right
ZTPServer provides a powerful topology validation engine via either ``neighbordb`` or ``pattern`` files. As part of the bootstrap process for each node, the LLDP information received on all ports is sent to the ZTPServer and matched against either ``neighbordb`` or a node-specific ``pattern`` file (if a node is already configured on the server). Both are YAML files that use a simple format to express strongly and loosely typed topology patterns. Pattern entries are processed top down and can include local or globally-defined variables (including regular expressions).
Patterns in ``neighbordb`` match nodes to definitions (dynamic mode), while node-specific pattern files are used for cabling and connectivity validation (static mode).
Topology-validation can be disabled:
* globally (``disable_topology_validation=true`` in the server’s global configuration file) OR
* on a per-node basis, using open patterns in the pattern files (see the *Pattern file configuration* section for more details)
Operational modes
`````````````````
There are several operational modes for ZTPServer, explained below. See :ref:`mode_examples` to see how to use them.
System ID-based provisioning with no topology validation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
**Via node-specific folder:**
* a folder corresponding to the node's system ID is created on the server before bootstrap
* a definition file, startup-config file or both is/are placed in the folder
* topology validation is disabled globally (in the global configuration file) or via an open pattern in the pattern file located in the node-specific folder
**Via neighbordb:**
* a pattern which matches the node's system ID is created in neighbordb before bootstrap
* neighbordb pattern points to a definition file
* neighbordb pattern contains no topology information (LLDP neighbors)
* a node-specific folder with the definition and an open pattern will be created during the bootstrap process
System ID-based provisioning with topology validation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
**Via node-specific folder:**
* a folder corresponding to the node's system ID is created on the server before bootstrap
* a definition file, startup-config file or both is/are placed in the folder
* topology validation is enabled globally (in the global configuration file) and the topology information is configured in the pattern file located in the node-specific folder
**Via neighbordb:**
* a pattern which matches the node's system ID is created in neighbordb before bootstrap
* neighbordb pattern points to a definition file
* neighbordb pattern contains topology information (LLDP neighbors)
* a node-specific folder with the definition and a pattern containing the matched topology information will be created during the bootstrap process
Topology-based provisioning
^^^^^^^^^^^^^^^^^^^^^^^^^^^
* a pattern which matches the topology information (LLDP neighbors) is created in neighbordb before bootstrap
* neighbordb pattern points to a definition file
* a node-specific folder with the definition and a pattern containing the matched topology information will be created during the bootstrap process
License
=======
Copyright (c) 2013-2015, Arista Networks
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
Neither the name of the Arista Networks nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Third party
-----------
Requests v2.3.0: HTTP for Humans
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Copyright 2014 Kenneth Reitz
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Release 1.1
-----------
(Published August, 2014)
The authoritative state for any known issue can be found in `GitHub issues <https://github.com/arista-eosplus/ztpserver/issues>`_.
Enhancements
^^^^^^^^^^^^
* V1.1.0 docs (`181 <https://github.com/arista-eosplus/ztpserver/pull/181>`_)
Documentation has been completely restructured and is now hosted at http://ztpserver.readthedocs.org/.
* refresh_ztps - util script to refresh ZTP Server installation (`177 <https://github.com/arista-eosplus/ztpserver/issues/177>`_)
/utils/refresh_ztps can be used in order to automatically refresh the installation of ZTP Server to the latest code on GitHub. This can be useful in order to pull bug fixes or run the latest version of various development branches.
* Et49 does not match Ethernet49 in neighbordb/pattern files (`172 <https://github.com/arista-eosplus/ztpserver/issues/172>`_)
The local interface in an interface pattern does not have to use the long interface name. For example, all of the following will be treated similarly: Et1, e1, et1, eth1, Eth1, ethernet1, Ethernet1.
Note that this does not apply to the remote interface, where different rules apply.
* Improve server-side log messages when there is no match for a node on the server (`171 <https://github.com/arista-eosplus/ztpserver/issues/171>`_)
.. comment
* Improve error message on server side when definition is missing from the definitions folder (`170 <https://github.com/arista-eosplus/ztpserver/issues/170>`_)
.. comment
* neighbordb should also support serialnumber as node ID (along with system MAC) (`167 <https://github.com/arista-eosplus/ztpserver/issues/167>`_)
Server now supports two types of unique identifiers, as specified in ztpserver.conf:
::
# UID used in the /nodes structure (either systemmac or serialnumber)
identifier = serialnumber
The configuration is global and applies to a single run of the server (neighbordb, resource files, nodes' folders, etc.).
* serialnumber should be the default identifier instead of systemmac (`166 <https://github.com/arista-eosplus/ztpserver/issues/166>`_)
The default identifier in ztpserver.conf is the serial number. e.g.
::
# UID used in the /nodes structure (either systemmac or serialnumber)
identifier = serialnumber
This is different from v1.0, where the systemmac was the default.
* Document which actions are dual-sup compatible and which are not (`165 <https://github.com/arista-eosplus/ztpserver/issues/165>`_)
All actions now document whether they are dual-sup compatible or not. See documentation for the details.
* dual-sup support for install_image action (`164 <https://github.com/arista-eosplus/ztpserver/issues/164>`_)
install_image is now compatible with dual-sup systems.
* Resource pool allocation should use the identifier instead of the systemmac (`162 <https://github.com/arista-eosplus/ztpserver/issues/162>`_)
The values in the resource files will be treated as either system MACs or serial numbers, depending on what identifier is configured in the global configuration file.
* Document actions APIs (`157 <https://github.com/arista-eosplus/ztpserver/issues/157>`_)
The API which can be used by actions is now documented in the documentation for the bootstrap script module.
* Get rid of return codes in bootstrap script (`155 <https://github.com/arista-eosplus/ztpserver/issues/155>`_)
.. comment
* Bootstrap script should always log a detailed message before exiting (`153 <https://github.com/arista-eosplus/ztpserver/issues/153>`_)
bootstrap script will log the reason for exiting, instead of an error code.
* Client should report what the error code means (`150 <https://github.com/arista-eosplus/ztpserver/issues/150>`_)
.. comment
* Improve server logs when server does not know about the node (`145 <https://github.com/arista-eosplus/ztpserver/issues/145>`_)
.. comment
* Configurable verbosity for logging options (server side) (`140 <https://github.com/arista-eosplus/ztpserver/issues/140>`_)
Bootstrap configuration file can now specify the verbosity of client-side logs:
::
...
xmpp:
username: ztps
password: ztps
domain: pcknapweed.lab.local
msg_type: debug
rooms:
- ztps-room
The allowed values are:
- debug: verbose logging
- info: log only messages coming from the server (configured in definitions)
The information is transmitted to the client via the bootstrap configuration request:
::
GET logging configuration
Returns the logging configuration from the server.
GET /bootstrap/config
Request
Content-Type: text/html
Response
Status: 200 OK
Content-Type: application/json
{
    "logging"*: [ {
        "destination": "file:/<PATH>" | "<HOSTNAME OR IP>:<PORT>",  // localhost enabled by default
        "level"*: <DEBUG | CRITICAL | ...>
    } ],
    "xmpp"*: {
        "server": <IP or HOSTNAME>,
        "port": <PORT>,                   // Optional, default 5222
        "username"*: <USERNAME>,
        "domain"*: <DOMAIN>,
        "password"*: <PASSWORD>,
        "nickname": <NICKNAME>,           // REMOVED
        "rooms"*: [ <ROOM>, ... ],
        "msg_type": [ "info" | "debug" ]  // Optional, default "debug"
    }
}
**Note**: Items marked with * are mandatory (even if the value is an empty list/dict)
P.S. (slightly unrelated) The nickname configuration has been deprecated (serialnumber is used instead).
* Configurable logging levels for xmpp (`139 <https://github.com/arista-eosplus/ztpserver/issues/139>`_)
bootstrap.conf:
::
logging:
...
xmpp:
...
nickname: ztps // (unrelated) this was removed - using serial number instead
msg_type: info // allowed values ['info', 'debug']
If msg_type is set to 'info', only error messages and the 'onstart', 'onsuccess' and 'onfailure' messages configured in the definition are logged via XMPP.
* Bootstrap should rename LLDP SysDescr to "provisioning" while executing or failing (`138 <https://github.com/arista-eosplus/ztpserver/issues/138>`_)
.. comment
* Test XMPP for multiple nodes being provisioned at the same time (`134 <https://github.com/arista-eosplus/ztpserver/issues/134>`_)
.. comment
* Server logs should include ID (MAC/serial number) of node being provisioned (`133 <https://github.com/arista-eosplus/ztpserver/issues/133>`_)
Most of the server logs are now prefixed by the identifier of the node which is being provisioned - this should make debugging environments where multiple nodes are provisioned at the same time a lot easier.
* Use serial number instead of system MAC as the unique system ID (`131 <https://github.com/arista-eosplus/ztpserver/issues/131>`_)
.. comment
* Bootstrap script should disable copp (`122 <https://github.com/arista-eosplus/ztpserver/issues/122>`_)
.. comment
* Bootstrap script should check disk space before downloading any resources (`118 <https://github.com/arista-eosplus/ztpserver/issues/118>`_)
Bootstrap script will request the meta information from the server whenever it attempts to save a file to flash. This information will be used in order to check whether enough disk space is available for downloading the resource.
::
GET action metadata
Request action from the server.
GET /meta/actions/NAME
Request
Content-Type: text/html
Response
Status: 200 OK
Content-Type: application/json
{
    "size"*: <SIZE IN BYTES>,
    "sha1": <HASH STRING>
}
**Note**: Items marked with * are mandatory (even if the value is an empty list/dict)
Status: 404 Not found
Content-Type: text/html
Status: 500 Internal server error // e.g. permissions issues on server side
Content-Type: text/html
* ztps should check Python version and report a sane error if an incompatible version is being used to run it (`110 <https://github.com/arista-eosplus/ztpserver/issues/110>`_)
ztps reports an error if it is run on a system with an incompatible Python version installed.
* Do not hardcode Python path (`109 <https://github.com/arista-eosplus/ztpserver/issues/109>`_)
.. comment
* Set XMPP nickname to serial number (`106 <https://github.com/arista-eosplus/ztpserver/issues/106>`_)
Serial number is used as XMPP presence/nickname. For vEOS instances which don't have one configured, systemmac is used instead.
* Send serial number as XMPP presence (`105 <https://github.com/arista-eosplus/ztpserver/issues/105>`_)
Serial number is used as XMPP presence/nickname. For vEOS instances which don't have one configured, systemmac is used instead.
* Support for EOS versions < 4.13.3 (`104 <https://github.com/arista-eosplus/ztpserver/issues/104>`_)
ZTP Server bootstrap script now supports any EOS v4.12.x or later.
* neighbordb should not be cached (`97 <https://github.com/arista-eosplus/ztpserver/issues/97>`_)
Neighbordb is not cached on the server side. This means that any updates to it do not require a server restart anymore.
* Definitions/actions should be loaded from disk on each GET request (`87 <https://github.com/arista-eosplus/ztpserver/issues/87>`_)
Definitions and actions are not cached on the server side. This means that any updates to them do not require a server restart anymore.
* Fix all pylint warnings (`83 <https://github.com/arista-eosplus/ztpserver/issues/83>`_)
.. comment
* add_config action should also accept server-root-relative path for the URL (`71 <https://github.com/arista-eosplus/ztpserver/issues/71>`_)
'url' attribute in the add_config action can be either a URL or a local server path.
* install_image action should also accept server-root-relative path for the URL (`70 <https://github.com/arista-eosplus/ztpserver/issues/70>`_)
'url' attribute in the install_image action can be either a URL or a local server path.
* Server logs should be timestamped (`63 <https://github.com/arista-eosplus/ztpserver/issues/63>`_)
All server-side logs now contain a timestamp. Use 'ztps --debug' for verbose debug output.
* After installing ZTPServer, there should be a dummy neighbordb (with comments and examples) and a dummy resource (with comments and examples) in /usr/share/ztpserver (`48 <https://github.com/arista-eosplus/ztpserver/issues/48>`_)
.. comment
* need test coverage for InterfacePattern (`42 <https://github.com/arista-eosplus/ztpserver/issues/42>`_)
.. comment
* test_topology must cover all cases (`40 <https://github.com/arista-eosplus/ztpserver/issues/40>`_)
.. comment
Resolved issues
^^^^^^^^^^^^^^^
* Syslog messages are missing system-id (vEOS) (`184 <https://github.com/arista-eosplus/ztpserver/issues/184>`_)
All client-side log messages are currently prefixed by the serial number (regardless of which identifier is configured on the server).
For vEOS, if the system does not have a serial number configured, the system MAC will be used instead.
* No logs while executing actions (`182 <https://github.com/arista-eosplus/ztpserver/issues/182>`_)
.. comment
* test_repository.py is leaking files (`174 <https://github.com/arista-eosplus/ztpserver/issues/174>`_)
.. comment
* Allocate function will return some SysMac in quotes, others not (`137 <https://github.com/arista-eosplus/ztpserver/issues/137>`_)
.. comment
* Actions which don't require any attributes are not supported (`129 <https://github.com/arista-eosplus/ztpserver/issues/129>`_)
.. comment
* Static pattern validation fails in latest develop branch (`128 <https://github.com/arista-eosplus/ztpserver/issues/128>`_)
.. comment
* Have a way to disable topology validation for a node with no LLDP neighbors (`127 <https://github.com/arista-eosplus/ztpserver/issues/127>`_)
COPP is disabled during the bootstrap process for EOS v4.13.x and later. COPP is not supported for older releases.
* Investigate "No loggers could be found for logger sleekxmpp.xmlstream.xmlstream" error messages on client side (`120 <https://github.com/arista-eosplus/ztpserver/issues/120>`_)
.. comment
* ZTPS should not fail if no variables are defined in neighbordb (`114 <https://github.com/arista-eosplus/ztpserver/issues/114>`_)
.. comment
* ZTPS should not fail if neighbordb is missing (`113 <https://github.com/arista-eosplus/ztpserver/issues/113>`_)
.. comment
* ZTPS installation should create dummy neighbordb (`112 <https://github.com/arista-eosplus/ztpserver/issues/112>`_)
ZTP Server install will create a placeholder neighbordb with instructions.
* Deal more gracefully with invalid YAML syntax in resource files (`75 <https://github.com/arista-eosplus/ztpserver/issues/75>`_)
.. comment
* Server reports AttributeError if definition is not valid YAML (`74 <https://github.com/arista-eosplus/ztpserver/issues/74>`_)
.. comment
* fix issue with Pattern creation from neighbordb (`44 <https://github.com/arista-eosplus/ztpserver/issues/44>`_)
.. comment
Implementation Details
======================
.. contents:: :local:
Client-side implementation details
----------------------------------
Action attributes
~~~~~~~~~~~~~~~~~
The bootstrap script will pass in as argument to the main method of each
action a special object called ‘attributes’. The only API the action
needs to be aware for this object is the ‘get’ method, which will return
the value of an attribute, as configured on the server:
- the value can be local to a particular action or global
- if an attribute is defined at both the local and global scopes, the
local value takes priority
- if an attribute is not defined at either the local or global level,
then the ‘get’ method will return **None**
e.g. (action code)
.. code-block:: python
def main(attributes):
    print(attributes.get('software_image'))
Besides the values coming from the server, a couple of **special
entries**\ \* (always upper case) are also contained in the attributes
object:
* ‘NODE’: a node object for making eAPI calls to localhost. See the :doc:`client` documentation.
e.g. (action\_code)
.. code-block:: python
def main(attributes):
    print(attributes.get('NODE').api_enable_cmds(['show version']))
Bootstrap URLs
~~~~~~~~~~~~~~
1. DHCP response contains the **URL pointing to the bootstrap script** on the server
2. The location of the server is hardcoded in
the bootstrap script, using the SERVER global variable. The bootstrap
script uses this base address in order to generate the **URL to use
in order to GET the logging details**: ``BASE_URL/config`` e.g.
.. code-block:: ini
SERVER = 'http://my-bootstrap-server:80' # Note that the port and the transport mechanism
# are included in the URL
3. The bootstrap script uses the SERVER base address in order to compute
the **URL to use in order to POST the node’s information:**
``BASE_URL/config``
4. The bootstrap script uses the ‘location’ header in the POST reply as
the **URL to use in order to request the definition**
5. **Actions and resources URLs** are computed by using the base
address in the bootstrap script: ``BASE_URL/actions/``, ``BASE_URL/files/`` (illustrated below)
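As a hypothetical illustration (the server name and port are placeholders), with ``SERVER = 'http://my-bootstrap-server:80'`` the bootstrap script would derive URLs such as:

.. code-block:: console

   http://my-bootstrap-server:80/config           # logging configuration (GET) and node information (POST)
   http://my-bootstrap-server:80/actions/<name>   # action download
   http://my-bootstrap-server:80/files/<path>     # file/resource download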
.. |POST FSM| image:: https://raw.githubusercontent.com/arista-eosplus/ztpserver/develop/tree/gh-pages/images/NodeControllerPOST-FSM.png
.. |GET FSM| image:: https://raw.githubusercontent.com/arista-eosplus/ztpserver/develop/tree/gh-pages/images/NodeControllerGET-FSM.png
Run ZTPServer as a VM on EOS
===========================================
.. The line below adds a local TOC
.. contents:: :local:
:depth: 1
**Introduction**
Bootstrapping network devices, much like bootstrapping servers, requires a server in place to handle that function. Often, it is cumbersome to have that server ready before the network is up and running. It is therefore very convenient to have a server running alongside the first node in the network fabric to handle bootstrapping for the rest of the infrastructure.
Arista EOS provides the capability to run VMs on top of EOS, therefore making the above scenario possible. The following set of recipes will help you perform the necessary steps to streamline your data center network bootstrapping process:
* You can have everything prepared and stored on a USB key.
* Plug in the USB key to the first SPINE switch in the data center.
* The rest of the data center fabric will be bootstrapped automatically!
There are 3 different deployment topologies, and your network design should fall into one of them. Each topology requires slightly different recipes, and they are explained in the following sections.
* **L2L3WM** : a L2 MLAG or L3 ECMP fabric with an out-of-band management network (switches managed via the management port)
* **L2WOM** : a L2 MLAG fabric without an out-of-band management network (switches managed in-band via SVI)
* **L3WOM** : a L3 ECMP fabric without an out-of-band management network (switches managed in-band via loopback)
.. toctree::
:maxdepth: 1
ztpsVMonEOS/l2l3wm
ztpsVMonEOS/l2wom
ztpsVMonEOS/l3wom
Deployment Steps
----------------
Objective
^^^^^^^^^
I want to use a single USB key to bootstrap my entire data center fabric.
Solution
^^^^^^^^
Follow the steps below:
1) Obtain a USB key that is at least 4GB and format it with either the MS-DOS or FAT file system
2) Copy all the files listed in the "Files Needed" section onto the USB key
3) Plug the USB key into the USB port on the first SPINE switch
4) Sit back and watch your data center network fabric bring itself up!
.. note:: All files and directories present on the USB flash drive will be copied to the switch.
It is recommended that the USB drive contains only the three files listed above.
Explanation
^^^^^^^^^^^
The USB key method leverages the Arista Password Recovery mechanism. When the ``fullrecover`` and ``boot-config`` files are present on the USB key, the system checks the timestamp on the ``boot-config`` file. If the timestamp is different, all files on the USB key are copied to the flash on the switch, and the switch is rebooted and comes up with the ``startup-config`` and the ``EOS.swi`` included on the USB key.
.. End of Deployment Steps
Advanced
========
.. The line below adds a local TOC
.. contents:: :local:
:depth: 1
Configuration Management and prep for ZTR
-----------------------------------------
Objective
^^^^^^^^^
I want to automatically push the startup-config from each node to the
corresponding /nodes/ folder whenever changes are made on the node.
Solution
^^^^^^^^
The ZTPServer accepts HTTP PUT requests at ``nodes/<node_id>/startup-config``.
Therefore, we can configure an event-handler on the node during provisioning
which will perform this PUT anytime the startup-config is saved.
**1. Create event-handler template**
Choose the option that best fits your deployment. The variations are
Serial Number or System Mac Address, and Default VRF or Non-Default VRF.
Copy and paste the option text into a new template in:
.. code-block:: console
# Go to your data_root - by default it's /usr/share/ztpserver
admin@ztpserver:~# cd /usr/share/ztpserver
# Make sure you have a directory for templates
admin@ztpserver:~# mkdir -p files/templates
# Create a static config block
admin@ztpserver:~# vi files/templates/config-push.template
.. note:: Notice the ``$ztpserver``, ``$port`` and ``$vrf_name`` variables.
You can hardcode these in the template or abstract these to the
definition or attributes file (as shown in the next recipe).
**Option 1:** Using SystemMac and Default VRF
.. code-block:: console
event-handler configpush
trigger on-startup-config
action bash export SYSMAC=`FastCli -p 15 -c 'show ver | grep MAC | cut -d" " -f 5' | sed 's/[.]*//g'`; curl http://$ztpserver:$port/nodes/$SYSMAC/startup-config -H "content-type: text/plain" --data-binary @/mnt/flash/startup-config -X PUT
**Option 2:** Using SystemMac and Non-Default VRF
.. code-block:: console
event-handler configpush
trigger on-startup-config
! For non-default VRF, use:
action bash export SYSMAC=`FastCli -p 15 -c 'show ver | grep MAC | cut -d" " -f 5' | sed 's/[.]*//g'`; sudo ip netns exec ns-$vrf_name curl http://$ztpserver:$port/nodes/$SYSMAC/startup-config -H "content-type: text/plain" --data-binary @/mnt/flash/startup-config -X PUT
**Option 3:** Using Serial Number and Default VRF
.. code-block:: console
event-handler configpush
trigger on-startup-config
! For serial number, default VRF:
action bash export SERIAL=`FastCli -p 15 -c 'show ver' | grep Serial | tr -s ' ' | cut -d ' ' -f 3 | tr -d '\r'`; curl http://$ztpserver:$port/nodes/$SERIAL/startup-config -H "content-type: text/plain" --data-binary @/mnt/flash/startup-config -X PUT
**Option 4:** Using Serial Number and Non-Default VRF
.. code-block:: console
event-handler configpush
trigger on-startup-config
! For serial number, non-default VRF:
action bash export SERIAL=`FastCli -p 15 -c 'show ver' | grep Serial | tr -s ' ' | cut -d ' ' -f 3 | tr -d '\r'`; sudo ip netns exec ns-$vrf_name curl http://$ztpserver:$port/nodes/$SERIAL/startup-config -H "content-type: text/plain" --data-binary @/mnt/flash/startup-config -X PUT
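If you abstract the ``$ztpserver``, ``$port`` and ``$vrf_name`` variables out of the template, the template can then be referenced from a definition. The following is a minimal sketch (the values shown are placeholders):

.. code-block:: yaml

   ---
   actions:
     -
       action: add_config
       attributes:
         url: files/templates/config-push.template
         variables:
           ztpserver: 192.0.2.100
           port: 8080
           vrf_name: mgmt
       name: "Install startup-config push event-handler"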
Zero-Touch Replacement (ZTR)
----------------------------
Objective
^^^^^^^^^
I replaced a switch with a new one and want it to provision with the same
configuration and, optionally, EOS version as the node it replaced.
Solution
^^^^^^^^
ZTPServer first looks for a pre-existing definition for a node in the
``<configdir>/nodes/<node-id>`` directory before trying to match through neighbordb, etc.
Thus, you can make ZTPServer think it has already seen this node by
renaming, linking or copying the old-node's directory to the new-node's
unique-id before powering the switch on for the first time.
Moving (renaming) or linking are most commonly used, however, making a
recursive copy will ensure that the last-known configuration of the previous
node remains stored as a backup.
.. code-block:: console
cd /usr/share/ztpserver/nodes
ln -s <old-node_id> <new-node_id>
.. End of ZTR
Installation
============
**Recipes**
.. The line below adds a local TOC
.. contents:: :local:
:depth: 1
Install ZTPServer from Github Source
------------------------------------
Objective
^^^^^^^^^
I want to install ZTPServer from source.
Solution
^^^^^^^^
To install the latest code in `development <https://github.com/arista-eosplus/ztpserver/tree/develop>`_:
.. code-block:: console
# Change to desired download directory
mkdir -p ~/arista
cd ~/arista
git clone https://github.com/arista-eosplus/ztpserver.git
cd ztpserver
python setup.py build
python setup.py install
Or, to install a specific `tagged release <https://github.com/arista-eosplus/ztpserver/releases>`_:
.. code-block:: console
# Change to desired download directory
mkdir -p ~/arista
cd ~/arista
git clone https://github.com/arista-eosplus/ztpserver.git
cd ztpserver
git checkout v1.2.0
python setup.py build
python setup.py install
Explanation
^^^^^^^^^^^
Github is used to store the source code for the ZTPServer and the ``develop``
branch always contains the latest publicly available code. The first method above
clones the git repo and automatically checks out the ``develop`` branch. We then
``build`` and ``install`` using Python.
The second method uses the ``git checkout`` command to set your working
directory to a specific release of the ZTPServer. Both methods of installation
will produce the files below.
**Important Installation Files**
* ZTPServer Global Configuration File: ``/etc/ztpserver/ztpserver.conf``
* ZTPServer WSGI App: ``/etc/ztpserver/ztpserver.wsgi``
* ZTPServer Provisioning Files: ``/usr/share/ztpserver/`` known as ``data_root``
* Bootstrap Config File: ``/usr/share/ztpserver/bootstrap/bootstrap.conf``
* Bootstrap Python Script: ``/usr/share/ztpserver/bootstrap/bootstrap``
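As a quick sanity check that the install succeeded (assuming ``ztps`` is now on your PATH), you can print the command-line help:

.. code-block:: console

   admin@ztpserver:~# ztps --help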
.. End of Install ZTPServer from Github Source
Install ZTPServer using PIP
---------------------------
Objective
^^^^^^^^^
Install ZTPServer using PyPI (pip)
Solution
^^^^^^^^
This option assumes you have a server with Python and pip pre-installed.
See `installing pip <https://pip.pypa.io/en/latest/installing.html>`_.
Once pip is installed, type:
.. code-block:: console
pip install ztpserver
Explanation
^^^^^^^^^^^
The pip install process will install all dependencies and run the install script,
leaving you with a ZTPServer instance ready to configure.
**Important Installation Files**
* ZTPServer Global Configuration File: ``/etc/ztpserver/ztpserver.conf``
* ZTPServer WSGI App: ``/etc/ztpserver/ztpserver.wsgi``
* ZTPServer Provisioning Files: ``/usr/share/ztpserver/`` known as ``data_root``
* Bootstrap Config File: ``/usr/share/ztpserver/bootstrap/bootstrap.conf``
* Bootstrap Python Script: ``/usr/share/ztpserver/bootstrap/bootstrap``
.. End of Install ZTPServer using PIP
Puppet Agent - Bootstrap EOS
============================
.. The line below adds a local TOC
.. contents:: :local:
:depth: 1
Bootstrap EOS to Puppet
-----------------------
Objective
^^^^^^^^^
I want to bootstrap an EOS node with the Puppet agent.
Solution
^^^^^^^^
.. note:: Prior to EOS 4.14.5, eAPI must be configured with HTTPS or HTTP and a flash:eapi.conf must be created for rbeapi. Starting with EOS 4.14.5, rbeapi can use unix-sockets to communicate with eAPI, locally.
Download the `Puppet Enterprise agent <https://puppetlabs.com/download-puppet-enterprise-all#agent>`_ (may be used with Puppet Enterprise or Open Source) from PuppetLabs and the `Ruby client for eAPI (pe-rbeapi) <https://github.com/arista-eosplus/rbeapi/releases>`_ SWIX from GitHub. Place these files in ``/usr/share/ztpserver/files/puppet/``, then create a definition similar to the following:
.. code-block:: yaml
---
name: puppet-test
actions:
-
name: "Install Puppet agent"
action: install_extension
always_execute: true
attributes:
url: files/puppet/puppet-enterprise-3.8.2-eos-4-i386.swix
-
name: "Install rbeapi - Ruby client for eAPI"
action: install_extension
always_execute: true
attributes:
url: files/puppet/rbeapi-0.3.0.swix
-
name: "Configure host alias and eAPI for Puppet"
action: add_config
attributes:
url: files/templates/puppet.template
variables:
hostname: allocate('mgmt_hostnames')
domainname: example.com
puppetmaster: 172.16.130.10
ntpserver: 66.175.209.17
onstart: "Starting to configure EOS for Puppet"
onsuccess: "SUCCESS: Base config for Puppet"
Then create the ``files/templates/puppet.template`` file referenced by the definition above:

.. code-block:: console
!
alias puppet bash sudo /opt/puppet/bin/puppet
!
hostname $hostname
!
ip domain-name $domainname
!
ip host puppet $puppetmaster
!
ntp server $ntpserver prefer iburst
!
management api http-commands
no protocol https
protocol unix-socket
no shutdown
!
Explanation
^^^^^^^^^^^
Here we use the ``install_extension`` action to install the Puppet agent and
Ruby client for eAPI, then apply a minimal configuration so the Puppet agent
can generate its SSL keys and contact the Puppet Master. The attributes listed
in the ``add_config`` action will be passed to the node so that it is able to
properly generate its SSL keypair and certificate signing request (CSR) and
validate the Puppet master's certificate.
.. note:: For more Action recipes see the Actions section.
.. End of Bootstrap a Puppet node
Actions
=======
.. The line below adds a local TOC
.. contents:: :local:
:depth: 1
Add a Configuration Block to a Node
-----------------------------------
Objective
^^^^^^^^^
In order to keep your provisioning data modular, you may want to break your
configuration into small, reusable blocks. You can use the ``add_config``
action to place a block of configuration on the node.
Solution
^^^^^^^^
**Example 1: Add a static block of configuration to your node**
First, create a template file with the desired configuration.
.. code-block:: console
# Go to your data_root - by default it's /usr/share/ztpserver
admin@ztpserver:~# cd /usr/share/ztpserver
# Make sure you have a directory for templates
admin@ztpserver:~# mkdir -p files/templates
# Create a static config block
admin@ztpserver:~# vi files/templates/east-dns.template
.. code-block:: console
!
ip name-server vrf default east.ns1.example.com
!
Then add the ``add_config`` action to your definition:
.. code-block:: yaml
---
actions:
-
action: add_config
attributes:
url: files/templates/east-dns.template
name: "Add East DNS Server"
Explanation
^^^^^^^^^^^
Here we defined a simple action that adds configuration to the node during
provisioning. The ``url`` in this case is relative to ``[data_root]``. It's
important to realize that the ZTPServer does not compile these configuration
blocks into a startup-config and then send a single file to the node. Rather,
the node executes each action independently, building the configuration in a
modular fashion. If you are interested in performing variable substitution in your
templates to make them more flexible, see the next recipe.
.. note:: Please see the `add_config <http://ztpserver.readthedocs.org/en/master/actions.html#module-actions.add_config>`_
documentation for more details.
.. end of Add a Configuration Block to a Node
Add Configuration to a Node Using Variables
-------------------------------------------
Objective
^^^^^^^^^
I want to keep my templates flexible by using variables. In some cases, I'd like
to assign a variable from a resource pool.
Solution
^^^^^^^^
First, create a template file with the desired configuration. In this recipe let's
configure interface Management1.
.. code-block:: console
# Go to your data_root - by default it's /usr/share/ztpserver
admin@ztpserver:~# cd /usr/share/ztpserver
# Make sure you have a directory for templates
admin@ztpserver:~# mkdir -p files/templates
# Create a static config block
admin@ztpserver:~# vi files/templates/ma1.template
Paste this config into the template:
.. code-block:: console
!
interface Management1
ip address $ipaddress
no shutdown
!
Then add the ``add_config`` action to your definition:
.. code-block:: yaml
---
actions:
-
action: add_config
attributes:
url: files/templates/ma1.template
variables:
ipaddress: allocate("mgmt_subnet")
name: "Configure Ma1"
Then create a resource pool called mgmt_subnet:
.. code-block:: console
# Create a resource pool
admin@ztpserver:~# vi resources/mgmt_subnet
Paste the following into ``mgmt_subnet``:
.. code-block:: yaml
192.0.2.10/24: null
192.0.2.11/24: null
192.0.2.12/24: null
192.0.2.13/24: null
Explanation
^^^^^^^^^^^
This recipe ties a few different concepts together. From a high-level, the definition
contains an action, ``add_config``, which references a configuration block, ``ma1.template``.
Further, we use a variable, ``$ipaddress`` in the template file so that the template
can be used for all nodes being provisioned. The final piece is the use of the
``allocate()`` plugin, which dynamically assigns a key from the associated
file-based resource pool.
In practice, when a node requests its definition the ZTPServer will execute the
``allocate("mgmt_subnet")`` plugin and assign a key from the pool.
The ZTPServer will then write the SYSTEM_ID as the value, overwriting ``null``.
If you wanted to use the assigned value elsewhere in the definition, simply call
``allocate(mgmt_subnet)`` and the plugin will not assign a new value, rather it
will return the key already assigned. Note that this is an implementation-detail
specific to this particular plugin - other plugins might vary (please read the
associated documentation for each).
The result would look like:
.. code-block:: yaml
192.0.2.10/24: <SYSTEM_ID>
192.0.2.11/24: null
192.0.2.12/24: null
192.0.2.13/24: null
.. note:: Please see the `add_config <http://ztpserver.readthedocs.org/en/master/actions.html#module-actions.add_config>`_
documentation for more details.
.. end of Add Configuration to a Node Using Variables
Replace Entire Startup-Config During Provisioning
-------------------------------------------------
Objective
^^^^^^^^^
I have a complete startup-config that I want to apply during provisioning. I want
to completely replace what's already on the node.
Solution
^^^^^^^^
First, create the startup-config with the desired configuration.
.. code-block:: console
# Go to your data_root - by default it's /usr/share/ztpserver
admin@ztpserver:~# cd /usr/share/ztpserver
# Make sure you have a directory for templates
admin@ztpserver:~# mkdir -p files/configs
# Create a startup-config
admin@ztpserver:~# vi files/configs/tor-startup-config
.. code-block:: console
!
hostname test-node-1
ip name-server vrf default <DNS-SERVER-IP>
!
ntp server <NTP-SERVER-IP>
!
username admin privilege 15 role network-admin secret admin
!
interface Management1
ip address <MGMT-IP-ADDRESS>/<SUBNET>
!
ip access-list open
10 permit ip any any
!
ip route 0.0.0.0/0 <DEFAULT-GW>
!
ip routing
!
management api http-commands
no shutdown
!
banner login
Welcome to $(hostname)!
This switch has been provisioned using the ZTPServer from Arista Networks
Docs: http://ztpserver.readthedocs.org/
Source Code: https://github.com/arista-eosplus/ztpserver
EOF
!
end
Then add the ``replace_config`` action to your definition:
.. code-block:: yaml
---
actions:
-
action: replace_config
attributes:
url: files/configs/tor-startup-config
name: "Replace entire startup-config"
Explanation
^^^^^^^^^^^
This action simply replaces the ``startup-config`` which lives in ``/mnt/flash/startup-config``.
.. note:: Please see the `replace_config <http://ztpserver.readthedocs.org/en/master/actions.html#module-actions.replace_config>`_
documentation for more details.
.. end of Add a Configuration Block to a Node
Copy a File to a Node During Provisioning
-----------------------------------------
Objective
^^^^^^^^^
I want to copy a file to the node during the provisioning process and then
set its permissions.
Solution
^^^^^^^^
In this example we'll copy a python script to the node and set its permissions.
.. code-block:: yaml
---
actions:
-
action: copy_file
always_execute: true
attributes:
dst_url: /mnt/flash/
mode: 777
overwrite: if-missing
src_url: files/automate/bgpautoinf.py
name: "automate BGP peer interface config"
Explanation
^^^^^^^^^^^
Here we add the ``copy_file`` action to our definition. The attributes listed in
the action will be passed to the node so that it is able to retrieve the script
from ``[SERVER_URL]/files/automate/bgpautoinf.py``. Since we are using ``overwrite: if-missing``,
the action will only copy the file to the node if it does not already exist.
You could define the ``url`` as any destination the node can reach during provisioning - the
file does not need to exist on the ZTPServer.
.. note:: Please see the `copy_file <http://ztpserver.readthedocs.org/en/master/actions.html#module-actions.copy_file>`_
documentation for more details.
.. end of Copy a File to a Node During Provisioning
Install a Specific EOS Image
----------------------------
Objective
^^^^^^^^^
I want a specific (v)EOS version to be automatically installed when I provision
my node.
.. note:: This assumes that you've already downloaded the desired (v)EOS image
from `Arista <https://www.arista.com/en/support/software-download>`_.
Solution
^^^^^^^^
Let's create a place on the ZTPServer to host some SWIs:
.. code-block:: console
# Go to your data_root - by default it's /usr/share/ztpserver
admin@ztpserver:~# cd /usr/share/ztpserver
# Create an images directory
admin@ztpserver:~# mkdir -p files/images
# SCP your SWI into the images directory, name it whatever you like
admin@ztpserver:~# scp admin@otherhost:/tmp/vEOS.swi files/images/vEOS_4.14.5F.swi
Now let's create a definition that performs the ``install_image`` action:
.. code-block:: console
# Go to your data_root - by default it's /usr/share/ztpserver
admin@ztpserver:~# cd /usr/share/ztpserver
# Create a definition file
admin@ztpserver:~# vi definitions/tor-definition
Add the following lines to your definition, changing values where needed:
.. code-block:: yaml
---
name: static node definition
actions:
-
action: install_image
always_execute: true
attributes:
url: files/images/vEOS_4.14.5F.swi
version: 4.14.5F
name: "Install 4.14.5F"
.. note:: The definition uses YAML syntax
Explanation
^^^^^^^^^^^
In this case we are hosting the SWI on the ZTPServer, so we just define the ``url`` in relation
to the ``data_root``. We could change the ``url`` to point to another server
altogether - the choice is yours. The benefit of hosting the file on the
ZTPServer is that we perform an extra checksum step to validate the integrity of
the file.
In practice, the node requests its definition during the provisioning process. It
sees that it's supposed to perform the ``install_image`` action, so it
requests the ``install_image`` python script. It then performs an HTTP GET for
the ``url``. Once it has these locally, it executes the
`install_image <http://ztpserver.readthedocs.org/en/master/actions.html#module-actions.install_image>`_
script.
.. end of Install a specific EOS image
Install a Specific EOS Image without downgrading newer systems
--------------------------------------------------------------
Objective
^^^^^^^^^
I want a specific (v)EOS version to be automatically installed when I provision
my node but I don't want systems with newer EOS versions to be downgraded
.. note:: This assumes that you've already downloaded the desired (v)EOS image
from `Arista <https://www.arista.com/en/support/software-download>`_.
Solution
^^^^^^^^
Let's create a place on the ZTPServer to host some SWIs:
.. code-block:: console
# Go to your data_root - by default it's /usr/share/ztpserver
admin@ztpserver:~# cd /usr/share/ztpserver
# Create an images directory
admin@ztpserver:~# mkdir -p files/images
# SCP your SWI into the images directory, name it whatever you like
admin@ztpserver:~# scp admin@otherhost:/tmp/vEOS.swi files/images/vEOS_4.14.5F.swi
Now let's create a definition that performs the ``install_image`` action:
.. code-block:: console
# Go to your data_root - by default it's /usr/share/ztpserver
admin@ztpserver:~# cd /usr/share/ztpserver
# Create a definition file
admin@ztpserver:~# vi definitions/tor-definition
Add the following lines to your definition, changing values where needed. Specifically note the ``downgrade: false`` attribute.
.. code-block:: yaml
---
name: static node definition
actions:
-
action: install_image
attributes:
downgrade: false
url: files/images/vEOS_4.17.1F.swi
version: 4.17.1F
name: "Install 4.17.1F"
.. note:: The definition uses YAML syntax
Explanation
^^^^^^^^^^^
The difference between this recipe and the one above is setting the ``downgrade`` attribute to ``false``. When downgrades are disabled, an image will only be copied if the running image is older than the image in the ZTP configuration.
.. end of Install a specific EOS image
Install an Extension
--------------------
Objective
^^^^^^^^^
I want to install an extension on my node automatically.
Solution
^^^^^^^^
Let's create a place on the ZTPServer to host the RPMs:
.. code-block:: console
# Go to your data_root - by default it's /usr/share/ztpserver
admin@ztpserver:~# cd /usr/share/ztpserver
# Create an rpms directory
admin@ztpserver:~# mkdir -p files/rpms
# SCP your RPM into the rpms directory, name it whatever you like
admin@ztpserver:~# scp admin@otherhost:/tmp/myRPM.rpm files/rpms/myRPM.rpm
Now let's create a definition that performs the ``install_extension`` action:
.. code-block:: console
# Go to your data_root - by default it's /usr/share/ztpserver
admin@ztpserver:~# cd /usr/share/ztpserver
# Create a definition file
admin@ztpserver:~# vi definitions/tor-definition
Add the following lines to your definition, changing values where needed:
.. code-block:: yaml
---
name: static node definition
actions:
-
action: install_extension
always_execute: true
attributes:
url: files/rpms/myRPM.rpm
name: "Install myRPM extension"
.. note:: The definition uses YAML syntax
Explanation
^^^^^^^^^^^
The ``install_extension`` action copies the RPM defined in the ``url`` parameter
to the default extensions directory, ``/mnt/flash/.extensions``
.. note:: Please see the `install_extension <http://ztpserver.readthedocs.org/en/master/actions.html#module-actions.install_extension>`_
documentation for more details.
Server-Side Logging
===================
.. The line below adds a local TOC
.. contents:: :local:
:depth: 1
Standalone - Redirect Output to file
------------------------------------
Objective
^^^^^^^^^
When running the ZTPServer in Standalone Mode, the logs just fill up my console
so I'd like to be able to redirect the output to a file.
Solution
^^^^^^^^
With INFO level logging:
.. code-block:: console
admin@ztpserver:~# ztps >~/ztps-console.log 2>&1 &
With DEBUG level logging:
.. code-block:: console
admin@ztpserver:~# ztps --debug >~/ztps-console.log 2>&1 &
Explanation
^^^^^^^^^^^
Here we invoke the ztps process as usual, but redirect both stdout and stderr
to a predefined file. Of course, be sure that you have permission to write
to the file you have listed.
.. End of Standalone - Redirect Output to file
Apache - View Standard Logs
---------------------------
Objective
^^^^^^^^^
I'm running the ZTPServer as a WSGI under Apache, so where do the logs go?
Solution
^^^^^^^^
Typically, you can see each transaction in:
.. code-block:: console
# Ubuntu
admin@ztpserver:~# more /var/log/apache2/access.log
# Fedora
admin@ztpserver:~# more /var/log/httpd/access_log
And the ZTPServer logs will be in:
.. code-block:: console
# Ubuntu
admin@ztpserver:~# more /var/log/apache2/error.log
# Fedora
admin@ztpserver:~# more /var/log/httpd/error_log
Explanation
^^^^^^^^^^^
These locations are the default on most standard Apache installs. It might be
misleading, but all levels of ZTPServer logging will end up in the Apache error log.
**Example**
.. code-block:: console
[Fri Dec 12 10:49:42.186976 2014] [:error] [pid 864] INFO: [app:115] Logging started for ztpserver
[Fri Dec 12 10:49:42.187112 2014] [:error] [pid 864] INFO: [app:116] Using repository /usr/share/ztpserver
.. End of Apache - View Standard Logs
Ansible - Bootstrap EOS
=======================
.. The line below adds a local TOC
.. contents:: :local:
:depth: 1
Introduction
------------
The following recipes will help you bootstrap Arista EOS switches for use with
Ansible. Please review the `Ansible-EOS <http://ansible-eos.readthedocs.org/en/master/overview.html#the-ansible-eos-role>`_
documentation to determine your preferred connection type: SSH or eAPI.
.. note:: Please contact us if you are interested in dynamically adding your
nodes to Ansible Tower. We have various examples that utilize the
Tower API to add your node to a specific Tower inventory and/or group.
Bootstrap EOS for Ansible using SSH
-----------------------------------
Objective
^^^^^^^^^
I want to bootstrap an EOS node so that I can use Ansible to SSH to the node.
Solution
^^^^^^^^
.. note:: Prior to EOS 4.14.5, eAPI must be configured with HTTPS or HTTP and a
flash:eapi.conf must be created for pyeapi or the eAPI credentials
must be passed in the Ansible task using meta arguments.
Starting with EOS 4.14.5, pyeapi can use unix-sockets to communicate
with eAPI, locally.
**Step 1** Gather Ansible Control Host SSH Key
First, store the Ansible Control Host SSH key on the ZTPServer (or make it available via URL).
When the ``configure_ansible_client`` action runs it will create a bash user on the
switch and put this key in ``~/.ssh/authorized_keys``.
In ``[DATA_ROOT]/files/ssh/key.pub``
.. code-block:: console
ssh-rsa AAAAB3NzaC1yc....rest of public key......
**Step 2** Create a management IP resource pool
Reference this `recipe <http://ztpserver.readthedocs.org/en/develop/cookbook/actions.html#add-configuration-to-a-node-using-variables>`_
for an example, or start from the minimal sketch below.
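A minimal ``[data_root]/resources/mgmt_subnet`` pool might look like this (the addresses are illustrative):

.. code-block:: yaml

   192.0.2.10/24: null
   192.0.2.11/24: null
   192.0.2.12/24: null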
**Step 3** Create eAPI configuration
In ``[DATA_ROOT]/files/templates/eapi.template``
**Option A** Using Unix Sockets (4.14.5+)
.. code-block:: console
!
management api http-commands
no protocol https
protocol unix-socket
no shutdown
!
**Option B** Using HTTPS
.. code-block:: console
!
management api http-commands
no shutdown
!
**Option C** Using HTTP
.. code-block:: console
!
management api http-commands
no shutdown
no protocol https
protocol http
!
**Step 4** Create a definition
Let's use the ``configure_ansible_client`` action to create the desired SSH user.
.. code-block:: yaml
---
actions:
-
action: configure_ansible_client
attributes:
key: files/ssh/key.pub
user: ansible
passwd: password
group: eosadmin
root: "/persist/local/"
name: "Configure Ansible"
-
action: add_config
attributes:
url: files/templates/ma1.template
variables:
ipaddress: allocate('mgmt_subnet')
name: "configure ma1"
-
action: add_config
attributes:
url: files/templates/eapi.template
name: "Enable eAPI"
Explanation
^^^^^^^^^^^
Here we use the ``add_config`` action to load the switch with a standard
eAPI configuration as well as assign Management1 interface an IP address
allocated from the mgmt_subnet pool. Note that ZTPServer supports custom
allocate scripts that could dynamically assign an IP address from your own
IPAM. Also, the ``configure_ansible_client`` action is called. This client-side
action will create a bash user, with the specified name, and install any
SSH keys provided to ``~/.ssh/authorized_keys``. This is helpful because it takes
care of authentication between the Ansible Control host and the switch. The action
also writes to ``rc.eos`` to create this user on every boot (since it would normally be blown away).
Bootstrap EOS for Ansible using eAPI
------------------------------------
Objective
^^^^^^^^^
I want to bootstrap an EOS node so that I can use Ansible in connection:local
mode and connect to my switch via eAPI.
Solution
^^^^^^^^
**Step 1** Create a management IP resource pool
Reference this `recipe <http://ztpserver.readthedocs.org/en/develop/cookbook/actions.html#add-configuration-to-a-node-using-variables>`_
for an example.
**Step 2** Create eAPI configuration
In ``[DATA_ROOT]/files/templates/eapi.template``
**Option A** Using HTTPS
.. code-block:: console
!
management api http-commands
no shutdown
!
**Option B** Using HTTP
.. code-block:: console
!
management api http-commands
no shutdown
no protocol https
protocol http
!
**Step 3** Create a definition
.. code-block:: yaml
---
actions:
-
action: add_config
attributes:
url: files/templates/ma1.template
variables:
ipaddress: allocate('mgmt_subnet')
name: "configure ma1"
-
action: add_config
attributes:
url: files/templates/eapi.template
name: "Enable eAPI"
Explanation
^^^^^^^^^^^
Here we use the ``add_config`` action to load the switch with a standard
eAPI configuration as well as assign Management1 interface an IP address
allocated from the mgmt_subnet pool. Note that ZTPServer supports custom
allocate scripts that could dynamically assign an IP address from your own
IPAM.
.. note:: For more Action recipes see the Actions section.
Provision a Dynamic Node
========================
.. The line below adds a local TOC
.. contents:: :local:
:depth: 1
Using Open Patterns
-------------------
Objective
^^^^^^^^^
I want to provision a node without knowing anything about it. I just want it to
receive a default configuration.
Solution
^^^^^^^^
You can accomplish this by using neighbordb. Neighbordb contains associations
between LLDP neighbor patterns and definitions. So if we use a pattern that
matches anything, we can use it to assign a simple, default definition.
.. code-block:: console
# Go to your data_root - by default it's /usr/share/ztpserver
admin@ztpserver:~# cd /usr/share/ztpserver
# Modify your neighbordb
admin@ztpserver:~# vi neighbordb
Add the following lines to your definition, changing values where needed:
.. code-block:: yaml
---
patterns:
- name: Default Pattern
definition: default
interfaces:
- any: any:any
If you happen to be provisioning a node in isolation and the node does not have
any neighbors, use the following pattern:
.. code-block:: yaml
---
patterns:
- name: Default Pattern
definition: default
interfaces:
- none: none:none
Then add a definition to ``[data_root]/definitions/default``
.. note:: See the sections on Definitions and Actions to learn more.
Explanation
^^^^^^^^^^^
By placing this pattern in your neighbordb, the ZTPServer will allow this node
to be provisioned and will assign it the ``default`` definition. Use caution when
placing this pattern in your neighbordb, as it might cause nodes to receive the
``default`` definition when you intend them to match a different pattern.
.. End of Using Open Patterns
Identify a Node Based Upon Specific Neighbor
--------------------------------------------
Objective
^^^^^^^^^
I want my node to be dynamically provisioned based upon a specific LLDP
neighbor association.
Solution
^^^^^^^^
Modify your neighbordb:
.. code-block:: console
# Go to your data_root - by default it's /usr/share/ztpserver
admin@ztpserver:~# cd /usr/share/ztpserver
# Modify your neighbordb
admin@ztpserver:~# vi neighbordb
Then add the pattern that includes the required match.
.. code-block:: yaml
---
patterns:
- name: tora for pod1
definition: tora
interfaces:
- Ethernet1: dc1-pod1-spine1:Ethernet1
This pattern says that the node being provisioned must have a connection between
its Ethernet1 and dc1-pod1-spine1's Ethernet1.
Explanation
^^^^^^^^^^^
In this recipe we use neighbordb to link a pattern with a definition. When a node
executes the bootstrap script it will send the ZTPServer some information about
itself. The ZTPServer will not find any existing directory with the node's
System-ID (System MAC or Serial Number depending upon your configuration) so it
next checks neighbordb to try and find a match. The ZTPServer will analyze
the nodes LLDP neighbors, find the match in neighbordb and then apply the ``tora``
definition.
.. End of Identify a Node Based Upon Specific Neighbor
Identify a Node’s Neighbors Using Regex
---------------------------------------
Objective
^^^^^^^^^
I want my node to be dynamically provisioned and I'd like to match certain
neighbors using regex.
Solution
^^^^^^^^
Modify your neighbordb:
.. code-block:: console
# Go to your data_root - by default it's /usr/share/ztpserver
admin@ztpserver:~# cd /usr/share/ztpserver
# Modify your neighbordb
admin@ztpserver:~# vi neighbordb
Then add the pattern that includes the required match.
.. code-block:: yaml
---
patterns:
- name: tora for pod1
definition: tora
interfaces:
- Ethernet1: regex('dc1-pod1-spine\D+'):Ethernet1
This pattern says that the node being provisioned must have a connection between
its Ethernet1 and the Ethernet1 of any dc1-pod1-spine.
Explanation
^^^^^^^^^^^
In this recipe we use neighbordb to link a pattern with a definition. When a node
executes the bootstrap script it will send the ZTPServer some information about
itself. The ZTPServer will not find any existing directory with the node's
System-ID (System MAC or Serial Number depending upon your configuration) so it
next checks neighbordb to try and find a match. The ZTPServer will analyze
the nodes LLDP neighbors, find the match in neighbordb and then apply the ``tora``
definition.
.. note:: There are a few different functions that you can use other than ``regex()``.
Check out this `section <http://ztpserver.readthedocs.org/en/master/config.html#variables>`_
to learn more.
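For instance (a hedged sketch; check the linked section for the exact set of supported functions and their syntax), a pattern line using the ``includes()`` match function might look like:

.. code-block:: yaml

   interfaces:
     - Ethernet1: includes('pod1-spine'):Ethernet1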
.. End of Identify a Node Based Upon Specific Neighbor
Definitions
===========
.. The line below adds a local TOC
.. contents:: :local:
:depth: 1
Add an Action to a Definition
-----------------------------
Objective
^^^^^^^^^
I want to use one of the built-in actions in my definition file.
Solution
^^^^^^^^
You can choose any of the pre-built actions to include in your definition.
.. note:: Learn more about `Actions <http://ztpserver.readthedocs.org/en/master/config.html#actions>`_.
In this example we'll copy a python script to the node and set its permissions.
.. code-block:: yaml
---
actions:
-
action: copy_file
always_execute: true
attributes:
dst_url: /mnt/flash/
mode: 777
overwrite: if-missing
src_url: files/automate/bgpautoinf.py
name: "automate BGP peer interface config"
Explanation
^^^^^^^^^^^
Here we add the ``copy_file`` action to our definition. The attributes listed in
the action will be passed to the node so that it is able to retrieve the script
from ``[SERVER_URL]/files/automate/bgpautoinf.py``. Since we are using ``overwrite: if-missing``,
the action will only copy the file to the node if it does not already exist.
.. note:: For more Action recipes see the Actions section.
.. End of Add an Action to a Definition
Add Global Variables to Definition
----------------------------------
Objective
^^^^^^^^^
I want to use a variable throughout my definition without having to define it
more than once.
Solution
^^^^^^^^
You can accomplish this by adding an ``attributes`` section at the root level of
your definition file.
.. note:: Learn more about `Actions <http://ztpserver.readthedocs.org/en/master/config.html#actions>`_.
In this example, we have two different actions that reference the same ``$mode``
and ``$dst`` variables.
.. code-block:: yaml
---
actions:
-
action: copy_file
always_execute: true
attributes:
dst_url: $dst
mode: $mode
overwrite: if-missing
src_url: files/automate/bgpautoinf.py
name: "Copy automate BGP script to node"
-
action: copy_file
always_execute: true
attributes:
dst_url: $dst
mode: $mode
overwrite: if-missing
src_url: files/automate/superautomate.py
name: "Copy awesome script to my node"
-
action: add_config
attributes:
url: files/templates/ma1.template
variables:
ipaddress: $ip
name: "configure ma1"
-
action: add_config
attributes:
url: files/templates/xmpp.template
variables: $variables
name: "configure ma1"
attributes:
dst: /mnt/flash
mode: 777
ip: 192.168.0.50
variables:
domain: im.example.com
user: myXmmpUser
passwd: secret
room: myAwesomeRoom
Explanation
^^^^^^^^^^^
This example shows how to use global variables within the definition. It's
important to see the difference between using variables to define attributes
of the action versus variables that get used within the template in an
``add_config`` action. See how the ``ipaddress`` variable is nested within
a ``variables`` key? Also, you can create a list in the ``attributes`` section
and pass the entire list into the action as shown in the XMPP config action.
.. note:: For more Action recipes see the Actions section.
.. End of Add an Action to a Definition
Add Custom Log Statements as Action Executes
--------------------------------------------
Objective
^^^^^^^^^
I want to send specific messages to my syslog and/or XMPP servers while an action
is executing. Especially, if something goes wrong, I'd like to add a helpful message
so the engineer knows who to contact.
Solution
^^^^^^^^
The node being provisioned will send predefined logs to the endpoints defined in
``[data_root]/bootstrap/bootstrap.conf``, but you can send additional client-side
logs by adding a few attributes to your definition.
Let's add some specific status messages to the definition below.
.. note:: This could be a static node definition in ``[data_root]/nodes/<SYSTEM_ID>/definition``
or a global definition in ``[data_root]/definitions/definition_name``.
.. code-block:: yaml
---
actions:
-
action: copy_file
always_execute: true
attributes:
dst_url: $dst
mode: $mode
overwrite: if-missing
src_url: files/automate/bgpautoinf.py
name: "Copy automate BGP script to node"
onstart: "Starting the action to copy the BGP script"
onsuccess: "SUCCESS: The BGP script has been copied"
onfailure: "ERROR: Failed to copy script - contact [email protected]"
attributes:
dst: /mnt/flash
mode: 777
Explanation
^^^^^^^^^^^
Here we make use of three specific keywords: ``onstart``, ``onsuccess`` and
``onfailure``. By adding these keys to your definition, the node will generate
this message while it is being provisioned. As mentioned above, this message will
be sent to all of the logging destinations defined in ``[data_root]/bootstrap/bootstrap.conf``.
.. note:: For help defining an XMPP or syslog endpoint, see :ref:`client-logging-label`
.. End of Add an Action to a Definition
Resource Pools
==============
.. The line below adds a local TOC
.. contents:: :local:
:depth: 1
Add a New Resource Pool
-----------------------
Objective
^^^^^^^^^
I'd like to add a new resource pool of IP addresses so that I can assign a new
IP to each node that gets provisioned.
.. note:: Resource Pools are simple ``key: value`` YAML files.
Solution
^^^^^^^^
**Create the resource pool**
.. code-block:: console
# Go to your data_root - by default it's /usr/share/ztpserver
admin@ztpserver:~# cd /usr/share/ztpserver
# Create a resource pool file
admin@ztpserver:~# vi resources/mgmt_ip
.. code-block:: yaml
192.168.0.2/24: null
192.168.0.3/24: null
192.168.0.4/24: null
192.168.0.5/24: null
192.168.0.6/24: null
192.168.0.7/24: null
192.168.0.8/24: null
192.168.0.9/24: null
192.168.0.10/24: null
Explanation
^^^^^^^^^^^
Resource Pool files are just ``key: value`` files. The default value for each
key should be ``null``. This makes the key available for assignment. If you would
like to pre-assign a specific node with a particular key, then just put the
node's node_id in place of ``null``. Resource Pools are analyzed when the
``allocate(pool_name)`` function is run from a definition. Note that you can
also use the ``allocate()`` function to perform a lookup when a node has
already been assigned a key.
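For reference, here is a minimal sketch of how a definition might consume this
pool through the ``allocate()`` plugin (the action and template are illustrative,
borrowed from earlier recipes):
.. code-block:: yaml
---
actions:
-
action: add_config
attributes:
url: files/templates/ma1.template
variables:
ipaddress: allocate('mgmt_ip')
name: "configure ma1"
Each node that requests this definition is handed the next free key from
``resources/mgmt_ip``, and that key's value in the pool file is replaced with
the node's node_id.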
.. End of Add a New Resource Pool
Clearing a Resource Pool
------------------------
Objective
^^^^^^^^^
I'd like to reset the values of a resource pool so that all values return to
``null``.
Solution
^^^^^^^^
You can use the ztps command line to perform this action.
.. code-block:: console
admin@ztpserver:~# ztps --clear-resources
.. note:: This will clear **ALL** resource pools
Explanation
^^^^^^^^^^^
Clearing all resource pools can be done via the command line on the ZTPServer.
The command analyzes ``[data_root]/resources`` and clears any file in that
directory that resembles a ZTPServer resource pool, resetting every value back
to ``null``.
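As an illustration, a pool that has already handed out two addresses might look
like this before clearing (the node IDs shown are examples):
.. code-block:: yaml
192.168.0.2/24: 001122334455
192.168.0.3/24: 001c73aabbcc
192.168.0.4/24: null
After ``ztps --clear-resources`` runs, every value is reset to ``null`` and all
keys become available for allocation again.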
.. End of Clearing a Resource Pool
Hello World - A Simple Provisioning Example
===========================================
.. The line below adds a local TOC
.. contents:: :local:
:depth: 1
**Introduction**
The following set of recipes will help you perform a basic provisioning task
using the ZTPServer. There are some assumptions:
* You have already installed the ZTPServer
* You have performed the basic configuration to define which interface and port the server will run on.
* You have a DHCP server running with ``option bootfile-name "http://<ZTPSERVER-URL>:<PORT>/bootstrap";`` `Sample config <https://github.com/arista-eosplus/packer-ztpserver/blob/master/Fedora/conf/dhcpd.conf>`_
* Your test (v)EOS node can receive DHCP responses
* Make sure the ztps process is not running
.. note:: If you would like to test this in a virtual environment, please see the
`packer-ztpserver <https://github.com/arista-eosplus/packer-ztpserver>`_
Github repo to learn how to automatically install a ZTPServer with all
of the complementary services (DHCP, DNS, NTP, XMPP, and SYSLOG). Both
Virtual Box and VMware are supported.
Prepare Your Switch for Provisioning
------------------------------------
Objective
^^^^^^^^^
I want to prepare my test device (vEOS or EOS) for use with the ZTPServer. This
will put your switch into ZTP Mode, so backup any configs you want to save.
Solution
^^^^^^^^
Log into your (v)EOS node, then:
.. code-block:: console
switch-name> enable
switch-name# write erase
Proceed with erasing startup configuration? [confirm] y
switch-name# reload now
Explanation
^^^^^^^^^^^
ZTP Mode is enabled when a switch boots and there is no startup-config (or it's empty) found in
``/mnt/flash/``. Therefore, we use the ``write erase`` command to clear the current
startup-config and use ``reload now`` to reboot the switch. When the switch comes
up you will see it enter ZTP Mode and begin sending DHCP requests on all interfaces.
.. End of Prepare Your Switch for Provisioning
Add a Static Node Entry
-----------------------
Objective
^^^^^^^^^
I want to provision my switch based upon its System MAC Address.
Solution
^^^^^^^^
Log into your (v)EOS node to get its MAC Address. If it's in ZTP Mode, just log in
with username ``admin``:
.. code-block:: console
switch-name> show version
.. note:: Copy the System MAC Address for later.
Confirm your ZTPServer Configuration will identify a node based upon its MAC:
.. code-block:: console
admin@ztpserver:~# vi /etc/ztpserver/ztpserver.conf
Look for the line ``identifier`` and confirm it's set to ``systemmac``:
.. code-block:: console
identifier = systemmac
Finally, let's create a nodes directory for this device:
.. code-block:: console
# Go to your data_root - by default it's /usr/share/ztpserver
admin@ztpserver:~# cd /usr/share/ztpserver
# Move to the nodes directory, where all node information is stored
admin@ztpserver:~# cd nodes
# Create a directory using the MAC Address you found earlier
admin@ztpserver:~# mkdir 001122334455
Explanation
^^^^^^^^^^^
A node is considered to be statically provisioned when a directory with its
System ID is already located in the ``nodes/`` directory.
Note that the System ID can be the node's System MAC Address or its Serial Number.
In this case we chose to use the ``systemmac`` since vEOS nodes don't have a
Serial Number by default.
Just adding this directory is not enough to provision the node. The remaining
recipes will finish off the task.
.. End of Add a Static Node Entry
Create a Startup-Config with Minimal Configuration
--------------------------------------------------
Objective
^^^^^^^^^
When my node is provisioned, I want it to be passed a static startup-config. This config will include
some basic management network info, including DNS and NTP servers. It will set
the admin user's password to admin, and enable eAPI.
Solution
^^^^^^^^
.. code-block:: console
# Go to your data_root - by default it's /usr/share/ztpserver
admin@ztpserver:~# cd /usr/share/ztpserver
# Move to the specific node directory that you created earlier
admin@ztpserver:~# cd nodes/001122334455
# Create a startup-config
admin@ztpserver:~# vi startup-config
Copy and paste this startup-config, changing values where you see fit:
.. code-block:: console
!
hostname test-node-1
ip name-server vrf default <DNS-SERVER-IP>
!
ntp server <NTP-SERVER-IP>
!
username admin privilege 15 role network-admin secret admin
!
interface Management1
ip address <MGMT-IP-ADDRESS>/<SUBNET>
!
ip access-list open
10 permit ip any any
!
ip route 0.0.0.0/0 <DEFAULT-GW>
!
ip routing
!
management api http-commands
no shutdown
!
banner login
Welcome to $(hostname)!
This switch has been provisioned using the ZTPServer from Arista Networks
Docs: http://ztpserver.readthedocs.org/
Source Code: https://github.com/arista-eosplus/ztpserver
EOF
!
end
Explanation
^^^^^^^^^^^
When the ZTPServer receives a request from your node to begin provisioning, it
will find the directory ``nodes/001122334455`` and know that this node is
statically configured. In this case, a ``startup-config`` must be present. In
practice, the ZTPServer tells the node to perform the ``replace_config`` action
with this file as the source.
.. End of Create a startup-config file with minimal configuration
Add Event Handler to Backup the startup-config to the ZTPServer
---------------------------------------------------------------
Objective
^^^^^^^^^
I want to backup the latest startup-config from my node so that if I make changes
or have to replace the node I have the latest copy.
.. note:: By adding this, the node will perform an HTTP PUT and overwrite the
``nodes/001122334455/startup-config`` file.
Solution
^^^^^^^^
.. code-block:: console
# Go to your data_root - by default it's /usr/share/ztpserver
admin@ztpserver:~# cd /usr/share/ztpserver
# Move to the specific node directory that you created earlier
admin@ztpserver:~# cd nodes/001122334455
# Edit your startup-config
admin@ztpserver:~# vi startup-config
Add the following lines to your startup-config, changing values where needed:
.. code-block:: console
event-handler configpush
trigger on-startup-config
! For default VRF, make sure to update the ztpserver url
action bash export SYSMAC=`FastCli -p 15 -c 'show ver | grep MAC | cut -d" " -f 5' | sed 's/[.]*//g'`; curl http://<ZTPSERVER-URL>:<PORT>/nodes/$SYSMAC/startup-config -H "content-type: text/plain" --data-binary @/mnt/flash/startup-config -X PUT
! For non-default VRF, update and use:
! action bash export SYSMAC=`FastCli -p 15 -c 'show ver | grep MAC | cut -d" " -f 5' | sed 's/[.]*//g'`; ip netns exec ns-<VRF-NAME> curl http://<ZTPSERVER-URL>:<PORT>/nodes/$SYSMAC/startup-config -H "content-type: text/plain" --data-binary @/mnt/flash/startup-config -X PUT
Explanation
^^^^^^^^^^^
By adding these lines to the startup-config, the event handler will be pushed
to the node during provisioning. From that point onward, the node will perform
an HTTP PUT of its startup-config and the ZTPServer will overwrite the
startup-config file in the node's directory.
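If you want to test the endpoint yourself before pointing a switch at it, you
can mimic the event handler from any host with curl (the source file used here
is just a placeholder):
.. code-block:: console
# Send a test config to the node's entry on the ZTPServer
admin@ztpserver:~# curl -X PUT http://<ZTPSERVER-URL>:<PORT>/nodes/001122334455/startup-config -H "content-type: text/plain" --data-binary @/tmp/test-config
# Confirm the file in the node's directory was overwritten
admin@ztpserver:~# cat /usr/share/ztpserver/nodes/001122334455/startup-config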
.. End of Add Event Handler to Backup the startup-config to the ZTPServer
Install a Specific (v)EOS Version
---------------------------------
Objective
^^^^^^^^^
I want a specific (v)EOS version to be automatically installed when I provision
my node.
.. note:: This assumes that you've already downloaded the desired (v)EOS image
from `Arista <https://www.arista.com/en/support/software-download>`_.
Solution
^^^^^^^^
Let's create a place on the ZTPServer to host some SWIs:
.. code-block:: console
# Go to your data_root - by default it's /usr/share/ztpserver
admin@ztpserver:~# cd /usr/share/ztpserver
# Create an images directory
admin@ztpserver:~# mkdir -p files/images
# SCP your SWI into the images directory, name it whatever you like
admin@ztpserver:~# scp admin@otherhost:/tmp/vEOS.swi files/images/vEOS_4.14.5F.swi
Now let's create a definition that performs the ``install_image`` action:
.. code-block:: console
# Go to your data_root - by default it's /usr/share/ztpserver
admin@ztpserver:~# cd /usr/share/ztpserver
# Move to the specific node directory that you created earlier
admin@ztpserver:~# cd nodes/001122334455
# Create a definition file
admin@ztpserver:~# vi definition
Add the following lines to your definition, changing values where needed:
.. code-block:: yaml
---
name: static node definition
actions:
-
action: install_image
always_execute: true
attributes:
url: files/images/vEOS_4.14.5F.swi
version: 4.14.5F
name: "Install 4.14.5F"
.. note:: The definition uses YAML syntax
Explanation
^^^^^^^^^^^
The definition is where we list all of the `actions <http://ztpserver.readthedocs.org/en/master/config.html#actions>`_
we want the node to execute during the provisioning process. In this case we are
hosting the SWI on the ZTPServer, so we just define the ``url`` in relation
to the ``data_root``. We could change the ``url`` to point to another server
altogether - the choice is yours. The benefit in hosting the file on the
ZTPServer is that we perform an extra checksum step to validate the integrity of
the file.
In practice, the node requests its definition during the provisioning process. It
sees that it's supposed to perform the ``install_image`` action, so it
requests the ``install_image`` python script. It then performs an HTTP GET for
the ``url``. Once it has these locally, it executes the
``install_image`` `script <https://github.com/arista-eosplus/ztpserver/blob/develop/actions/install_image>`_.
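Before rebooting the node, you can check that the ZTPServer is actually serving
the image and its checksum metadata (a quick sanity check; the exact content
returned by the ``/meta`` path may vary between releases):
.. code-block:: console
# Confirm the SWI is reachable
admin@ztpserver:~# curl -I http://<ZTPSERVER-URL>:<PORT>/files/images/vEOS_4.14.5F.swi
# Ask for the checksum metadata the node uses to validate the file
admin@ztpserver:~# curl http://<ZTPSERVER-URL>:<PORT>/meta/files/images/vEOS_4.14.5F.swi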
.. End of Install a Specific (v)EOS Version
Start ZTPServer in Standalone Mode
----------------------------------
Objective
^^^^^^^^^
Okay, enough reading and typing; let's push some buttons!
Solution
^^^^^^^^
Let's run the ZTPServer in `Standalone Mode <http://ztpserver.readthedocs.org/en/master/startup.html#standalone-debug-server>`_
since this is just a small test. Login to your ZTPServer:
.. code-block:: console
# Start the ZTPServer - console logging will appear
admin@ztpserver:~# ztps
INFO: [app:115] Logging started for ztpserver
INFO: [app:116] Using repository /usr/share/ztpserver
Starting server on http://<ZTPSERVER-URL>:<PORT>
Explanation
^^^^^^^^^^^
The easiest way to run the ZTPServer is in Standalone Mode, which is done by
typing ``ztps`` in a shell. This starts the server listening for HTTP requests
on the configured interface and port. Your DHCP server will provide the node with ``option bootfile-name "http://<ZTPSERVER-URL>:<PORT>/bootstrap"``
in the DHCP response, which lets the node know where to grab the bootstrap script.
**A Quick Overview of the Provisioning Process for this Node**
#. **GET /bootstrap**: The node gets the bootstrap script and begins executing it. The following requests are made while the bootstrap script is being executed.
#. **GET /bootstrap/config**: The node gets the bootstrap config which contains XMPP and Syslog information for the node to send logs to.
#. **POST /nodes**: The node sends information about itself in JSON format to the ZTPServer. The ZTPServer parses this info and finds the System MAC. It looks in the ``nodes/`` directory and finds a match.
#. **GET /nodes/001122334455**: The node requests its definition and learns what resources it has to retrieve.
#. **GET /actions/install_image**: The node retrieves the install_image script.
#. **GET /files/images/vEOS_4.14.5F.swi**: The node retrieves the SWI referenced in the definition.
#. **GET /meta/files/images/vEOS_4.14.5F.swi**: The node retrieves the checksum of the SWI for validation and integrity.
#. **GET /actions/replace_config**: The node retrieves the replace_config script.
#. **GET /nodes/001122334455/startup-config**: The node retrieves the startup-config we created earlier.
#. **GET /meta/nodes/001122334455/startup-config**: The node retrieves the checksum of the startup-config.
#. **Node Applies Config and Reboots**
#. **PUT /nodes/001122334455/startup-config**: The node uploads its current startup-config.
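If the node never requests the bootstrap script, first confirm the server
answers on the URL handed out by DHCP; fetching the script manually from any
host is a quick check:
.. code-block:: console
admin@ztpserver:~# wget http://<ZTPSERVER-URL>:<PORT>/bootstrap -O /tmp/bootstrap
admin@ztpserver:~# head -5 /tmp/bootstrap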
.. End of Start ZTPServer in Standalone Mode
.. _client-logging-label:
Client-Side Logging
===================
.. The line below adds a local TOC
.. contents:: :local:
:depth: 1
Configure Syslog Logging
------------------------
Objective
^^^^^^^^^
I want to send client logs to a syslog server or a local file during provisioning.
Solution
^^^^^^^^
.. code-block:: console
# Edit the bootstrap configuration file
admin@ztpserver:~# vi /usr/share/ztpserver/bootstrap/bootstrap.conf
Add any syslog servers or files, be sure to choose the level of logging:
.. code-block:: yaml
---
logging:
-
destination: <SYSLOG-URL>:<PORT>
level: DEBUG
-
destination: file:/tmp/ztps-log
level: INFO
Explanation
^^^^^^^^^^^
The node will request the contents of the ``bootstrap.conf`` when it performs
``GET /bootstrap/config``. Once the node retrieves this information it will
send logs to the ``destination(s):`` listed under ``logging:``.
.. End of Configure Syslog Logging
Configure XMPP Logging
------------------------
Objective
^^^^^^^^^
I want to send client logs to specific XMPP server rooms.
Solution
^^^^^^^^
.. code-block:: console
# Edit the bootstrap configuration file
admin@ztpserver:~# vi /usr/share/ztpserver/bootstrap/bootstrap.conf
Add any XMPP servers and associated rooms:
.. code-block:: yaml
---
xmpp:
domain: <XMPP-SERVER-URL>
username: bootstrap
password: eosplus
rooms:
- ztps
- devops
- admins
Explanation
^^^^^^^^^^^
The node will request the contents of the ``bootstrap.conf`` when it performs
``GET /bootstrap/config`` and will try to join the rooms listed with the
credentials provided. Typically when joining a room, you would use a string
like ``[email protected]``. Be sure to provide just the
``domain: xmpp-server.example.com``, leaving out the ``conference`` prefix.
.. note:: In order for XMPP logging to work, a non-EOS user needs to be connected
to the room specified in bootstrap.conf before the ZTP process starts.
The room has to be created (by the non-EOS user) before the bootstrap
client starts logging the ZTP process via XMPP.
.. End of Configure XMPP Logging
Provision a Static Node
=======================
.. The line below adds a local TOC
.. contents:: :local:
:depth: 1
Add a Static Node Entry
-----------------------
Objective
^^^^^^^^^
I want to provision my switch based upon its System ID (System MAC Address
or Serial Number).
Solution
^^^^^^^^
Log into your (v)EOS node to get its System ID. If it's in ZTP Mode, just log in
with username ``admin``:
.. code-block:: console
switch-name> show version
.. note:: Copy down the System ID (System MAC Address or Serial Number).
Let's create a node directory for this device:
.. code-block:: console
# Go to your data_root - by default it's /usr/share/ztpserver
admin@ztpserver:~# cd /usr/share/ztpserver
# Move to the nodes directory, where all node information is stored
admin@ztpserver:~# cd nodes
# Create a directory using the MAC Address you found earlier
admin@ztpserver:~# mkdir <SYSTEM_ID>
Explanation
^^^^^^^^^^^
A node is considered to be statically provisioned when a directory with its
System ID is already located in the ``nodes/`` directory.
Note that the System ID can be the node's System MAC Address or its Serial Number.
Just adding this directory is not enough to provision the node. The remaining
recipes will finish off the task. To successfully provision a node
statically, you will need to create the following files and place them in
``[data_root]/nodes/<SYSTEM_ID>``:
* ``startup-config``
* ``pattern`` file - if Topology Validation is enabled
* ``definition`` - if you choose to apply other actions during provisioning
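Once these files are in place, the node's directory might look like this (the
``attributes`` file is optional and covered later in this section):
.. code-block:: console
admin@ztpserver:~# ls /usr/share/ztpserver/nodes/<SYSTEM_ID>
attributes  definition  pattern  startup-config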
.. note:: Confirm your ZTPServer Configuration will identify a node based upon
the desired System ID by checking /etc/ztpserver/ztpserver.conf and
check the value of ``identifier``
.. End of Add a Static Node Entry
Create a Startup-Config File
----------------------------
Objective
^^^^^^^^^
I want the node to receive a startup-config during provisioning.
Solution
^^^^^^^^
Create a file named ``startup-config`` in ``[data_root]/nodes/<SYSTEM_ID>/``.
.. code-block:: console
# Go to your data_root - by default it's /usr/share/ztpserver
admin@ztpserver:~# cd /usr/share/ztpserver
# Move to the node directory you created above.
admin@ztpserver:~# cd nodes/<SYSTEM_ID>
# Create/edit the startup-config file
admin@ztpserver:~# vi startup-config
Place the desired configuration into the startup-config. Here's an example. Please
change values where you see fit:
.. code-block:: console
!
hostname test-node-1
ip name-server vrf default <DNS-SERVER-IP>
!
ntp server <NTP-SERVER-IP>
!
username admin privilege 15 role network-admin secret admin
!
interface Management1
ip address <MGMT-IP-ADDRESS>/<SUBNET>
!
ip access-list open
10 permit ip any any
!
ip route 0.0.0.0/0 <DEFAULT-GW>
!
ip routing
!
management api http-commands
no shutdown
!
banner login
Welcome to $(hostname)!
This switch has been provisioned using the ZTPServer from Arista Networks
Docs: http://ztpserver.readthedocs.org/
Source Code: https://github.com/arista-eosplus/ztpserver
EOF
!
end
Explanation
^^^^^^^^^^^
A startup-config file is required when you statically provision a node. The format
of the startup-config is the same as you are used to, which can be found on your
switch at file:startup-config (/mnt/flash/startup-config)
.. End of Create a Startup-Config File
Create a Pattern (Topology Validation enabled)
----------------------------------------------
Objective
^^^^^^^^^
I have created a static node directory and Topology Validation is
enabled, so I would like to make sure everything is wired up correctly before
provisioning a node.
.. note:: YAML syntax can be a pain sometimes. The indentation is done with
spaces and not tabs.
Solution
^^^^^^^^
Create a file named ``pattern`` in ``[data_root]/nodes/<SYSTEM_ID>/``
and define the LLDP associations.
.. code-block:: console
# Go to your data_root - by default it's /usr/share/ztpserver
admin@ztpserver:~# cd /usr/share/ztpserver
# Move to the node directory you created above.
admin@ztpserver:~# cd nodes/<SYSTEM_ID>
# Create/edit the pattern file
admin@ztpserver:~# vi pattern
**Example 1:** Match any neighbor
This pattern essentially disables Topology Validation.
.. code-block:: yaml
---
name: Match anything
interfaces:
- any: any:any
**Example 2:** Match any interface on a specific neighbor
This pattern says, the node being provisioned must be connected to a neighbor
with hostname ``pod1-spine1`` but it can be connected to any peer interface.
.. code-block:: yaml
---
name: Anything on pod1-spine1
interfaces:
- any: pod1-spine1:any
**Example 3:** Match specific interface on a specific neighbor
This pattern says, the node being provisioned must be connected to a neighbor
with hostname ``pod1-spine1`` on Ethernet1.
.. code-block:: yaml
---
name: Anything on pod1-spine1
interfaces:
- any: pod1-spine1:Ethernet1
**Example 4:** Make sure I'm not connected to a node
This pattern is the same as Example #2, but we add another check to make sure the
node being provisioned is not connected to any spines in ``pod2``.
.. code-block:: yaml
---
name: Not connected to anything in pod2
interfaces:
- any: pod1-spine1:any
- any: regex('pod2-spine\d+'):none
- none: regex('pod2-spine\d+'):any #equivalent to line above
**Example 5:** Using variables in the pattern
This pattern is similar to what you've seen above except we use variables
to make things easier.
.. code-block:: yaml
---
name: Not connected to any spine in pod2
variables:
- not_pod2: regex('pod2-spine\d+')
interfaces:
- any: pod1-spine1:any
- any: $not_pod2:none
Explanation
^^^^^^^^^^^
Pattern files are YAML-based and are the underpinnings of Topology Validation.
A node will not be successfully provisioned if it cannot pass all of the interface
tests contained within the pattern file. The examples above are just a small
sample of the complex associations you can create. Take a look at the
`neighbordb <http://ztpserver.readthedocs.org/en/master/config.html#dynamic-provisioning-neighbordb>`_
section to learn more.
.. note:: YAML can be a pain, and invalid YAML syntax will cause provisioning to
fail. You can make sure your syntax is correct by using a tool like
`YAMLlint <http://www.yamllint.com>`_
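If you prefer to validate the file offline instead of using a web tool, a
one-line check with Python works as well (this assumes PyYAML is available on
the server, which ZTPServer itself uses to parse these files):
.. code-block:: console
admin@ztpserver:~# cd /usr/share/ztpserver/nodes/<SYSTEM_ID>
admin@ztpserver:~# python -c "import yaml; yaml.safe_load(open('pattern'))" && echo "pattern OK"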
.. End of Create a Pattern (if Topology Validation is enabled)
Create a Definition File
------------------------
Objective
^^^^^^^^^
Aside from sending the node a startup-config, I'd like to upgrade the node to
a specific v(EOS) version.
Solution
^^^^^^^^
These types of system changes are accomplished via the ``definition`` file. The
definition is a YAML-based file with a section for each action that you
want to execute.
.. note:: Learn more about `Actions <http://ztpserver.readthedocs.org/en/master/config.html#actions>`_.
.. code-block:: console
# Go to your data_root - by default it's /usr/share/ztpserver
admin@ztpserver:~# cd /usr/share/ztpserver
# Create an images directory
admin@ztpserver:~# mkdir -p files/images
# SCP your SWI into the images directory, name it whatever you like
admin@ztpserver:~# scp admin@otherhost:/tmp/vEOS.swi files/images/vEOS_4.14.5F.swi
Now let's create a definition that performs the ``install_image`` action:
.. code-block:: console
# Go to your data_root - by default it's /usr/share/ztpserver
admin@ztpserver:~# cd /usr/share/ztpserver
# Move to the specific node directory that you created earlier
admin@ztpserver:~# cd nodes/<SYSTEM_ID>
# Create a definition file
admin@ztpserver:~# vi definition
Add the following lines to your definition, changing values where needed:
.. code-block:: yaml
---
name: static node definition
actions:
-
action: install_image
always_execute: true
attributes:
url: files/images/vEOS_4.14.5F.swi
version: 4.14.5F
name: "Install 4.14.5F"
Explanation
^^^^^^^^^^^
The definition is where we list all of the `actions <http://ztpserver.readthedocs.org/en/master/config.html#actions>`_
we want the node to execute during the provisioning process. In this case we are
hosting the SWI on the ZTPServer, so we just define the ``url`` in relation
to the ``data_root``. We could change the ``url`` to point to another server
altogether - the choice is yours. The benefit in hosting the file on the
ZTPServer is that we perform an extra checksum step to validate the integrity of
the file.
In practice, the node requests its definition during the provisioning process. It
sees that it's supposed to perform the ``install_image`` action, so it
requests the ``install_image`` python script. It then performs an HTTP GET for
the ``url``. Once it has these locally, it executes the
``install_image`` `script <https://github.com/arista-eosplus/ztpserver/blob/develop/actions/install_image>`_.
.. End of Create a Definition File
Create an Attributes File
-------------------------
Objective
^^^^^^^^^
I want to use variables in my definition and abstract the values to a unique file.
These variables will be sent down to the node during provisioning and be used while
the node is executing the actions listed in the definition.
Solution
^^^^^^^^
Create a file named ``attributes`` in ``[data_root]/nodes/<SYSTEM_ID>/``.
.. code-block:: console
# Go to your data_root - by default it's /usr/share/ztpserver
admin@ztpserver:~# cd /usr/share/ztpserver
# Move to the node directory you created above.
admin@ztpserver:~# cd nodes/<SYSTEM_ID>
# Create/edit the attributes file
admin@ztpserver:~# vi attributes
Here are the different ways to define attributes:
**Example 1:** A simple key/value pair
.. code-block:: yaml
---
ntp_server: ntp.example.com
dns_server: ns1.example.com
**Example 2:** key/dictionary
.. code-block:: yaml
---
system_config:
ntp: ntp.example.com
dns: ns1.example.com
**Example 3:** key/list (note the hyphens)
.. code-block:: yaml
---
dns_servers:
- ns1.example.com
- ns2.example.com
- ns3.example.com
- ns4.example.com
**Example 4:** Referencing another variable
.. code-block:: yaml
---
ntp_server: ntp.example.com
other_var: $ntp_server
Borrowing from the definition recipe above, we can replace some values with
variables from the attributes file:
**nodes/<SYSTEM_ID>/definition**
.. code-block:: yaml
---
name: static node definition
actions:
-
action: install_image
always_execute: true
attributes:
url: $swi_url
version: $swi_version
name: $swi_name
and the **nodes/<SYSTEM_ID>/attributes**
.. code-block:: yaml
---
swi_url: files/images/vEOS_4.14.5F.swi
swi_version: 4.14.5F
swi_name: "Install 4.14.5F"
Explanation
^^^^^^^^^^^
The ``attributes`` file is optional. The variables that are contained within it
are sent to the node during provisioning. In the final example above you can see
how the attributes file and definition work in concert. Note that the ZTPServer
performs variable substitution when the node requests the definition via
GET /nodes/<SYSTEM_ID>. By removing the static values from the definition, we can
use the same definition for multiple nodes (using symlink) and just create unique
attributes files in the node's directory.
It's important to note that these variables can exist in different places and
accomplish the same task. In this recipe we created a unique attributes file,
which lives in the node's directory. You can also put these attributes directly
into the definition file like the example below.
**Example: At the global scope of the definition**
.. code-block:: yaml
---
name: static node definition
actions:
-
action: install_image
always_execute: true
attributes:
url: $swi_url
version: $swi_version
name: $swi_name
attributes:
swi_url: files/images/vEOS_4.14.5F.swi
swi_version: 4.14.5F
swi_name: "Install 4.14.5F"
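In either case, the definition the node actually receives back from
``GET /nodes/<SYSTEM_ID>`` has the variables already substituted, roughly like
this (a sketch; the exact response format may differ slightly):
.. code-block:: yaml
---
name: static node definition
actions:
-
action: install_image
always_execute: true
attributes:
url: files/images/vEOS_4.14.5F.swi
version: 4.14.5F
name: "Install 4.14.5F"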
.. End of Create an Attributes File
Symlink to a Generic Definition
-------------------------------
Objective
^^^^^^^^^
I'd like to use the same definition for multiple static node directories without
manually updating each one.
Solution
^^^^^^^^
Create one definition in the ``[data_root]/definitions`` folder and create a symlink
to the specific ``[data_root]/nodes/<SYSTEM_ID>/`` folder.
**[data_root]/definitions/static_node_definition**
.. code-block:: yaml
---
name: static node definition
actions:
-
action: install_image
always_execute: true
attributes:
url: $swi_url
version: $swi_version
name: $swi_name
and the **nodes/<SYSTEM_ID>/attributes**
.. code-block:: yaml
---
swi_url: files/images/vEOS_4.14.5F.swi
swi_version: 4.14.5F
swi_name: "Install 4.14.5F"
then create the symlink
.. code-block:: console
# Go to your node's unique directory
admin@ztpserver:~# cd /usr/share/ztpserver/nodes/<SYSTEM_ID>
# Create the symlink
admin@ztpserver:~# ln -s /usr/share/ztpserver/definitions/static_node_definition ./definition
Explanation
^^^^^^^^^^^
The steps above let you reuse a single definition file for many static nodes. Note
that the variables are located in the attributes file in the ``nodes/<SYSTEM_ID>/``
folder.
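You can confirm the link resolves as expected (paths assume the default
``data_root``); the output should show ``definition`` pointing at
``definitions/static_node_definition``:
.. code-block:: console
admin@ztpserver:~# ls -l /usr/share/ztpserver/nodes/<SYSTEM_ID>/definition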
.. End of Symlink to a Generic Definition
Running the ZTPServer
=====================
.. The line below adds a local TOC
.. contents:: :local:
:depth: 1
Standalone - Change the ZTPServer Interface
-------------------------------------------
Objective
^^^^^^^^^
I only want the ZTPServer process to listen on a specific network interface.
Solution
^^^^^^^^
Open up the global ZTPServer configuration file:
.. code-block:: console
admin@ztpserver:~# vi /etc/ztpserver/ztpserver.conf
Look for the line ``interface`` in the [server] group.
.. code-block:: console
# To listen on all interfaces
interface = 0.0.0.0
# To listen on a specific interface
interface = 192.0.2.100
Restart the ztps process:
.. code-block:: console
# If running in Standalone Mode, stop ztps
admin@ztpserver:~# pkill ztps
# Then start it again
admin@ztpserver:~# ztps &
Explanation
^^^^^^^^^^^
This recipe helps you define a specific interface for the ZTPServer to listen on.
.. note:: Be sure the ``interface`` coincides with the ``server_url`` value in
the configuration file.
.. End of Standalone - Change the ZTPServer Interface
Standalone - Run ZTPServer on a Specific Port
---------------------------------------------
Objective
^^^^^^^^^
I want to define which port the ZTPServer listens on.
Solution
^^^^^^^^
Open up the global ZTPServer configuration file:
.. code-block:: console
admin@ztpserver:~# vi /etc/ztpserver/ztpserver.conf
Look for the line ``port`` in the [server] group.
.. code-block:: console
# Choose a port of your liking
port = 8080
Restart the ztps process:
.. code-block:: console
# If running in Standalone Mode, stop ztps
admin@ztpserver:~# pkill ztps
# Then start it again
admin@ztpserver:~# ztps &
Explanation
^^^^^^^^^^^
This recipe helps you define a specific port for the ZTPServer to listen on.
.. note:: Be sure the ``port`` coincides with the ``server_url`` value in
the configuration file.
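To confirm the standalone server is listening on the new port, fetch the
bootstrap script against it (substitute your server's address and the port you
chose):
.. code-block:: console
admin@ztpserver:~# curl -s http://<ZTPSERVER-URL>:8080/bootstrap | head -5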
.. End of Standalone - Change the ZTPServer Port
Standalone - Run ZTPServer in a Sub-directory
---------------------------------------------
Objective
^^^^^^^^^
I don't want to run the ZTPServer at the root of my domain, I want it in a
sub-directory.
Solution
^^^^^^^^
Open up the global ZTPServer configuration file:
.. code-block:: console
admin@ztpserver:~# vi /etc/ztpserver/ztpserver.conf
Look for the line ``server_url`` in the [default] group.
.. code-block:: console
# Choose a subdirectory
server_url = http://ztpserver:8080/not/in/root/anymore
Restart the ztps process:
.. code-block:: console
# If running in Standalone Mode, stop ztps
admin@ztpserver:~# pkill ztps
# Then start it again
admin@ztpserver:~# ztps &
Explanation
^^^^^^^^^^^
The ``server_url`` key defines where the REST API lives. You do not need to
change any of your file locations to affect change. Simply change the key above.
.. note:: You can confirm the change by doing a simple
``wget http://server:port/new/directory/path/bootstrap`` to retrieve
the bootstrap script.
.. End of Standalone - Run ZTPServer in a sub-directory
Apache - Run ZTPServer on a Specific Port
-----------------------------------------
Objective
^^^^^^^^^
I'm running ZTPServer as a WSGI with Apache and want to change what port it
listens on.
Solution
^^^^^^^^
Apache configurations can vary widely, and the ZTPServer has no control over this,
so view this simply as a suggestion.
Open up your Apache configuration file:
.. code-block:: console
# Apache
admin@ztpserver:~# vi /etc/apache2/sites-enabled/ztpserver.conf
# HTTPd
admin@ztpserver:~# vi /etc/httpd/conf.d/ztpserver.conf
Change the ``Listen`` and ``VirtualHost`` values to the desired port.
.. code-block:: apacheconf
LoadModule wsgi_module modules/mod_wsgi.so
Listen 8080
<VirtualHost *:8080>
WSGIDaemonProcess ztpserver user=www-data group=www-data threads=50
WSGIScriptAlias / /etc/ztpserver/ztpserver.wsgi
# Required for RHEL
#WSGISocketPrefix /var/run/wsgi
<Location />
WSGIProcessGroup ztpserver
WSGIApplicationGroup %{GLOBAL}
# For Apache <= 2.2, use Order and Allow
Order deny,allow
Allow from all
# For Apache >= 2.4, Allow is replaced by Require
Require all granted
</Location>
# Override default logging locations for Apache
#ErrorLog /path/to/ztpserver_error.log
#CustomLog /path/to/ztpserver_access.log
</VirtualHost>
Restart the ztps process:
.. code-block:: console
# Restart Apache
admin@ztpserver:~# service apache2 restart
Explanation
^^^^^^^^^^^
When you run ZTPServer as a WSGI application under Apache (or a similar
server), the interface and port used to listen for HTTP requests are controlled
by the web server. The config snippet above shows how this might be done with Apache,
but note that variations might arise in your own environment.
.. End of Apache - Run ZTPServer on a Specific Port
Apache - Run ZTPServer in a Sub-directory
-----------------------------------------
Objective
^^^^^^^^^
I'm running ZTPServer as a WSGI with Apache and I want to change the path that
the REST API resides.
Solution
^^^^^^^^
WSGI-compliant webserver configurations can vary widely, so here's a sample of
how this is done with Apache.
Open up the global ZTPServer configuration file:
.. code-block:: console
admin@ztpserver:~# vi /etc/ztpserver/ztpserver.conf
Look for the line ``server_url`` in the [default] group.
.. code-block:: console
# Choose a subdirectory
server_url = http://ztpserver:8080/not/in/root/anymore
You might think that you have to change your Apache conf to move this to a
sub-directory, but you don't. Your config should look like the block below.
Note the ``<Location />``.
.. code-block:: apacheconf
LoadModule wsgi_module modules/mod_wsgi.so
Listen 8080
<VirtualHost *:8080>
WSGIDaemonProcess ztpserver user=www-data group=www-data threads=50
WSGIScriptAlias / /etc/ztpserver/ztpserver.wsgi
# Required for RHEL
#WSGISocketPrefix /var/run/wsgi
<Location />
WSGIProcessGroup ztpserver
WSGIApplicationGroup %{GLOBAL}
# For Apache <= 2.2, use Order and Allow
Order deny,allow
Allow from all
# For Apache >= 2.4, Allow is replaced by Require
Require all granted
</Location>
# Override default logging locations for Apache
#ErrorLog /path/to/ztpserver_error.log
#CustomLog /path/to/ztpserver_access.log
</VirtualHost>
Restart the ztps process:
.. code-block:: console
# Restart Apache
admin@ztpserver:~# service apache2 restart
Explanation
^^^^^^^^^^^
It might seem counter-intuitive but the Apache configuration should use the
``Location`` directive to point at root. The desired change to the path is done
by the ZTPServer ``server_url`` configuration value in ``/etc/ztpserver/ztpserver.conf``.
.. End of Apache - Run ZTPServer in a Sub-directory
Change ZTPServer File Ownership
-------------------------------
Objective
^^^^^^^^^
I'd like all of the ZTPServer provisioning files to be owned by a particular
user/group.
.. note:: This is most often needed when running the ZTPServer WSGI App and the
apache user is unable to read/write to ``/usr/share/ztpserver``.
Solution
^^^^^^^^
.. code-block:: console
admin@ztpserver:~# chown -R myUser:myGroup /usr/share/ztpserver
admin@ztpserver:~# chmod -R ug+rw /usr/share/ztpserver
Explanation
^^^^^^^^^^^
The shell commands listed above set ownership and permissions for the default
data_root location ``/usr/share/ztpserver``. Be mindful that if you are running
the ZTPServer WSGI App, the mod_wsgi daemon user must be able to read/write to
these files.
.. note:: When running the ZTPServer WSGI App, you should also check the
ownership and permission of ``/etc/ztpserver/ztpserver.wsgi``.
.. End of Change ZTPServer File Ownership
Apache - Configure SELinux Permissions
--------------------------------------
Objective
^^^^^^^^^
My server has SELinux enabled and I'd like to set the ZTPServer file type so
that Apache can read/write files in the data_root.
.. note:: This is most often needed when running the ZTPServer WSGI App and the
apache user is unable to read/write to ``/usr/share/ztpserver``.
Solution
^^^^^^^^
.. code-block:: console
# For Fedora - httpd
admin@ztpserver:~# chcon -Rv --type=httpd_sys_script_rw_t /usr/share/ztpserver
# For Ubuntu - Apache
admin@ztpserver:~# chcon -R -h system_u:object_r:httpd_sys_script_rw_t /usr/share/ztpserver
Explanation
^^^^^^^^^^^
The shell commands listed above set the SELinux file attributes so that Apache
can read/write to the files. This is often the case since ``/usr/share/ztpserver``
is not in the normal operating directory ``/var/www/``. Note that the commands
above are suggestions and you might consider tweaking them to suit your own
environment.
.. End of Apache - Configure SELinux Permissions
ZTPServer Configuration
=======================
.. The line below adds a local TOC
.. contents:: :local:
:depth: 1
.. The following sections are commented out since there are bugs
.. Define the Data_Root
--------------------
Objective
^^^^^^^^^
I know that the default location for ZTPServer files is ``/usr/share/ztpserver/``
but I'd like to use a different location.
Solution
^^^^^^^^
Open up your ZTPServer Global Config file:
.. code-block:: console
admin@ztpserver:~# vi /etc/ztpserver/ztpserver.conf
Look for the line ``data_root`` and change it to the desired directory:
.. code-block:: console
[default]
# Location of all ztps boostrap process data files
data_root = /this/directory/is/better
Explanation
^^^^^^^^^^^
The ``data_root`` is critical to the operation of the ZTPServer. The server will
look in this directory for the ``nodes/``, ``files/``, ``actions/``, ``bootstrap/``
directories as well as your ``neighbordb`` file. If you would like to make
further changes to the location of these directories, see the lower section of
``ztpserver.conf``.
.. End of Define the Data_Root
.. Define the Bootstrap File Location
----------------------------------
Objective
^^^^^^^^^
I'd like to change the filename and path of the bootstrap script.
Solution
^^^^^^^^
Open up your ZTPServer Global Config file:
.. code-block:: console
admin@ztpserver:~# vi /etc/ztpserver/ztpserver.conf
Look for the line ``data_root`` and change it to the desired directory:
.. code-block:: console
[default]
# Location of all ztps boostrap process data files
data_root = /this/directory/is/better
Explanation
^^^^^^^^^^^
The ``data_root`` is critical to the operation of the ZTPServer. The server will
look in this directory for the ``nodes/``, ``files/``, ``actions/``, ``bootstrap/``
directories as well as your ``neighbordb`` file. If you would like to make
further changes to the location of these directories, see the lower section of
``ztpserver.conf``.
.. End of Define the Bootstrap File Location
Identify Nodes Based Upon Serial Number
---------------------------------------
Objective
^^^^^^^^^
I'd like the ZTPServer to use the switch's serial number for provisioning. This
implies that all node directories in ``nodes/`` will be named using the serial
number.
Solution
^^^^^^^^
Open up the global ZTPServer configuration file:
.. code-block:: console
admin@ztpserver:~# vi /etc/ztpserver/ztpserver.conf
Look for the line ``identifier`` and confirm it's set to ``serialnumber``:
.. code-block:: console
identifier = serialnumber
Restart the ztps process:
.. code-block:: console
# If using Apache WSGI
admin@ztpserver:~# service apache2 restart
# If running in Standalone Mode, stop ztps
admin@ztpserver:~# pkill ztps
# Then start it again
admin@ztpserver:~# ztps
Explanation
^^^^^^^^^^^
The ZTPServer will use either the System MAC Address or the Serial Number
of the switch as its System ID. The System ID is used to match statically
provisioned nodes. Also, when a node is dynamically provisioned, the ZTPServer
will create a new node directory for it in ``nodes/`` and it will be named using
the System ID.
.. End of Identify Nodes Based Upon Serial Number
Identify Nodes Based Upon System MAC Address
--------------------------------------------
Objective
^^^^^^^^^
I'd like the ZTPServer to use the switch's System MAC Address for provisioning.
This implies that all node directories in ``nodes/`` will be named using the
System MAC Address.
Solution
^^^^^^^^
Open up the global ZTPServer configuration file:
.. code-block:: console
admin@ztpserver:~# vi /etc/ztpserver/ztpserver.conf
Look for the line ``identifier`` and confirm it's set to ``systemmac``:
.. code-block:: console
identifier = systemmac
Restart the ztps process:
.. code-block:: console
# If using Apache WSGI
admin@ztpserver:~# service apache2 restart
# If running in Standalone Mode, stop ztps
admin@ztpserver:~# pkill ztps
# Then start it again
admin@ztpserver:~# ztps
Explanation
^^^^^^^^^^^
The ZTPServer will use either the System MAC Address or the Serial Number
of the switch as its System ID. The System ID is used to match statically
provisioned nodes. Also, when a node is dynamically provisioned, the ZTPServer
will create a new node directory for it in ``nodes/`` and it will be named using
the System ID.
.. End of Identify Nodes Based Upon System MAC Address
Enable/Disable Topology Validation
----------------------------------
Objective
^^^^^^^^^
Topology Validation uses LLDP Neighbor information to make sure you have everything
wired up correctly. Topology Validation is enabled/disabled in the main ``ztpserver.conf``
configuration file.
Solution
^^^^^^^^
Open up the global ZTPServer configuration file:
.. code-block:: console
admin@ztpserver:~# vi /etc/ztpserver/ztpserver.conf
Look for the line ``disable_topology_validation``
.. code-block:: console
# To disable Topology Validation
disable_topology_validation = True
# To enable Topology Validation
disable_topology_validation = False
Restart the ztps process:
.. code-block:: console
# If using Apache WSGI
admin@ztpserver:~# service apache2 restart
# If running in Standalone Mode, stop ztps
admin@ztpserver:~# pkill ztps
# Then start it again
admin@ztpserver:~# ztps
Explanation
^^^^^^^^^^^
This configuration option enables/disables Topology Validation. This feature
is extremely powerful and can help you confirm all of your nodes are wired up
correctly. See the recipes under :ref:`tv-reference-label` to learn more about
the flexibility of Topology Validation.
.. End of Enable/Disable Topology Validation
.. _tv-reference-label:
Topology Validation
===================
.. The line below adds a local TOC
.. contents:: :local:
:depth: 1
Enable/Disable Topology Validation
----------------------------------
Objective
^^^^^^^^^
Topology Validation uses LLDP Neighbor information to make sure you have everything
wired up correctly. Topology Validation is enabled/disabled in the main ``ztpserver.conf``
configuration file.
Solution
^^^^^^^^
Open up the global ZTPServer configuration file:
.. code-block:: console
admin@ztpserver:~# vi /etc/ztpserver/ztpserver.conf
Look for the line ``disable_topology_validation``
.. code-block:: console
# To disable Topology Validation
disable_topology_validation = True
# To enable Topology Validation
disable_topology_validation = False
Restart the ztps process:
.. code-block:: console
# If using Apache WSGI
admin@ztpserver:~# service apache2 restart
# If running in Standalone Mode, stop ztps
admin@ztpserver:~# pkill ztps
# Then start it again
admin@ztpserver:~# ztps
Explanation
^^^^^^^^^^^
This configuration option enables/disables Topology Validation. This feature
is extremely powerful and can help you confirm all of your nodes are wired up
correctly. See the recipes below to learn more about the flexibility of
Topology Validation.
.. End of Enable/Disable Topology Validation
Allow Any Neighbor
------------------
Objective
^^^^^^^^^
I want to provision a node without knowing anything about it. I just want it to
receive a default configuration.
Solution
^^^^^^^^
You can accomplish this by using neighbordb. Neighbordb contains associations
between LLDP neighbor patterns and definitions. So if we use a pattern that
matches anything, we can use it to assign a simple, default definition.
.. code-block:: console
# Go to your data_root - by default it's /usr/share/ztpserver
admin@ztpserver:~# cd /usr/share/ztpserver
# Modify your neighbordb
admin@ztpserver:~# vi neighbordb
Add the following lines to your neighbordb, changing values where needed:
.. code-block:: yaml
---
patterns:
- name: Default Pattern
definition: default
interfaces:
- any: any:any
If you happen to be provisioning a node in isolation and the node does not have
any neighbors, use the following pattern:
.. code-block:: yaml
---
patterns:
- name: Default Pattern
definition: default
interfaces:
- none: none:none
Then add a definition to ``[data_root]/definitions/default``
.. note:: See the sections on Definitions and Actions to learn more.
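The ``default`` definition itself can be as small as a single action; a minimal
sketch (the image name and version are illustrative) might be:
.. code-block:: yaml
---
name: default definition
actions:
-
action: install_image
always_execute: true
attributes:
url: files/images/vEOS_4.14.5F.swi
version: 4.14.5F
name: "Install 4.14.5F"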
Explanation
^^^^^^^^^^^
By placing this pattern in your neighbordb, the ZTPServer will allow this node
to be provisioned and will assign it the ``default`` definition. Use caution when
placing this pattern in your neighbordb as it might allow nodes to receive the
``default`` definition when you intend them to receive another pattern.
.. End of Allow Any Neighbor
Match Pattern with Exact String
-------------------------------
Objective
^^^^^^^^^
I want my node to be dynamically provisioned based upon a specific LLDP
neighbor association.
Solution
^^^^^^^^
Modify your neighbordb:
.. code-block:: console
# Go to your data_root - by default it's /usr/share/ztpserver
admin@ztpserver:~# cd /usr/share/ztpserver
# Modify your neighbordb
admin@ztpserver:~# vi neighbordb
Then add the pattern that includes the required match.
.. code-block:: yaml
---
patterns:
- name: tora for pod1
definition: tora
interfaces:
- Ethernet1: dc1-pod1-spine1:Ethernet1
This pattern says that the node being provisioned must have a connection between
its Ethernet1 and dc1-pod1-spine1's Ethernet1.
Explanation
^^^^^^^^^^^
In this recipe we use neighbordb to link a pattern with a definition. When a node
executes the bootstrap script it will send the ZTPServer some information about
itself. The ZTPServer will not find any existing directory with the node's
System-ID (System MAC or Serial Number depending upon your configuration) so it
next checks neighbordb to try and find a match. The ZTPServer will analyze
the node's LLDP neighbors, find the match in neighbordb and then apply the ``tora``
definition.
.. End of Identify a Node Based Upon Specific Neighbor
Match Pattern Using a Regular Expression
----------------------------------------
Objective
^^^^^^^^^
I want my node to be dynamically provisioned and I'd like to match certain
neighbors using regex.
Solution
^^^^^^^^
Modify your neighbordb:
.. code-block:: console
# Go to your data_root - by default it's /usr/share/ztpserver
admin@ztpserver:~# cd /usr/share/ztpserver
# Modify your neighbordb
admin@ztpserver:~# vi neighbordb
Then add the pattern that includes the required match.
.. code-block:: yaml
---
patterns:
- name: tora for pod1
definition: tora
interfaces:
- Ethernet1: regex('dc1-pod1-spine\d+'):Ethernet1
This pattern says that the node being provisioned must have a connection between
its Ethernet1 and Ethernet1 on any dc1-pod1 spine.
Explanation
^^^^^^^^^^^
In this recipe we use neighbordb to link a pattern with a definition. When a node
executes the bootstrap script it will send the ZTPServer some information about
itself. The ZTPServer will not find any existing directory with the node's
System-ID (System MAC or Serial Number depending upon your configuration) so it
next checks neighbordb to try and find a match. The ZTPServer will analyze
the node's LLDP neighbors, find the match in neighbordb and then apply the ``tora``
definition.
.. note:: There are a few different functions that you can use other than ``regex()``.
Check out this `section <http://ztpserver.readthedocs.org/en/master/config.html#variables>`_
to learn more.
.. End of Match Pattern Using a Regular Expression
Match Pattern That Includes a String
------------------------------------
Objective
^^^^^^^^^
I want my node to be dynamically provisioned and I'd like to match certain
neighbors as long as the neighbor hostname includes a certain string.
Solution
^^^^^^^^
Modify your neighbordb:
.. code-block:: console
# Go to your data_root - by default it's /usr/share/ztpserver
admin@ztpserver:~# cd /usr/share/ztpserver
# Modify your neighbordb
admin@ztpserver:~# vi neighbordb
Then add the pattern that includes the required match.
.. code-block:: yaml
---
patterns:
- name: tora for pod1
definition: tora
interfaces:
- Ethernet1: includes('dc1-pod1'):Ethernet1
This pattern says that the node being provisioned must have a connection between
its Ethernet1 and Ethernet1 of any neighbor whose hostname includes ``dc1-pod1``.
Explanation
^^^^^^^^^^^
In this recipe we use neighbordb to link a pattern with a definition. When a node
executes the bootstrap script it will send the ZTPServer some information about
itself. The ZTPServer will not find any existing directory with the node's
System-ID (System MAC or Serial Number depending upon your configuration) so it
next checks neighbordb to try and find a match. The ZTPServer will analyze
the node's LLDP neighbors, find the match in neighbordb and then apply the ``tora``
definition.
.. End of Match pattern that includes some string
Match Pattern That Excludes a String
------------------------------------
Objective
^^^^^^^^^
I want my node to be dynamically provisioned and I'd like to match certain
neighbors as long as the neighbor hostname excludes a certain string.
Solution
^^^^^^^^
Using the ``excludes()`` function allows you to match the inverse of the
``includes()`` function.
Modify your neighbordb:
.. code-block:: console
# Go to your data_root - by default it's /usr/share/ztpserver
admin@ztpserver:~# cd /usr/share/ztpserver
# Modify your neighbordb
admin@ztpserver:~# vi neighbordb
Then add the pattern that includes the required match.
.. code-block:: yaml
---
patterns:
- name: tora for pod1
definition: tora
interfaces:
- Ethernet1: includes('dc1-pod1'):Ethernet1
- any: excludes('spine'):Ethernet50
This pattern says that the node being provisioned must have a connection between
its Ethernet1 and Ethernet1 of a neighbor whose hostname includes ``dc1-pod1``,
and must also be connected (on any interface) to Ethernet50 of a neighbor whose
hostname does not include ``spine``.
Explanation
^^^^^^^^^^^
In this recipe we use neighbordb to link a pattern with a definition. When a node
executes the bootstrap script it will send the ZTPServer some information about
itself. The ZTPServer will not find any existing directory with the node's
System-ID (System MAC or Serial Number depending upon your configuration) so it
next checks neighbordb to try and find a match. The ZTPServer will analyze
the node's LLDP neighbors, find the match in neighbordb and then apply the ``tora``
definition.
.. End of Match pattern that includes some string
ZTPServer VM on EOS in a L2WOM
==============================
.. The line below adds a local TOC
.. contents:: :local:
:depth: 1
Files Needed
------------
* ``ztps.vmdk`` : the VM disk image for the ZTPServer VM
* ``startup-config``: a text file (with no extension)
* ``ztps.sh`` : a bash shell script
* ``ztps.xml`` : an xml file
* ``fullrecover`` : an empty text file (with no extension)
* ``boot-config`` : a text file (with no extension); contains a single line: ``SWI=flash:EOS.swi``
* ``EOS.swi`` : download an EOS image and rename it to ``EOS.swi``
.. End of Files Needed
ztps.vmdk
---------
Objective
^^^^^^^^^
I want to create a ZTPServer vmdk file to use on EOS.
Solution
^^^^^^^^
The ZTPServer vmdk file can be created using either of the methods below:
1) Automatically Create a Full-Featured ZTPServer: https://github.com/arista-eosplus/packer-ZTPServer
2) Create your own VM and install ZTPServer as instructed in the "Installation" section
Explanation
^^^^^^^^^^^
The turnkey solution detailed on GitHub will create a full-featured ztps.vmdk by executing a single command. The vmdk created using this method comes with certain parameters pre-defined (e.g. domain name, root user credentials, IP address). If desired, you can change these parameters by logging into the VM after it's created.
The second method requires more manual work compared to the first, but may be more suitable if you already have a VM built to your needs and simply want to add ZTPServer to it.
.. End of ztps.vmdk
startup-config
--------------
Objective
^^^^^^^^^
I need to prepare a startup-config for the first SPINE switch to enable ZTPServer.
Solution
^^^^^^^^
Essential parts of the configuration:
* ``event-handler ztps`` : used to start the shell script ``ztps.sh``
* ``virtual-machine ztps`` : used to start the ZTPServer VM on EOS
.. code-block:: console
interface Vlan1
ip address 192.168.1.10/24
event-handler ztps
trigger on-boot
action bash /mnt/flash/ztps.sh &
delay 300
virtual-machine ztps
config-file flash:/ztps.xml
enable
Explanation
^^^^^^^^^^^
The ``event-handler ztps`` is triggered on-boot to kick off the shell script ``ztps.sh``. A delay of 300 seconds is applied before the script executes, to make sure all the necessary subsystems are in place first.
External systems will connect to the VM via Vlan1 (other VLANs can be used as well). The host switch will connect to the VM via the Linux bridge (see ``ztps.sh``). Therefore, in this scenario the ZTPServer VM needs two interfaces.
For details of the shell script ``ztps.sh`` please refer to the corresponding section below.
.. End of startup-config
ztps.sh
-------
Objective
^^^^^^^^^
I want to create a shell script that sets up the environment needed by the ZTPServer VM when the switch boots up.
Solution
^^^^^^^^
.. code-block:: console
#!/bin/bash
# This script is used with the event-handler so that on-boot, we create the Linux bridge
# and restart the ZTPS VM
logger -t "ZTPS" -p local0.info "Starting the process for ZTPS VM deployment"
# Create Linux Bridge
sudo brctl addbr br0
sudo ifconfig br0 up
sudo ifconfig br0 172.16.130.254/24
logger -t "ZTPS" -p local0.info "Linux Bridge created"
#Now lets restart the ZTPS VM
sudo echo -e "enable\nconfigure terminal\nvirtual-machine ztps restart\n" | FastCli -M -e -p 15
logger -t "ZTPS" -p local0.info "ZTPS VM restarted"
Explanation
^^^^^^^^^^^
In order to enable connectivity to the VM locally (from the host switch), a Linux bridge interface needs to be created and assigned an IP in the same subnet as one of the interfaces on the VM.
The ZTPServer VM needs to be restarted after the switch boots up.
.. note:: The ZTPServer VM needs to have its default gateway pointed to the default gateway of Vlan1 (or your choice of VLAN).
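After the switch boots and the 300-second delay expires, you can verify from
the switch's bash prompt that the bridge came up with its address (these are
the same standard Linux commands the script itself uses):
.. code-block:: console
# Drop into bash from the EOS CLI
switch-name# bash
# Check the bridge and its IP address
[admin@switch ~]$ brctl show
[admin@switch ~]$ ifconfig br0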
.. End of ztps.sh
ztps.xml
--------
Objective
^^^^^^^^^
I want to prepare a KVM custom xml file to enable a VM on EOS.
Solution
^^^^^^^^
Key parts of the xml file to pay attention to:
* ``<domain type='kvm' id='1'>`` : id needs to be unique (if more than 1 VM)
* ``<driver name='qemu' type='vmdk'/>`` : make sure the type is ``vmdk``
* ``<source file='/mnt/usb1/ztps.vmdk'/>``: make sure the path is correct
* **Interface definition section** :
* MAC address in the xml need to match the MAC address of the interfaces on the ZTPServer VM.
* The first interface type is direct and is mapped to vlan1. This is the interface that will be used for other switches to reach the VM.
* The second interface type is bridge and is using Linux bridge. This interface is solely used for local host switch to VM connectivity.
.. code-block :: console
<domain type='kvm' id='1'>
<name>ztps</name>
<memory>1048576</memory>
<currentMemory>1048576</currentMemory>
<vcpu>1</vcpu>
<os>
<type arch='x86_64' machine='pc-i440fx-1.4'>hvm</type>
<boot dev='hd'/>
</os>
<features>
<acpi/>
<apic/>
<pae/>
</features>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>restart</on_crash>
<devices>
<emulator>/usr/bin/qemu-system-x86_64</emulator>
<disk type='file' device='disk'>
<driver name='qemu' type='vmdk'/>
<source file='/mnt/usb1/ztps.vmdk'/>
<target dev='hda' bus='ide'/>
<alias name='ide0-0-0'/>
<address type='drive' controller='0' bus='0' unit='0'/>
</disk>
<controller type='ide' index='0'>
<alias name='ide0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/>
</controller>
<interface type='direct'>
<mac address='08:00:27:bc:d7:38'/>
<source dev='vlan1' mode='bridge'/>
<target dev='macvtap0'/>
<model type='e1000'/>
<alias name='net0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
</interface>
<interface type='bridge'>
<mac address='08:00:27:85:0c:f8'/>
<source bridge='br0'/>
<target dev='macvtap1'/>
<model type='e1000'/>
<alias name='net1'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
</interface>
<serial type='pty'>
<source path='/dev/pts/5'/>
<target port='0'/>
<alias name='serial0'/>
</serial>
<console type='pty' tty='/dev/pts/5'>
<source path='/dev/pts/5'/>
<target type='serial' port='0'/>
<alias name='serial0'/>
</console>
<input type='tablet' bus='usb'>
<alias name='input0'/>
</input>
<input type='mouse' bus='ps2'/>
<graphics type='vnc' port='5900' autoport='no' listen='0.0.0.0'/>
<video>
<model type='vga' vram='8192' heads='1'/>
<alias name='video0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
</video>
<memballoon model='virtio'>
<alias name='balloon0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
</memballoon>
</devices>
</domain>
Explanation
^^^^^^^^^^^
The interface definition section defines how the interface(s) of the VM should be initialized. Since the vmdk already has interfaces defined/initialized, we have to use the same MAC address in the KVM definition file.
In the first interface definition we use ``interface type='direct'``. In this configuration we map the first interface of the VM to the ``vlan1`` interface directly, enabling connectivity to the VM from outside the host switch. However, ``interface type='direct'`` does not allow host switch to VM connectivity, therefore we define a second interface with ``interface type='bridge'`` and map it to the Linux bridge for this purpose.
The reason we cannot simply bridge Vlan1 with the Linux bridge (and use a single interface for both local and external connectivity) is that once an interface is enslaved to br0 it can no longer carry an IP address, so connectivity would break.
.. End of ztps.xml | ztpserver | /ztpserver-2.0.0.tar.gz/ztpserver-2.0.0/docs/cookbook/ztpsVMonEOS/l2wom.rst | l2wom.rst |
ZTPServer VM on EOS in a L2L3WM
===============================
.. The line below adds a local TOC
.. contents::
   :local:
   :depth: 1
Files Needed
------------
* ``ztps.vmdk`` : the VM disk image for the ZTPServer VM
* ``startup-config``: a text file (with no extension)
* ``ztps.sh`` : a bash shell script
* ``ztps.xml`` : an xml file
* ``fullrecover`` : an empty text file (with no extension)
* ``boot-config`` : a text file (with no extension); contains a single line: ``SWI=flash:EOS.swi``
* ``EOS.swi`` : download an EOS image and rename it to ``EOS.swi``
.. End of Files Needed
ztps.vmdk
---------
Objective
^^^^^^^^^
I want to create a ZTPServer vmdk file to use on EOS.
Solution
^^^^^^^^
The ZTPServer vmdk file can be created using either of the methods below:
1) Automatically Create a Full-Featured ZTPServer: https://github.com/arista-eosplus/packer-ZTPServer
2) Create your own VM and install ZTPServer as instructed in the "Installation" section
Explanation
^^^^^^^^^^^
The turnkey solution detailed on GitHub will create a full-featured ztps.vmdk with a single command. The vmdk created using this method comes with certain parameters pre-defined (i.e. domain name, root user credentials, IP address, etc.). If desired, you can change these parameters by logging into the VM after it is created.
The second method requires more manual work compared with the first, but may be more suitable if you already have a VM built to your needs and simply want to add ZTPServer to it.
.. End of ztps.vmdk
startup-config
--------------
Objective
^^^^^^^^^
I need to prepare a startup-config for the first SPINE switch to enable ZTPServer.
Solution
^^^^^^^^
Essential parts of the configuration:
* ``event-handler ztps`` : used to start the shell script ``ztps.sh``
* ``virtual-machine ztps`` : used to start the ZTPServer VM on EOS
.. code-block:: console
interface Management1
ip address 192.168.1.10/24
event-handler ztps
trigger on-boot
action bash /mnt/flash/ztps.sh &
delay 300
virtual-machine ztps
config-file flash:/ztps.xml
enable
Explanation
^^^^^^^^^^^
The ``event-handler ztps`` is triggered on-boot to kickstart the shell script ``ztps.sh``. There is a delay of 300 seconds before the script will be executed, to make sure all the necessary systems are in place before we run the script. For details of the script please see the ``ztps.sh`` section.
External systems will connect to the VM via the management network. The host switch will connect to the VM via the Linux bridge (See ``ztps.sh``). Therefore in this scenario we will need to have 2 interfaces on the ZTPServer VM.
For details of the shell script ``ztps.sh`` please refer to the corresponding section below.
.. End of startup-config
ztps.sh
-------
Objective
^^^^^^^^^
I want to create a shell script to set up all the necessary environment for ZTPServer when the switch boots up.
Solution
^^^^^^^^
.. code-block:: console
#!/bin/bash
# This script is used with the event-handler so that on-boot, we will create linux bridge,
#enable ip.forwarding, restart the ZTPS VM, and start DHCPD
logger -t "ZTPS" -p local0.info "Starting the process for ZTPS VM deployment"
# Create Linux Bridge
sudo brctl addbr br0
sudo ifconfig br0 up
sudo ifconfig br0 172.16.130.254/24
logger -t "ZTPS" -p local0.info "Linux Bridge created"
#Now lets restart the ZTPS VM
sudo echo -e "enable\nconfigure terminal\nvirtual-machine ztps restart\n" | FastCli -M -e -p 15
logger -t "ZTPS" -p local0.info "ZTPS VM restarted"
Explanation
^^^^^^^^^^^
In order to enable connectivity to the VM locally (from the host switch), a Linux bridge interface needs to be created and assigned an IP in the same subnet as one of the interfaces on the VM.
The ZTPServer VM needs to be restarted after the switch boots up.
.. note:: The ZTPServer VM needs to have its default gateway pointed to the default gateway of the management network.
.. End of ztps.sh
ztps.xml
--------
Objective
^^^^^^^^^
I want to prepare a KVM custom xml file to enable a VM on EOS.
Solution
^^^^^^^^
Key parts of the xml file to pay attention to:
* ``<domain type='kvm' id='1'>`` : id needs to be unique (if more than 1 VM)
* ``<driver name='qemu' type='vmdk'/>`` : make sure the type is ``vmdk``
* ``<source file='/mnt/usb1/ztps.vmdk'/>``: make sure the path is correct
* **Interface definition section** :
  * The MAC addresses in the xml need to match the MAC addresses of the interfaces on the ZTPServer VM.
  * The first interface type is direct and is mapped to ma1. This is the interface that will be used for other switches to reach the VM.
  * The second interface type is bridge and uses the Linux bridge. This interface is solely used for local host switch to VM connectivity.
.. code-block :: console
<domain type='kvm' id='1'>
<name>ztps</name>
<memory>1048576</memory>
<currentMemory>1048576</currentMemory>
<vcpu>1</vcpu>
<os>
<type arch='x86_64' machine='pc-i440fx-1.4'>hvm</type>
<boot dev='hd'/>
</os>
<features>
<acpi/>
<apic/>
<pae/>
</features>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>restart</on_crash>
<devices>
<emulator>/usr/bin/qemu-system-x86_64</emulator>
<disk type='file' device='disk'>
<driver name='qemu' type='vmdk'/>
<source file='/mnt/usb1/ztps.vmdk'/>
<target dev='hda' bus='ide'/>
<alias name='ide0-0-0'/>
<address type='drive' controller='0' bus='0' unit='0'/>
</disk>
<controller type='ide' index='0'>
<alias name='ide0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/>
</controller>
<interface type='direct'>
<mac address='08:00:27:bc:d7:38'/>
<source dev='ma1' mode='bridge'/>
<target dev='macvtap0'/>
<model type='e1000'/>
<alias name='net0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
</interface>
<interface type='bridge'>
<mac address='08:00:27:85:0c:f8'/>
<source bridge='br0'/>
<target dev='macvtap1'/>
<model type='e1000'/>
<alias name='net1'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
</interface>
<serial type='pty'>
<source path='/dev/pts/5'/>
<target port='0'/>
<alias name='serial0'/>
</serial>
<console type='pty' tty='/dev/pts/5'>
<source path='/dev/pts/5'/>
<target type='serial' port='0'/>
<alias name='serial0'/>
</console>
<input type='tablet' bus='usb'>
<alias name='input0'/>
</input>
<input type='mouse' bus='ps2'/>
<graphics type='vnc' port='5900' autoport='no' listen='0.0.0.0'/>
<video>
<model type='vga' vram='8192' heads='1'/>
<alias name='video0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
</video>
<memballoon model='virtio'>
<alias name='balloon0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
</memballoon>
</devices>
</domain>
Explanation
^^^^^^^^^^^
The interface definition section defines how the interface(s) of the VM should be initialized. Since the vmdk already has interfaces defined/initialized, we have to use the same MAC address in the KVM definition file.
In the first interface definition we use ``interface type='direct'``. In this configuration we map the first interface of the VM to the ``ma1`` interface directly, enabling connectivity to the VM from outside the host switch. However, ``interface type='direct'`` does not allow host switch to VM connectivity, therefore we define a second interface with ``interface type='bridge'`` and map it to the Linux bridge for this purpose.
The reason we cannot simply bridge ma1 with the Linux bridge (and use a single interface for both local and external connectivity) is that once an interface is enslaved to br0 it can no longer carry an IP address, so connectivity would break.
.. End of ztps.xml
| ztpserver | /ztpserver-2.0.0.tar.gz/ztpserver-2.0.0/docs/cookbook/ztpsVMonEOS/l2l3wm.rst | l2l3wm.rst |
ZTPServer VM on EOS in a L3WOM
==============================
.. The line below adds a local TOC
.. contents::
   :local:
   :depth: 1
Files Needed
------------
* ``ztps.vmdk`` : the VM disk image for the ZTPServer VM
* ``startup-config``: a text file (with no extension)
* ``ztps.sh`` : a bash shell script
* ``ztps.xml`` : an xml file
* ``dhcpd.conf`` : a text file for Linux dhcpd configuration
* ``dhcpd.rpm`` : a DHCP server RPM to be installed on EOS
* ``ztps_daemon`` : a python script
* ``fullrecover`` : an empty text file (with no extension)
* ``boot-config`` : a text file (with no extension); contains a single line: ``SWI=flash:EOS.swi``
* ``boot-extension``: a text file (with no extension); contains a single line: ``dhcpd.rpm``
* ``EOS.swi`` : download an EOS image and rename it to ``EOS.swi``
.. End of Files Needed
ztps.vmdk
---------
Objective
^^^^^^^^^
I want to create a ZTPServer vmdk file to use on EOS.
Solution
^^^^^^^^
The ZTPServer vmdk file can be created using either of the methods below:
1) Automatically Create a Full-Featured ZTPServer: https://github.com/arista-eosplus/packer-ZTPServer
2) Create your own VM and install ZTPServer as instructed in the "Installation" section
Explanation
^^^^^^^^^^^
The turnkey solution detailed on GitHub will create a full-featured ztps.vmdk with a single command. The vmdk created using this method comes with certain parameters pre-defined (i.e. domain name, root user credentials, IP address, etc.). If desired, you can change these parameters by logging into the VM after it is created.
The second method requires more manual work compared with the first, but may be more suitable if you already have a VM built to your needs and simply want to add ZTPServer to it.
.. End of ztps.vmdk
startup-config
--------------
Objective
^^^^^^^^^
I need to prepare a startup-config for the first SPINE switch to enable ZTPServer.
Solution
^^^^^^^^
Essential parts of the configuration:
* ``interface Loopback2`` : need a loopback interface on the same subnet as the VM
* ``daemon ztps`` : used to run the ``ztps_daemon`` python script in the background
* ``event-handler ztps`` : used to start the shell script ``ztps.sh``
* ``virtual-machine ztps`` : used to start the ZTPServer VM on EOS
* ``management api http-commands``: need to enable eAPI for ``daemon ztps`` to function
.. code-block:: console
interface Loopback2
ip address 172.16.130.253/24
daemon ztps
command /mnt/flash/ztps_daemon &
event-handler ztps
trigger on-boot
action bash /mnt/flash/ztps.sh &
delay 300
virtual-machine ztps
config-file flash:/ztps.xml
enable
management api http-commands
protocol http localhost
no shutdown
Explanation
^^^^^^^^^^^
The ``event-handler ztps`` is triggered on-boot to kickstart the shell script ``ztps.sh``. There is a delay of 300 seconds before the script will be executed, to make sure all the necessary systems are in place before we run the script. For details of the script please see the ``ztps.sh`` section.
The ``management api http-commands`` section enables Arista eAPI on the host switch; eAPI is leveraged by ``ztps_daemon``. eAPI can be accessed remotely via http or https, locally via http, or by binding to a UNIX socket (only available from 4.14.5F onward). Since the daemon is a script that runs locally, we can either enable eAPI on localhost via http (if you are running 4.14.5F or later), or enable eAPI over https (which requires authentication).
The ``daemon ztps`` section runs a python script in the background as a daemon to restart DHCPD whenever an interface comes up.
For details of the shell script ``ztps.sh`` and the python script ``ztps_daemon`` please refer to the corresponding sections below.
.. note:: The loopback interface is only needed if you plan to bootstrap a L3 ECMP fabric without a management network. In this scenario, the loopback address needs to be advertised in the ECMP routing protocol to enable connectivity for the downstream devices in the fabric.
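As a quick sanity check (not part of the recipe itself), eAPI can be exercised locally with a single JSON-RPC request. The URL below assumes the same ``http://localhost:8080/command-api`` endpoint used by ``ztps_daemon`` later in this recipe; adjust it to match your ``management api http-commands`` configuration.
.. code-block:: console
    curl -s -X POST http://localhost:8080/command-api \
         -H 'Content-Type: application/json' \
         -d '{"jsonrpc": "2.0", "method": "runCmds",
              "params": {"version": 1, "cmds": ["show version"], "format": "json"},
              "id": "1"}'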
.. End of startup-config
ztps.sh
-------
Objective
^^^^^^^^^
I want to create a shell script to set up all the necessary environment for ZTPServer when the switch boots up.
Solution
^^^^^^^^
.. code-block:: console
#!/bin/bash
# This script is used with the event-handler so that on-boot, we will create linux bridge,
#enable ip.forwarding, restart the ZTPS VM, and start DHCPD
logger -t "ZTPS" -p local0.info "Starting the process for ZTPS VM deployment"
# Create Linux Bridge
sudo brctl addbr br0
sudo ifconfig br0 up
sudo ifconfig br0 172.16.130.254/24
logger -t "ZTPS" -p local0.info "Linux Bridge created"
# Enable ip.forwarding
sudo sysctl net.ipv4.conf.all.forwarding=1
sudo sysctl net.ipv4.ip_forward=1
logger -t "ZTPS" -p local0.info "ip.forwarding enabled"
# Move the DHCP server RPM to the appropriate folder on EOS for installation
# Move the dhcpd.conf file to the appropriate folder
sudo cp /mnt/flash/dhcp-4.2.0-23.P2.fc14.i686.rpm /mnt/flash/.extensions/dhcpd.rpm
sudo cp /mnt/flash/dhcpd.conf /etc/dhcp/
sudo /usr/sbin/dhcpd
sleep 5
#make sure dhcpd is running before we continue
ps aux | grep "dhcpd" | grep -v grep
if [ $? -eq 0 ]
then
{
logger -t "ZTPS" -p local0.info "DHCPD is running. Restart ZTPS VM."
#Now lets restart the ZTPS VM
sudo echo -e "enable\nconfigure terminal\nvirtual-machine ztps restart\n" | FastCli -M -e -p 15
logger -t "ZTPS" -p local0.info "ZTPS VM restarted"
exit 0
}
else
logger -t "ZTPS" -p local0.info "Looks like DHCPD didn't start. Lets sleep for a few seconds and try again"
sleep 10
fi
Explanation
^^^^^^^^^^^
In order to enable connectivity to the VM both remotely and locally (from the host switch), a Linux bridge interface needs to be created and assigned an IP in the same subnet as the VM; Linux ``ip.forwarding`` also needs to be enabled in the kernel so that packets can be routed to the VM.
EOS does not come with dhcpd preinstalled, therefore a DHCP server RPM needs to be downloaded, installed and started. Download the RPM from `here <https://docs.google.com/a/arista.com/document/d/1fmhvousmZYr8Sidiv9rBf_PZDT-65QX0um215s_9K0c/edit#>`_ and rename it to ``dhcpd.rpm``. The RPM needs to be moved to ``/mnt/flash/.extensions``, and a ``boot-extension`` file listing the RPM needs to be present in ``/mnt/flash`` in order for the RPM to be installed persistently after a reboot.
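A minimal sketch of how those two pieces could be staged from bash (the RPM filename matches the one copied by ``ztps.sh`` above; adjust paths to your environment):
.. code-block:: console
    # copy the RPM into the extensions directory and register it for boot
    sudo cp /mnt/flash/dhcp-4.2.0-23.P2.fc14.i686.rpm /mnt/flash/.extensions/dhcpd.rpm
    echo "dhcpd.rpm" | sudo tee /mnt/flash/boot-extension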
The ZTPServer VM needs to be restarted after the switch boots up.
.. note:: The ZTPServer VM needs to have its default gateway pointed to the br0 interface IP address.
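For reference, a minimal sketch of what that looks like inside the VM, assuming a typical Linux guest whose interface is already configured in the 172.16.130.0/24 subnet (the exact commands may differ on your distribution):
.. code-block:: console
    # run inside the ZTPServer VM
    ip route replace default via 172.16.130.254
    ip route show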
.. End of ztps.sh
ztps.xml
--------
Objective
^^^^^^^^^
I want to prepare a KVM custom xml file to enable a VM on EOS.
Solution
^^^^^^^^
Key parts of the xml file to pay attention to:
* ``<domain type='kvm' id='1'>`` : in case multiple VMs are running on the system, make sure the configured ID is unique
* ``<driver name='qemu' type='vmdk'/>`` : make sure the type is ``vmdk``
* ``<source file='/mnt/usb1/ztps.vmdk'/>``: make sure the path is correct
* ``<mac address='08:00:27:85:0c:f8'/>`` : make sure this MAC matches the MAC address of the interface on the ZTPServer VM that you intend to use for connectivity
* ``<target dev='vnet0'/>`` : make sure the target device type is ``vnet0``
.. code-block :: console
<domain type='kvm' id='1'>
<name>ztps</name>
<memory>1048576</memory>
<currentMemory>1048576</currentMemory>
<vcpu>1</vcpu>
<os>
<type arch='x86_64' machine='pc-i440fx-1.4'>hvm</type>
<boot dev='hd'/>
</os>
<features>
<acpi/>
<apic/>
<pae/>
</features>
<clock offset='utc'/>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>restart</on_crash>
<devices>
<emulator>/usr/bin/qemu-system-x86_64</emulator>
<disk type='file' device='disk'>
<driver name='qemu' type='vmdk'/>
<source file='/mnt/usb1/ztps.vmdk'/>
<target dev='hda' bus='ide'/>
<alias name='ide0-0-0'/>
<address type='drive' controller='0' bus='0' unit='0'/>
</disk>
<controller type='ide' index='0'>
<alias name='ide0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/>
</controller>
<interface type='bridge'>
<mac address='08:00:27:85:0c:f8'/>
<source bridge='br0'/>
<target dev='vnet0'/>
<model type='e1000'/>
<alias name='net0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
</interface>
<serial type='pty'>
<source path='/dev/pts/5'/>
<target port='0'/>
<alias name='serial0'/>
</serial>
<console type='pty' tty='/dev/pts/5'>
<source path='/dev/pts/5'/>
<target type='serial' port='0'/>
<alias name='serial0'/>
</console>
<input type='tablet' bus='usb'>
<alias name='input0'/>
</input>
<input type='mouse' bus='ps2'/>
<graphics type='vnc' port='5900' autoport='no' listen='0.0.0.0'/>
<video>
<model type='vga' vram='8192' heads='1'/>
<alias name='video0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
</video>
<memballoon model='virtio'>
<alias name='balloon0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
</memballoon>
</devices>
</domain>
Explanation
^^^^^^^^^^^
The interface definition section defines how the interface(s) of the VM should be initialized. Since the vmdk already has interfaces defined/initialized, we have to use the same MAC address in the KVM definition file.
The target device type should be ``vnet0`` to enable connectivity to the VM both remotely and locally from the host switch. Another option is the macvtap device type, but it prevents connectivity for locally routed packets (i.e. when the routing toward the VM takes place on the host switch).
.. End of ztps.xml
dhcpd.conf
----------
Objective
^^^^^^^^^
I want to prepare a dhcpd.conf file for running DHCPD on EOS.
Solution
^^^^^^^^
.. code-block :: console
class "ARISTA" {
match if substring(option vendor-class-identifier, 0, 6) = "Arista";
option bootfile-name "http://172.16.130.10:8080/bootstrap";
}
# Example
subnet 10.1.1.0 netmask 255.255.255.252 {
option routers 10.1.1.1;
default-lease-time 86400;
max-lease-time 86400;
pool {
range 10.1.1.2 10.1.1.2;
allow members of "ARISTA";
}
}
Explanation
^^^^^^^^^^^
The ``class "ARISTA"`` section defines match criteria so that any subnet definition that uses this class only allocates IPs if the requester is an Arista device. This class also defines a bootstrap file that will be downloaded to the requester.
.. note:: The IP address and TCP port number defined for the bootfile needs to match the ZTPServer VM configuration.
The subnet section provides an example of how it can be defined. If you are bootstrapping an L3 ECMP network without a management network, this section needs to be repeated for every point-to-point link connecting to each leaf switch.
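For example, a second point-to-point link toward another leaf could be covered by an additional stanza of the same shape (the addresses here are illustrative only):
.. code-block:: console
    subnet 10.1.1.4 netmask 255.255.255.252 {
      option routers 10.1.1.5;
      default-lease-time 86400;
      max-lease-time 86400;
      pool {
        range 10.1.1.6 10.1.1.6;
        allow members of "ARISTA";
      }
    }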
.. note:: The ZTPServer VM also runs dhcpd, but in the scenario of L3 ECMP without a management network, we are unable to leverage that. This is because DHCP relay from the host switch to the VM is currently not supported in EOS.
.. End of dhcpd.conf
ztps_daemon
-----------
Objective
^^^^^^^^^
I want to create a python script that restarts DHCPD whenever an interface comes up.
Solution
^^^^^^^^
.. code-block:: python
#!/usr/bin/env python3
import jsonrpclib
import os
import time
#PROTO = "https"
#USERNAME = "admin"
#PASSWORD = "admin"
#HOSTNAME = "172.16.130.20"
class EapiClient(object):
'''
Instantiate a Eapi connection client object
for interacting with EAPI
'''
def __init__(self):
# For EOS 4.14.5F and later, you can enable locally run scripts without needing to authenticate
# If you are running earlier versions, just uncomment next line and also the CONSTANTS above
#switch_url = '{}://{}:{}@{}/command-api'.format(PROTO, USERNAME, PASSWORD, HOSTNAME)
switch_url = 'http://localhost:8080/command-api'
self.client = jsonrpclib.Server(switch_url)
def connected_interfaces(self):
cmd = "show interfaces status connected"
response = self.client.runCmds(1, [cmd])[0]
connected_intfs = response['interfaceStatuses'].keys()
return connected_intfs
def restart_dhcpd(eapi):
'''
Monitor the connected interfaces.
If there are newly connected interface(s), restart dhcpd
'''
connected_intfs = []
while True:
new_connected_intfs = eapi.connected_interfaces()
for intf in new_connected_intfs:
if intf not in connected_intfs:
os.system('sudo service dhcpd restart')
connected_intfs = new_connected_intfs
time.sleep(10)
def main():
eapi = EapiClient()
restart_dhcpd(eapi)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
Explanation
^^^^^^^^^^^
DHCPD only binds to interfaces that are UP when the process starts. Since we are running DHCPD directly on the SPINE switch, there is no guarantee that the interfaces connected to the LEAFs are up when DHCPD starts. Therefore, we run a script/daemon in the background that continuously checks the connected interface status and restarts DHCPD whenever new interfaces come up, so that it binds to the newly connected interfaces.
.. End of ztps_daemon
| ztpserver | /ztpserver-2.0.0.tar.gz/ztpserver-2.0.0/docs/cookbook/ztpsVMonEOS/l3wom.rst | l3wom.rst |
This directory holds test definitions for testing nodes and patterns against neighbordb. Each file represents a test that includes a neighbordb pattern and a test_definition.
* Step 1: To create a new test case, create a new YAML file in test/neighbordb (the name must end in '_test')
* Step 2: Add the neighbordb pattern to the YAML file
* Step 3: Add the node information to the YAML file
Example:
```yaml
name: example test

neighbordb:
  variables:
    ny_pod: regex("ny\d+")
  patterns:
    - name: sample node template 1
      definition: test
      node: 000c29f5d27da
      interfaces:
        - Ethernet1: any
        - Ethernet3: localhost:Ethernet1
        - Ethernet49: localhost:Ethernet49
        - Ethernet50:
            device: localhost
            port: Ethernet50

nodes:
  - node: 2b3d.json
    details:
      model: vEOS
      serialnumber: 1234567890
      systemmac: 00:1c:73:1a:2b:3d
      version: 4.12.0
    neighbors:
      Ethernet1:
        - device: pod1-spine1
          port: Ethernet1
    matches: 0
    match_includes:
      - sample node template 1
```
| ztpserver | /ztpserver-2.0.0.tar.gz/ztpserver-2.0.0/test/neighbordb/README.md | README.md |
ZTQ: Zopen Task Queue
===========================================
Introduction
--------------------
The ZTQ queue service is split into three packages: ztq_core, ztq_worker and ztq_console. It uses redis as the queue backend by default.
ztq_core ::

    Provides a set of APIs for pushing tasks onto queues; ztq_worker picks the tasks up from the queues and executes them.
    You can find it here: http://pypi.python.org/pypi/ztq_core/

ztq_worker::

    The receiving end of the queue. Each thread blocks on and watches one queue; each such thread is called a worker.
    When a task is pushed onto a queue, the corresponding worker automatically pulls it down and executes it.
    You can find it here: http://pypi.python.org/pypi/ztq_worker/

ztq_console::

    Monitors the execution of every task in every queue and issues commands. This package is optional.
    You can find it here: http://pypi.python.org/pypi/ztq_console/

About ZTQ
--------------------
::

 * Open source, MIT licensed
 * Written in Python, easy to use and to modify
 * Supports Linux and Windows
 * Reliable; copes with sudden power loss and similar failures
 * Manageable; ships with the ztq_console monitoring backend
 * Flexible; workers can run on different machines and be hot-plugged at any time
 * Simple to use

Installation
--------------------
::

    pip install ztq_core
    pip install ztq_worker
    pip install ztq_console

Usage
-------------------
#. First define an ordinary task ::

    # my_send.py
    def send(body):
        print 'START: ', body
        sleep(5)
        print 'END:', body

    def send2(body):
        print 'START2', body
        raise Exception('connection error')

#. Turn the ordinary task into a queue task ::

    # my_send.py
    import time
    from ztq_core import async

    @async  # use the default queue "default"
    def send(body):
        print 'START: ', body
        sleep(5)
        print 'END:', body

    @async(queue='mail')  # use the "mail" queue
    def send2(body):
        print 'START2', body
        raise Exception('connection error')

#. Run the worker ::

    # run: bin/ztq_worker app.ini
    # app.ini example; the ztq_worker package ships an app.ini in its config directory

    [server]
    host = localhost
    port = 6379
    db = 0
    alias = w01
    active_config = false
    modules = my_send  # every task module that needs to be imported, one per line

    [queues]
    default= 0   # "default" queue, one worker thread
    mail = 0, 0  # "mail" queue, two worker threads

    [log]
    handler_file = ./ztq_worker.log
    level = ERROR

#. Run ::

    import ztq_core
    from my_send import send

    # set up the Redis connection
    ztq_core.setup_redis('default', 'localhost', 6379, 0)

    send('hello, world')

    # choose the queue dynamically
    send('hello world from mail', ztq_queue='mail')

#. More detailed examples can be found in demo.py inside the ztq_core package

More advanced features
--------------------------
#. Preemptive execution ::

    # last in, first out; if the task is already queued, it is given priority
    send(body, ztq_first=True)

#. Probing task state ::

    # ztq_first: give the task priority if it already exists; ztq_run: run it if it does not exist
    # returns "running" if the task is executing, "queue" if it is waiting in the queue,
    # "error" if it failed, and "none" if it is neither queued nor running
    ping_task(send, body, ztq_first=True, ztq_run=True)

#. Transaction support ::

    import transaction
    ztq_core.enable_transaction(True)
    send_mail(from1, to1, body1)
    send_mail(from2, to2, body2)
    transaction.commit()

    # transactions can also be disabled for a single call
    send_mail(from2, to2, body2, ztq_transaction=False)

#. Scheduled (cron) tasks ::

    from ztq_core.async import async
    from ztq_core import redis_wrap
    from ztq_core.cron import has_cron, add_cron_job

    @async(queue='clock-0')
    def bgrewriteaof():
        """ compact redis' AOF file """
        redis = redis_wrap.get_redis()
        redis.bgrewriteaof()

    # if this cron job is not on the queue yet, add it; compacts redis automatically on a schedule
    if not has_cron(bgrewriteaof):
        add_cron({'hour':1}, bgrewriteaof)

#. Chaining tasks ::

    from ztq_core import prepare_task
    # build a task from a (function, arguments) pair
    callback = prepare_task(send, body)
    # after send_mail finishes, the queue automatically pushes callback onto its queue
    send_mail(body, ztq_callback=callback)

#. Error handling ::

    from ztq_core import prepare_task

    @async(queue='mail')
    def fail_callback(return_code, return_msg):
        print return_code, return_msg

    fcallback = prepare_task(send2)
    # if the task send raises any exception, fcallback is pushed onto the specified queue
    send(body, ztq_fcallback=fcallback)

#. Progress callbacks ::

    import ztq_worker

    @async(queue='doc2pdf')
    def doc2pdf(filename):
        ...
        # reports progress; triggers the registered progress callback
        ztq_worker.report_progress(page=2)
        ...

    from ztq_core import prepare_task
    pcallback = prepare_task(send2, body)
    doc2pdf(filename, ztq_pcallback=pcallback)

#. Batching ::

    # for performance, several xapian index operations are committed to the database in one go
    @async(queue='xapian')
    def index(data):
        pass

    def do_commit():
        xapian_conn.commit()

    # commit to the database once for every 20 index tasks;
    # if fewer than 20 are pending but the queue is empty, commit as well
| ztq_console | /ztq_console-1.2.5.tar.gz/ztq_console-1.2.5/README.txt | README.txt |
ZTQ: Z Task Queue
===========================================

ZTQ is an open-source asynchronous task queue service for Python that uses redis for queue storage and communication.

Unlike other queue services, ZTQ is designed to be:

- simple to implement
- easy to use
- reliable
- manageable when errors or congestion occur
- easy to debug
- flexible in scheduling, so servers are used efficiently

A more detailed introduction is available at: https://github.com/everydo/ztq/raw/master/about-ztq.pptx

ZTQ is sponsored by Everydo (http://everydo.com) and is used extensively in its cloud viewing and document management systems.

Main authors and maintainers:

- Xu Taozhe http://weibo.com/xutaozhe
- Pan Junyong http://weibo.com/panjunyong

Installation
--------------------
ZTQ consists of four packages:

1. ztq_core: the low-level API for operating on the queues
2. ztq_worker: the queue processing service
3. ztq_console: a monitoring backend for the queues (built with Pyramid); running it is optional
4. ztq_demo: a demo example

All of them can be installed with standard pip:

    pip install ztq_core
    pip install ztq_worker
    pip install ztq_console

Usage
-------------------
Detailed, runnable examples can be found in the ztq_demo package.

1. First define an ordinary task

        import time

        def send(body):
            print 'START: ', body
            time.sleep(5)
            print 'END:', body

        def send2(body):
            print 'START2', body
            raise Exception('connection error')

2. Turn the ordinary task into a queue task

        import time
        from ztq_core import async

        @async  # use the default queue "default"
        def send(body):
            print 'START: ', body
            time.sleep(5)
            print 'END:', body

        @async(queue='mail')  # use the "mail" queue
        def send2(body):
            print 'START2', body
            raise Exception('connection error')

3. Run the worker

    Start the worker with:

        bin/ztq_worker worker.ini

    An example worker.ini:

        [server]
        host = localhost
        port = 6379
        db = 0
        alias = w01
        active_config = false
        modules = ztq_demo.tasks   # every task module that needs to be imported, one per line

        [queues]
        default= 0   # "default" queue, one worker thread
        mail = 0, 0  # "mail" queue, two worker threads

        [log]
        handler_file = ./ztq_worker.log
        level = ERROR

4. Run

        import ztq_core
        from ztq_demo.tasks import send

        # set up the Redis connection
        ztq_core.setup_redis('default', 'localhost', 6379, 0)

        send('hello, world')

        # choose the queue dynamically
        send('hello world from mail', ztq_queue='mail')

Starting the monitoring backend
--------------------

    bin/pserve app.ini

More advanced features
--------------------------

1. Preemptive execution

    Last in, first out; if the task is already queued, it is given priority.

        send(body, ztq_first=True)

2. Probing task state

        ping_task(send, body, ztq_first=True, ztq_run=True)

    A task can be in the following states:

    * running: the task is currently executing
    * queue: the task is waiting in the queue
    * error: the task failed
    * none: the task is neither queued nor running

    Parameters:

    - ztq_first: give the task priority if it already exists
    - ztq_run: run the task if it does not exist

3. Transaction support

        import transaction
        ztq_core.enable_transaction(True)
        send_mail(from1, to1, body1)
        send_mail(from2, to2, body2)
        transaction.commit()

        # transactions can also be disabled for a single call
        send_mail(from2, to2, body2, ztq_transaction=False)

4. Scheduled (cron) tasks

        from ztq_core.async import async
        from ztq_core import redis_wrap
        from ztq_core.cron import has_cron, add_cron_job

        @async(queue='clock-0')
        def bgrewriteaof():
            """ compact redis' AOF file """
            redis = redis_wrap.get_redis()
            redis.bgrewriteaof()

        # if this cron job is not on the queue yet, add it; compacts redis automatically on a schedule
        if not has_cron(bgrewriteaof):
            add_cron({'hour':1}, bgrewriteaof)

5. Chaining tasks

        from ztq_core import prepare_task
        # build a task from a (function, arguments) pair
        callback = prepare_task(send, body)
        # after send_mail finishes, the queue automatically pushes callback onto its queue
        send_mail(body, ztq_callback=callback)

6. Error handling

        from ztq_core import prepare_task

        @async(queue='mail')
        def fail_callback(return_code, return_msg):
            print return_code, return_msg

        fcallback = prepare_task(send2)
        # if the task send raises any exception, fcallback is pushed onto the specified queue
        send(body, ztq_fcallback=fcallback)

7. Progress callbacks

        import ztq_worker

        @async(queue='doc2pdf')
        def doc2pdf(filename):
            ...
            # reports progress; triggers the registered progress callback
            ztq_worker.report_progress(page=2)
            ...

        from ztq_core import prepare_task
        pcallback = prepare_task(send2, body)
        doc2pdf(filename, ztq_pcallback=pcallback)

8. Batching

        # for performance, several xapian index operations are committed to the database in one go
        @async(queue='xapian')
        def index(data):
            pass

        def do_commit():
            xapian_conn.commit()

        # commit to the database once for every 20 index tasks;
        # if fewer than 20 are pending but the queue is empty, commit as well
register_batch_queue(‘xapian’, 20, batch_func=do_commit)
| ztq_worker | /ztq_worker-1.3.2.tar.gz/ztq_worker-1.3.2/README.md | README.md |
[](https://pypi.python.org/pypi/ztrack)
[](https://pypi.python.org/pypi/ztrack)
[](https://github.com/psf/black)
[](https://pycqa.github.io/isort/)
[](http://mypy-lang.org/)
# ztrack
Toolbox for zebrafish pose estimation.
## Installation
Use the package manager [pip](https://pip.pypa.io/en/stable/) to install ztrack.
```bash
pip install ztrack
```
| ztrack | /ztrack-0.4.1.tar.gz/ztrack-0.4.1/README.md | README.md |
<div align="right">
  Language:
  🇨🇳
  <a title="Chinese" href="./README.zh_CN.md">🇺🇸</a>
  <!-- <a title="Russian" href="../ru/README.md">🇷🇺</a> -->
</div>
<div align="center"><a title="" href="https://github.com/ZJCV/ZTransforms.git"><img align="center" src="./imgs/ZTransforms.png"></a></div>
<p align="center">
«ZTransforms» is an image data augmentation library
<br>
<br>
<a href="https://github.com/RichardLitt/standard-readme"><img src="https://img.shields.io/badge/standard--readme-OK-green.svg?style=flat-square"></a>
<a href="https://conventionalcommits.org"><img src="https://img.shields.io/badge/Conventional%20Commits-1.0.0-yellow.svg"></a>
<a href="http://commitizen.github.io/cz-cli/"><img src="https://img.shields.io/badge/commitizen-friendly-brightgreen.svg"></a>
</p>
The architecture follows [pytorch/vision](https://github.com/pytorch/vision/), with an [albumentations](https://github.com/albumentations-team/albumentations/tree/f2462be3a4d01c872474d0e7fc0f32f387b06340) backend added.

* Input image format: `numpy ndarray`
* Data type: `uint8`
* Channel order: `rgb`

Key dependency versions:

* `pytorch/vision: c1f85d34761d86db21b6b9323102390834267c9b`
* `albumentations-team/albumentations: v0.5.2`

## Table of Contents

- [Table of Contents](#table-of-contents)
- [Background](#background)
- [Installation](#installation)
- [Usage](#usage)
- [Maintainers](#maintainers)
- [Thanks](#thanks)
- [Contributing](#contributing)
- [License](#license)

## Background

[PyTorch](https://github.com/pytorch/pytorch) provides an official data augmentation implementation: [transforms](https://github.com/pytorch/vision/tree/master/torchvision/transforms). The module performs augmentation on top of `PIL`, with the following pros and cons:

* Pros:
  1. a clean, clear data architecture
  2. a simple, easy-to-follow processing flow
  3. thorough documentation
* Cons:
  1. with the `PIL` backend, the set of available augmentations is limited
  2. with the `PIL` backend, execution is slower than in other libraries

torchvision is aware of the speed problem and has been improving it since `0.8.0`:

```
Prior to v0.8.0, transforms in torchvision have traditionally been PIL-centric and presented multiple limitations due to that. Now, since v0.8.0, transforms implementations are Tensor and PIL compatible and we can achieve the following new features:
transform multi-band torch tensor images (with more than 3-4 channels)
torchscript transforms together with your model for deployment
support for GPU acceleration
batched transformation such as for videos
read and decode data directly as torch tensor with torchscript support (for PNG and JPEG image formats)
```

* on the one hand, the new [Pillow-SIMD](https://github.com/uploadcare/pillow-simd) backend speeds up `PIL`;
* on the other hand, a `PyTorch` backend adds `GPU` acceleration.

Two other augmentation libraries provide detection/segmentation augmentations in addition to classification ones:

* [imgaug](https://github.com/aleju/imgaug): implements a larger set of augmentation operations;
* [albumentations](https://github.com/albumentations-team/albumentations/tree/f2462be3a4d01c872474d0e7fc0f32f387b06340): picks the fastest implementation of each augmentation across several backends (`pytorch/imgaug/opencv`); see [Benchmarking results](https://github.com/albumentations-team/albumentations#benchmarking-results).

Both libraries implement a data-flow style similar to `transforms`. Personally, I still like the official implementation and usage best, so this repository was created: it is based on [transforms](https://github.com/pytorch/vision/tree/master/torchvision/transforms), adds an `albumentations` backend to the existing functionality, and adds new augmentation operations (implemented with `imgaug` when `albumentations` does not provide them).
## Installation
```
$ pip install ztransforms
```
## Usage
```
# import torchvision.transforms as transforms
import ztransforms.cls as transforms
...
...
```
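A slightly fuller sketch, assuming the `ztransforms.cls` API mirrors `torchvision.transforms` as described above (the exact set of transforms available depends on the installed version):
```python
import numpy as np
import ztransforms.cls as transforms

# ztransforms operates on uint8 RGB numpy arrays
image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)

transform = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.Resize(256),
    transforms.CenterCrop(224),
])

augmented = transform(image)
```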
## Maintainers
* zhujian - *Initial work* - [zjykzj](https://github.com/zjykzj)
## Thanks
* [pytorch/vision](https://github.com/pytorch/vision)
* [albumentations-team/albumentations](https://github.com/albumentations-team/albumentations/tree/f2462be3a4d01c872474d0e7fc0f32f387b06340)
* [aleju/imgaug](https://github.com/aleju/imgaug)
* [opencv/opencv](https://github.com/opencv/opencv)
```
@Article{info11020125,
AUTHOR = {Buslaev, Alexander and Iglovikov, Vladimir I. and Khvedchenya, Eugene and Parinov, Alex and Druzhinin, Mikhail and Kalinin, Alexandr A.},
TITLE = {Albumentations: Fast and Flexible Image Augmentations},
JOURNAL = {Information},
VOLUME = {11},
YEAR = {2020},
NUMBER = {2},
ARTICLE-NUMBER = {125},
URL = {https://www.mdpi.com/2078-2489/11/2/125},
ISSN = {2078-2489},
DOI = {10.3390/info11020125}
}
@misc{imgaug,
author = {Jung, Alexander B.
and Wada, Kentaro
and Crall, Jon
and Tanaka, Satoshi
and Graving, Jake
and Reinders, Christoph
and Yadav, Sarthak
and Banerjee, Joy
and Vecsei, Gábor
and Kraft, Adam
and Rui, Zheng
and Borovec, Jirka
and Vallentin, Christian
and Zhydenko, Semen
and Pfeiffer, Kilian
and Cook, Ben
and Fernández, Ismael
and De Rainville, François-Michel
and Weng, Chi-Hung
and Ayala-Acevedo, Abner
and Meudec, Raphael
and Laporte, Matias
and others},
title = {{imgaug}},
howpublished = {\url{https://github.com/aleju/imgaug}},
year = {2020},
note = {Online; accessed 01-Feb-2020}
}
```
## Contributing
Anyone is welcome to contribute! Open an [issue](https://github.com/zjykzj/ZTransforms/issues) or submit a pull request.
Notes:
* Git commits should follow the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0-beta.4/) specification
* Versioning should follow [Semantic Versioning 2.0.0](https://semver.org)
* The README should follow the [standard-readme](https://github.com/RichardLitt/standard-readme) specification
## License
[Apache License 2.0](LICENSE) © 2021 zjykzj | ztransforms | /ztransforms-0.2.0.tar.gz/ztransforms-0.2.0/README.md | README.md |
# ztranslator
A simple command-line translator. You can configure a keyboard shortcut to run ztranslator; just pass --notify as a command-line parameter and ztranslator will take the latest entry from the clipboard and show the translated text in a friendly notification balloon on your desktop.
## Installation:
```sh
$ pip install ztranslator
```
## Usage examples:
#### On the command line:
```sh
$ ztranslator --text "Hello World" --source-lang en --target-lang pt --source-api google
$ ztranslator --help
```
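To use it from a keyboard shortcut, as described above, bind the hotkey to a command along these lines (the language pair and source API here are just an example):
```sh
$ ztranslator --notify --source-lang en --target-lang pt --source-api google
```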
```sh
$ python -m translator --help
```
#### In your Python code:
```python
In [1]: from translator import Translator
In [2]: t = Translator(source_lang="pt", target_lang='en', source_api="google")
In [3]: t.translate("Type copyright, credits or license for more information")
Out[3]: 'Digite copyright, créditos ou licença para mais informações'
```
## Development setup
```sh
$ git clone https://github.com/andreztz/ztranslator.git
$ cd ztranslator
$ virtualenv venv
$ source venv/bin/activate
$ pip install -e .
```
## Release history
- 1.0.0 - Changes the API and command-line interface.
- 0.1.0 - Adds access to the Google Translate API via googletrans.
- 0.0.7 - The first proper release.
- Work in progress
André Santos – [@ztzandre](https://twitter.com/ztzandre) – [email protected]
[https://github.com/andreztz/ztranslator](https://github.com/andreztz/)
## Contributing
1. Fork it (<https://github.com/andreztz/ztranslator/fork>)
2. Create your feature branch (`git checkout -b feature/fooBar`)
3. Commit your changes (`git commit -am 'Add some fooBar'`)
4. Push to the branch (`git push origin feature/fooBar`)
5. Create a new Pull Request
| ztranslator | /ztranslator-1.0.0.tar.gz/ztranslator-1.0.0/README.md | README.md |
from abc import ABC
from abc import abstractmethod
from textwrap import wrap
from requests import get
from requests.utils import quote
import googletrans
class Error(Exception):
pass
class SourceNotFound(Error):
def __init__(self):
message = "The source API does not exist. Try `google` or `mymemory`."
super().__init__(message)
class TranslatorBase(ABC):
@abstractmethod
def translate(self, text):
pass
class MyMemoryTranslator(TranslatorBase):
"""mymemory.translated.net"""
base_url = "http://mymemory.translated.net"
def __init__(self, source_lang, target_lang):
self.source_lang = source_lang
self.target_lang = target_lang
def translate(self, source):
if self.source_lang != self.target_lang:
self.source_list = wrap(source, 1000, replace_whitespace=False)
source = " ".join(self._get_translation(s) for s in self.source_list)
return source
return source
def _get_translation(self, source):
data = self._get_json(source)
translation = data["responseData"]["translatedText"]
if not isinstance(translation, bool):
return translation
else:
matches = data["matches"]
for match in matches:
if not isinstance(match["translation"], bool):
next_best_match = match["translation"]
break
return next_best_match
def _get_json(self, source):
escaped_source = quote(source, "")
query = (
f"/api/get?q={escaped_source}"
f"&langpair={self.source_lang}|{self.target_lang}"
)
url = self.base_url + query
resp = get(url)
return resp.json()
class GoogleTranslator(TranslatorBase):
def __init__(self, source_lang, target_lang):
self.source_lang = source_lang
self.target_lang = target_lang
self.__translator = googletrans.Translator()
def translate(self, text):
text = self.__translator.translate(
text, src=self.source_lang, dest=self.target_lang
)
return text.text
class SourceManager:
sources = {"google": GoogleTranslator, "mymemory": MyMemoryTranslator}
def __init__(self, source_api):
self.api = source_api
def get(self, source_lang, target_lang):
translator = self.sources.get(self.api, None)
if translator is None:
raise SourceNotFound()
return translator(source_lang, target_lang)
class Translator:
def __init__(self, source_lang, target_lang, source_api):
self._translator = SourceManager(source_api).get(source_lang, target_lang)
def translate(self, text):
return self._translator.translate(text) | ztranslator | /ztranslator-1.0.0.tar.gz/ztranslator-1.0.0/translator/translate.py | translate.py |
import sys
import os
import os.path as path
import subprocess
import locale
import ctypes
## Utilities.
py3 = sys.version_info >= (3, 0)
def unicode_or_bytes(input):
if py3:
return isinstance(input, (str, bytes))
else:
return isinstance(input, (unicode, str))
def to_unicode(input, encoding="ascii"):
if py3:
if isinstance(input, str):
return input
if isinstance(input, bytes):
return input.decode(encoding)
else:
if isinstance(input, unicode):
return input
if isinstance(input, str):
return unicode(input, encoding)
def to_bytes(input, encoding="utf-8"):
if py3:
if isinstance(input, str):
return input.encode(encoding)
if isinstance(input, bytes):
return input
else:
if isinstance(input, unicode):
return input.encode(encoding)
if isinstance(input, str):
return input
def which(name, flag=os.X_OK):
""" Search PATH for given executable name. """
executables = []
paths = [entry for entry in os.environ.get("PATH", "").split(os.pathsep) if entry]
extensions = [ext for ext in os.environ.get("PATHEXT", "").split(os.pathsep) if ext]
for entry in paths:
base = path.join(entry, name)
# No PATHEXT? Go for bare executables.
if not extensions and os.access(base, flag) and base not in executables:
executables.append(base)
else:
# Search PATHEXT.
for ext in extensions:
wanted = base + "." + ext
if os.access(wanted, flag) and wanted not in executables:
executables.append(wanted)
return executables
def supported():
""" Return whether clippy supports this platform or not. """
return get != unknown_get and set != unknown_set and clear != unknown_clear
## OS APIs.
# Win32.
WIN32_CF_TEXT = 1
WIN32_CF_UNICODETEXT = 13
WIN32_ENCODING = "ascii"
WIN32_UNICODE_ENCODING = "utf-16-le"
def win32_setup():
""" Setup relevant ctypes argtypes and return (user32, kernel32) pair. """
# Make ctypes happy.
if not hasattr(win32_setup, "user32"):
win32_setup.user32 = ctypes.windll.user32
user32 = win32_setup.user32
user32.OpenClipboard.argtypes = [ctypes.c_void_p]
user32.GetClipboardData.restype = ctypes.c_void_p
user32.SetClipboardData.argtypes = [ctypes.c_uint, ctypes.c_void_p]
user32.SetClipboardData.restype = ctypes.c_void_p
user32.GetActiveWindow.restype = ctypes.c_void_p
if not hasattr(win32_setup, "kernel32"):
win32_setup.kernel32 = ctypes.windll.kernel32
kernel32 = win32_setup.kernel32
kernel32.GlobalAlloc.argtypes = [ctypes.c_uint, ctypes.c_size_t]
kernel32.GlobalAlloc.restype = ctypes.c_void_p
kernel32.GlobalFree.argtypes = [ctypes.c_void_p]
kernel32.GlobalLock.argtypes = [ctypes.c_void_p]
kernel32.GlobalLock.restype = ctypes.c_void_p
kernel32.GlobalUnlock.argtypes = [ctypes.c_void_p]
return win32_setup.user32, win32_setup.kernel32
def win32_clear():
""" Clear clipboard text data. """
user32, kernel32 = win32_setup()
window = user32.GetActiveWindow()
user32.OpenClipboard(window)
user32.EmptyClipboard()
user32.CloseClipboard()
def win32_get():
""" Get clipboard text data. Returns a unicode instance or None. """
user32, kernel32 = win32_setup()
data = None
window = user32.GetActiveWindow()
user32.OpenClipboard(window)
if data is None:
# Try Unicode data first.
contents = user32.GetClipboardData(WIN32_CF_UNICODETEXT)
if contents is not None:
# Lock data, decode data, unlock it.
lock = kernel32.GlobalLock(contents)
data = ctypes.c_wchar_p(contents).value
kernel32.GlobalUnlock(lock)
if data is None:
# Try normal text.
contents = user32.GetClipboardData(WIN32_CF_TEXT)
if contents is not None:
# Lock data, decode data, unlock it.
lock = kernel32.GlobalLock(contents)
data = to_unicode(ctypes.c_char_p(contents).value, WIN32_ENCODING)
kernel32.GlobalUnlock(lock)
user32.CloseClipboard()
return data
def win32_set(data):
""" Set clipboard text data. Accepts unicode or str. """
user32, kernel32 = win32_setup()
if not unicode_or_bytes(data):
raise TypeError("Clipboard data can only be unicode strings or bytes.")
unidata = to_unicode(data).encode(WIN32_UNICODE_ENCODING)
asciidata = to_bytes(data, WIN32_ENCODING)
window = user32.GetActiveWindow()
user32.OpenClipboard(window)
user32.EmptyClipboard()
# Set UNICODETEXT.
try:
win32_set_data(WIN32_CF_UNICODETEXT, unidata)
except RuntimeError:
pass
# Set TEXT.
try:
win32_set_data(WIN32_CF_TEXT, asciidata)
except RuntimeError:
pass
user32.CloseClipboard()
def win32_set_data(type, data):
""" Set clipboard data for specific type from data. """
user32, kernel32 = win32_setup()
ptr = kernel32.GlobalAlloc(0, len(data) + 1)
if ptr is not None:
# Copy data over.
lock = kernel32.GlobalLock(ptr)
dataptr = ctypes.c_char_p(data)
ctypes.memmove(ptr, dataptr, len(data) + 1)
kernel32.GlobalUnlock(lock)
# Set clipboard.
res = user32.SetClipboardData(type, ptr)
if res is None:
# Setting data failed. Deallocate.
kernel32.GlobalFree(ptr)
raise RuntimeError("Could not set clipboard data.")
# OS X.
def osx_pb_clear():
""" Clear clipboard text data. """
with open(os.devnull, "rb") as f:
process = subprocess.Popen(["pbcopy"], stdin=f)
process.wait()
def osx_pb_get():
""" Get clipboard text data. Return a unicode instance, or None. """
process = subprocess.Popen(["pbpaste"], stdout=subprocess.PIPE)
data, _ = process.communicate()
encoding = locale.getpreferredencoding()
return to_unicode(data, encoding) if data else None
def osx_pb_set(data):
""" Set clipboard text data. Accepts a unicode or str. """
if not unicode_or_bytes(data):
raise TypeError("Clipboard data can only be unicode or str.")
data = to_unicode(data)
encoding = locale.getpreferredencoding()
raw = data.encode(encoding)
process = subprocess.Popen(["pbcopy"], stdin=subprocess.PIPE)
process.communicate(raw)
# X windowing system: xclip.
X_CLIPBOARDS = ["CLIPBOARD", "PRIMARY"]
def x_xclip_clear():
""" Clear clipboard text data. """
with open(os.devnull, "rb") as f:
for clipboard in X_CLIPBOARDS:
process = subprocess.Popen(
["xclip", "-selection", clipboard.lower(), "-i"], stdin=f
)
process.wait()
def x_xclip_get():
""" Get clipboard text data. Return a unicode instance, or None. """
raw = None
encoding = locale.getpreferredencoding()
for clipboard in X_CLIPBOARDS:
process = subprocess.Popen(
["xclip", "-selection", clipboard.lower(), "-o"], stdout=subprocess.PIPE
)
raw, _ = process.communicate()
if process.returncode == 0 and raw:
break
return to_unicode(raw, encoding) if raw else None
def x_xclip_set(data):
""" Set clipboard text data. Accepts a unicode or str. """
if not unicode_or_bytes(data):
raise TypeError("Clipboard data can only be unicode or str.")
data = to_unicode(data)
encoding = locale.getpreferredencoding()
raw = data.encode(encoding)
for clipboard in X_CLIPBOARDS:
process = subprocess.Popen(
["xclip", "-selection", clipboard.lower(), "-i"], stdin=subprocess.PIPE
)
process.communicate(raw)
# X windowing system: xsel.
def x_xsel_clear():
""" Clear clipboard text data. """
with open(os.devnull, "rb") as f:
for clipboard in X_CLIPBOARDS:
process = subprocess.Popen(
["xsel", "--" + clipboard.lower(), "-i"], stdin=f
)
process.wait()
def x_xsel_get():
""" Get clipboard text data. Return a unicode instance, or None. """
raw = None
encoding = locale.getpreferredencoding()
for clipboard in X_CLIPBOARDS:
process = subprocess.Popen(
["xsel", "--" + clipboard.lower()], stdout=subprocess.PIPE
)
raw, _ = process.communicate()
if process.returncode == 0 and raw:
break
return to_unicode(raw, encoding) if raw else None
def x_xsel_set(data):
""" Set clipboard text data. Accepts a unicode or str. """
if not unicode_or_bytes(data):
raise TypeError("Clipboard data can only be unicode or str.")
data = to_unicode(data)
encoding = locale.getpreferredencoding()
raw = data.encode(encoding)
for clipboard in X_CLIPBOARDS:
process = subprocess.Popen(
["xsel", "--" + clipboard.lower()], stdin=subprocess.PIPE
)
process.communicate(raw)
# Unknown platform. Just error.
def unknown_clear():
raise RuntimeError(
"clippy hasn't been ported to this platform ({}) yet.".format(sys.platform)
)
def unknown_get():
raise RuntimeError(
"clippy hasn't been ported to this platform ({}) yet.".format(sys.platform)
)
def unknown_set(data):
raise RuntimeError(
"clippy hasn't been ported to this platform ({}) yet.".format(sys.platform)
)
## Selecta!
if sys.platform.startswith("win") or sys.platform == "cygwin":
get = win32_get
set = win32_set
clear = win32_clear
elif sys.platform == "darwin" and which("pbcopy") and which("pbpaste"):
get = osx_pb_get
set = osx_pb_set
clear = osx_pb_clear
elif which("xclip"):
get = x_xclip_get
set = x_xclip_set
clear = x_xclip_clear
elif which("xsel"):
get = x_xsel_get
set = x_xsel_set
clear = x_xsel_clear
else:
get = unknown_get
set = unknown_set
clear = unknown_clear
## Tests.
if __name__ == "__main__":
print("testing ASCII...")
set("foo!")
assert get() == u"foo!"
print("success")
print("testing Unicode...")
set(u"( ≖‿≖)")
assert get() == u"( ≖‿≖)"
print("success")
print("testing clear...")
clear()
assert get() is None
print("success") | ztranslator | /ztranslator-1.0.0.tar.gz/ztranslator-1.0.0/translator/clip.py | clip.py |
.. _zttt_main:
ZTicTacToe
============
Tic Tac Toe is a famous game in which two players take turns placing
a mark on a 3x3 grid. The first player to get three in a row wins.
The module is a standalone implementation of the game Tic Tac Toe,
providing functionality to keep track of the game's state and to
make moves.
.. _zttt_features:
Features
---------
- Standalone implementation of the game Tic Tac Toe.
- Provides a way to customise **move triggers** and access state variables.
- Comes with an engine that makes near-perfect moves.
- Written in Python from scratch and does not require any external libraries.
- Can be integrated into a larger project, with very little effort.
- Raises custom-built exceptions, making errors easy to debug and handle.
.. _zttt_links:
Links
------
- `PyPI <https://pypi.python.org/pypi/zttt>`_
- `GitHub <https://github.com/Sigma1084/ZTicTacToe/tree/v1>`_
- `Documentation <https://ztictactoe.readthedocs.io/en/v1/>`_
- `Examples <https://github.com/Sigma1084/ZTicTacToe/tree/v1/examples>`_
.. _zttt_install:
Installation
-------------
.. code-block:: bash
pip install zttt
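
A minimal usage sketch is shown below. The names used here (``ZTBoard``, ``play``)
are illustrative assumptions only; refer to the `Documentation
<https://ztictactoe.readthedocs.io/en/v1/>`_ and the
`Examples <https://github.com/Sigma1084/ZTicTacToe/tree/v1/examples>`_ for the actual API.

.. code-block:: python

    from zttt import ZTBoard  # assumed import; check the documentation for the real class name

    board = ZTBoard()   # assumed: create an empty 3x3 board
    board.play(4)       # assumed: the current player marks the centre cell (index 4)
    board.play(0)       # assumed: the other player marks the top-left cell (index 0)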
| zttt | /zttt-1.0.1.tar.gz/zttt-1.0.1/README.rst | README.rst |
# *ztv* - astronomical image viewer
*ztv* is an astronomical image viewer designed to be used from a python command line for display and analysis.
*ztv* is useful as-is for display and simple analysis of images already loaded in to [numpy arrays](http://www.numpy.org), as well as [FITS files](http://fits.gsfc.nasa.gov/fits_primer.html). It can display the most recently acquired image by watching a directory for new FITS files to appear or watching a single FITS file for when it changes. It can also receive new images via an [ActiveMQ message stream](http://activemq.apache.org).
*ztv* is intended for real-time display and analysis. *ztv* is not intended to produce publication quality figures.
*ztv* comes with a number of built-in control panels, for:
- selecting input source (FITS file, auto-reload from FITS file, etc)
- selecting a frame to subtract (e.g. sky or dark) and a flat field frame to divide by
- setting colormap, stretch, and lower/upper limits
- doing basic slice plots, statistics, and aperture photometry.
Additional panels can be written and added, for e.g. controlling a camera. (One example add-on panel is included that generates faked images in the FITS format.)
If proper FITS header keywords are available, *ztv* will display the ra/dec of the cursor point.
# Examples of usage
To launch:
import ztv
z = ztv.ZTV()

To load an image in a numpy array:
import numpy as np
im = np.random.normal(size=[10, 256, 256]) # create a 3-d image stack
z.load(im)

You can now look at your data, manipulate display parameters, etc., all using the GUI elements. All of these elements are accessible through the tabbed control panels. You can also switch amongst the control panel tabs with `cmd-alt-#`, where *#* is the number of the panel, starting from 1, or with `cmd-[` and `cmd-]` to move left/right amongst the tabs. You can even switch tabs from the command-line API, e.g.:
z.control_panel('Color')

To change cursor mode, press `cmd-#` where *#* is the number shown in the pop-up menu that's available by right-clicking in the primary image area:

To manipulate display parameters:
z.cmap('gist_heat')
z.minmax(0., 4.)
z.scaling('Sqrt')
z.xy_center(100, 100)
z.zoom(5.)

To set up a statistics box and see the GUI output (note that output is also returned to your command line as a dict):
z.stats_box(xrange=[80, 100], yrange=[100,120], show_overplot=True)
z.control_panel('Stats')

There's a lot more you can do from the command line if you play with *ztv*, especially in an exploration-friendly environment like [ipython](http://ipython.org/). And, anything you can do from the command line can be done from the GUI.
Download an iconic FITS image from the web and display it:
from urllib import urlopen
from zipfile import ZipFile
from StringIO import StringIO
remote_url = 'http://www.spacetelescope.org/static/projects/fits_liberator/datasets/eagle/656nmos.zip'
local_filename = '/tmp/hst-eagle-nebula-656nmos.fits'
zip = ZipFile(StringIO(urlopen(remote_url).read()))
zip_filename = zip.filelist[0].filename
open(local_filename, 'w').write(zip.open(zip_filename).read())
z.load(local_filename)
z.scaling('Log')
z.minmax(0, 500)

We can even do a little aperture photometry while we're here:
z.cmap('gray')
z.xy_center(624, 524)
z.zoom(4)
z.minmax(0, 1000)
z.scaling('Asinh')
z.control_panel('phot')
z.aperture_phot(xclick=614, yclick=516, show_overplot=True)

And, of course, you can adjust the window size to suit your needs, either smaller:

or larger:

## Example of an Add-on Control Panel
One of the motivating use cases for *ztv* was real-time quick-look of incoming images and the ability to extend the basic installation, including instrument control. An example of this is that *ztv* will be used to both control and inspect the images from a slit-viewing camera on a spectrograph of mine. To demonstrate this extensibility, there's a simple example in `ztv_examples/fits_faker_panel/`:
from ztv_examples.fits_faker_panel.launch_ztv import launch_ztv
z = launch_ztv()
z.start_fits_faker()

Our fake example data looks a lot better when we subtract the sky and divide by the flat field (someone needs to blow the dust off that fake dewar window...):
z.control_panel('Source')
z.sky_frame(True)
z.flat_frame(True)

# Installation and Dependencies
*ztv* uses several packages, including [wxPython](http://wxpython.org) and [astropy](http://www.astropy.org). These should be installed automatically if you install *ztv* from [pypi](https://pypi.python.org/pypi/ztv) with:
pip install ztv
You can also grab source code from [github](https://github.com/henryroe/ztv).
Note that *ztv* was developed and tested on OS X.
## Example of installation using Mac OS X's included Python
The following steps worked on a fresh install of OS X Yosemite 10.10.5 on 2015-09-06:
* Install Xcode from the App Store
* Launch Xcode one time to accept licenses
* Install pip and other necessary python packages
    Run the following commands in a terminal:
curl -o ~/Downloads/get-pip.py https://bootstrap.pypa.io/get-pip.py
sudo -H python ~/Downloads/get-pip.py
sudo -H pip install matplotlib
sudo -H pip install astropy
sudo -H pip install astropy-helpers
* Install wxPython version 3
* Download [OS X cocoa version of wxPython version 3 from here](http://wxpython.org/download.php#osx) (was *wxPython3.0-osx-docs-demos-3.0.2.0-cocoa-py2.7.dmg* at time of writing)
    * Open the disk image and install with the following command:
(This is necessary because package isn't properly signed & is an old-style package, see [here](http://stackoverflow.com/questions/21223717/install-wxpython-on-mac-os-mavericks). Obviously may need to update exact file path to the pkg.)
sudo installer -pkg /Volumes/wxPython3.0-osx-3.0.2.0-cocoa-py2.7/wxPython3.0-osx-cocoa-py2.7.pkg -target /
Finally, install *ztv*:
sudo -H pip install ztv
## Example of installation into anaconda python distribution
The following was tested on a fresh install of OS X 10.10.5 on 2015-09-08.
Install Xcode from the App Store and launch Xcode one time to accept its licenses.
Download [Anaconda-2.3.0-MacOSX-x86_64.sh](http://continuum.io/downloads) from the Anaconda downloads page.
bash Anaconda-2.3.0-MacOSX-x86_64.sh
source ~/.bash_profile
conda create --name ztv-test wxpython matplotlib
source activate ztv-test
pip install ztv
## Example of installation into a Homebrew python distribution
The following was tested on a fresh install of OS X 10.10.5 on 2015-09-07.
Install Xcode from the App Store and launch Xcode one time to accept its licenses.
Install [Homebrew](http://brew.sh/) with the one-line ruby command on [Homebrew's home page](http://brew.sh/)
Install python & other necessary bits with the following commands.
brew install python
brew install wxpython
pip install numpy
pip install ztv
Note that `numpy` is explicitly installed first using `pip install numpy` before `ztv` is installed. During testing on OS X 10.10.5 on 2015-09-07, allowing the numpy dependency to be filled automatically by `pip install ztv` resulted in an installation error that does not occur if you follow the above sequence.
## Linux/Ubuntu
I tested briefly on Ubuntu 14.04. *ztv* basically works, although the pulldown colormap menus will not have bitmaps of the colormaps. Also, (at least on my testing virtual machine) the performance of *ztv* was much laggier than on my main OS X laptop. For the colormaps you could try looking at [this link](http://askubuntu.com/questions/464146/how-to-enable-icons-in-menus-in-ubuntu-14-04), but it didn't work on my test system.
# Background
In graduate school in the late 1990's I learned [IDL](http://en.wikipedia.org/wiki/IDL_(programming_language)) and used [Aaron Barth's ATV](http://www.physics.uci.edu/~barth/atv/) extensively. I even contributed a little to a now-outdated version of [ATV](http://www.physics.uci.edu/~barth/atv/), adding 3-d image stack capability. [ATV](http://www.physics.uci.edu/~barth/atv/) was and is incredibly useful for quick-looks at image data, analysis, and all the things you want when working with typical astronomical image data.
After graduate school I began migrating toward python and away from IDL. I've written about this choice elsewhere, but some of the basic reasons were to avoid IDL licensing issues and being beholden to one company. (To be fair, how much I pay every year to keep my IDL license current has always been reasonable. It helps that my license has some obscure history to it that makes the maintenance fees moderate. But, at any time they could raise the prices on me massively. And, I wanted to use a language that could effectively be on every machine I touch, from my main laptop to an embedded server.)
In python there are already a multitude of possible image viewers. Many of which are great and can do much of what I needed. (See next section for some links.) But, inevitably as I've played with them I've found they each doesn't scratch my itch in some way. I wanted something that worked exactly the way I wanted, with the right (for me) mix of complexity and simplicity. I need day-to-day image quicklook from the python command-line, e.g. while I'm developing some new image processing algorithm or to check on last night's data. But, I also need to be able to easily adapt my viewer to other situations, including real-time use on a slit-viewing camera, quick-reduction of incoming data, etc.. So, I wrote *ztv*.
The name *ztv* is an obvious play off of [ATV](http://www.physics.uci.edu/~barth/atv/). And, "z" is my daughter's middle initial.
# Other Image Viewers You Should Check Out
- If you're using IDL, check out [ATV](http://www.physics.uci.edu/~barth/atv/) of course!
- [SAOImage DS9](http://ds9.si.edu/site/Home.html)
- [Aladin Desktop Sky Atlas](http://aladin.u-strasbg.fr) (not primarily an image viewer, but can open FITS files and overlay catalogs and other images nicely)
- [ginga](http://ejeschke.github.io/ginga/)
- [Toyz](http://fred3m.github.io/toyz/)
(If your favorite isn't on this list, please email [email protected] to get it added.)
# Acknowledgements
Thank you to Aaron Barth for his original [ATV](http://www.physics.uci.edu/~barth/atv/). Thank you to all the numerous people who have put so much effort in to all the packages that make my work not only easier but possible. I especially thank the developers of [astropy](http://www.astropy.org) and its associated packages. e.g. It's an amazing thing to do correct FITS coordinate conversions in one line of code.
# Author
Henry Roe ([email protected])
# License
*ztv* is licensed under the MIT License, see ``LICENSE.txt``. Basically, feel free to use any or all of this code in any way. But, no warranties, guarantees, etc etc.. | ztv | /ztv-0.2.3.post1.tar.gz/ztv-0.2.3.post1/README.md | README.md |
from __future__ import absolute_import
import wx
from .fake_fits_maker import FakeFitsMaker
from wx.lib.pubsub import pub
class FitsFakerPanel(wx.Panel):
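    """wxPython control panel that starts/stops the FakeFitsMaker thread and
    points ztv at the generated sky, flat and data frames in /tmp/."""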
def __init__(self, parent):
wx.Panel.__init__(self, parent, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL)
self.ztv_frame = self.GetTopLevelParent()
v_sizer1 = wx.BoxSizer(wx.VERTICAL)
v_sizer1.AddSpacer((0, 0), 1, wx.EXPAND)
static_text = wx.StaticText(self, wx.ID_ANY, u"Fits Faker", wx.DefaultPosition, wx.DefaultSize,
wx.ALIGN_CENTER )
static_text.Wrap( -1 )
v_sizer1.Add(static_text, 0, wx.ALL|wx.ALIGN_CENTER|wx.ALIGN_CENTER_VERTICAL, 0)
static_text = wx.StaticText(self, wx.ID_ANY, u"Example of a panel added-on to ztv", wx.DefaultPosition,
wx.DefaultSize, wx.ALIGN_CENTER )
static_text.Wrap( -1 )
v_sizer1.Add(static_text, 0, wx.ALL|wx.ALIGN_CENTER|wx.ALIGN_CENTER_VERTICAL, 0)
self.launch_button = wx.Button(self, wx.ID_ANY, u"Launch Fake Fits Maker", wx.DefaultPosition, wx.DefaultSize, 0)
v_sizer1.Add(self.launch_button, 0, wx.ALL|wx.ALIGN_CENTER|wx.ALIGN_CENTER_VERTICAL, 2)
self.launch_button.Bind(wx.EVT_BUTTON, self.on_launch_button)
self.halt_button = wx.Button(self, wx.ID_ANY, u"Halt Fake Fits Maker", wx.DefaultPosition, wx.DefaultSize, 0)
v_sizer1.Add(self.halt_button, 0, wx.ALL|wx.ALIGN_CENTER|wx.ALIGN_CENTER_VERTICAL, 2)
self.halt_button.Bind(wx.EVT_BUTTON, self.on_halt_button)
self.halt_button.Disable()
static_text = wx.StaticText(self, wx.ID_ANY, u"Note: clicking 'Halt' will delete files in /tmp/ for you", wx.DefaultPosition,
wx.DefaultSize, wx.ALIGN_CENTER )
static_text.Wrap( -1 )
v_sizer1.Add(static_text, 0, wx.ALL|wx.ALIGN_CENTER|wx.ALIGN_CENTER_VERTICAL, 0)
static_text = wx.StaticText(self, wx.ID_ANY, u"If you don't click Halt, you may want to go clean up *.fits in /tmp/",
wx.DefaultPosition, wx.DefaultSize, wx.ALIGN_CENTER )
static_text.Wrap( -1 )
v_sizer1.Add(static_text, 0, wx.ALL|wx.ALIGN_CENTER|wx.ALIGN_CENTER_VERTICAL, 0)
v_sizer1.AddSpacer((0, 0), 1, wx.EXPAND)
self.SetSizer(v_sizer1)
pub.subscribe(self.on_launch_button, "fits-faker-start")
pub.subscribe(self.on_halt_button, "fits-faker-stop")
def on_launch_button(self, evt=None):
self.fake_fits_maker = FakeFitsMaker(ztv_frame_pid=self.ztv_frame.ztv_frame_pid)
self.fake_fits_maker.start()
self.launch_button.Disable()
self.halt_button.Enable()
self.ztv_frame.source_panel.load_sky_frame('/tmp/sky_frame.fits', False)
self.ztv_frame.source_panel.load_flat_frame('/tmp/flat_frame.fits', False)
self.ztv_frame.source_panel.autoload_curfile_file_picker_on_load('/tmp/n*.fits')
def on_halt_button(self, evt=None):
self.launch_button.Enable()
self.halt_button.Disable()
self.fake_fits_maker.keep_running = False | ztv | /ztv-0.2.3.post1.tar.gz/ztv-0.2.3.post1/ztv_examples/fits_faker_panel/fits_faker_panel.py | fits_faker_panel.py |
import numpy as np
from astropy.io import fits
import time
import os.path
import os
import psutil
import threading
class FakeFitsMaker(threading.Thread):
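    """Daemon thread that periodically writes synthetic FITS frames (a sky frame,
    a flat field, current.fits and numbered data frames containing fake stars and
    moving objects) to data_dir, and cleans them up once the watched ztv process exits."""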
def __init__(self, ztv_frame_pid=None):
self.ztv_frame_pid = ztv_frame_pid # will kill self if this pid no longer alive
self.nx = 512
self.ny = 512
self.flat_field_pixel_to_pixel_fractional_1sigma = 0.15
self.flat_field_num_dust_donuts = 30
self.flat_field_dust_donuts_radius_range = [20, 50]
self.flat_field_dust_donuts_width_range = [10, 15]
self.flat_field_dust_donuts_peak_range = [0.5, 0.8]
self.sky_pattern_mean_cts = 9000.
self.sky_pattern_row_to_row_variation_1sigma_cts = 2000.
self.saturation_cts = 2**16
self.seeing_gauss_width = 2.0 # not fwhm....being lazy
self.n_bkgd_stars = 50
self.n_moving_objects = 10
self.moving_objects = []
self.delay_between_frames_sec = 2.0
self.data_dir = '/tmp/'
self.files_to_delete = []
self.frame_number = 1
self.set_up_bkgd_stars()
self.make_flat_frame()
self.make_sky_frame()
self.write_to_fits_file(self.sky_frame, 'sky_frame.fits')
self.write_to_fits_file(self.flat_frame, 'flat_frame.fits')
threading.Thread.__init__(self)
self.daemon = True
def run(self):
self.keep_running = True
while self.keep_running:
im = self.make_data_frame()
self.write_to_fits_file(im, 'current.fits')
self.write_to_fits_file(im, 'n{:04d}.fits'.format(self.frame_number))
self.frame_number += 1
time.sleep(self.delay_between_frames_sec)
if not psutil.pid_exists(self.ztv_frame_pid):
self.keep_running = False
self.delete_files()
def set_up_bkgd_stars(self):
flux_lognormal_sigma = 1.0
flux_multiplier = 7000.
self.bkgd_stars = {}
self.bkgd_stars['x'] = np.random.uniform(low=0., high=self.nx - 1, size=self.n_bkgd_stars)
self.bkgd_stars['y'] = np.random.uniform(low=0., high=self.ny - 1, size=self.n_bkgd_stars)
self.bkgd_stars['peak_cts'] = flux_multiplier * np.random.lognormal(sigma=flux_lognormal_sigma,
size=self.n_bkgd_stars)
self.bkgd_stars_frame = np.zeros([self.ny, self.nx])
for i in np.arange(self.n_bkgd_stars):
dxs = np.outer(np.ones(self.ny), np.arange(self.nx)) - self.bkgd_stars['x'][i]
dys = np.outer(np.arange(self.ny), np.ones(self.nx)) - self.bkgd_stars['y'][i]
self.bkgd_stars_frame += (self.bkgd_stars['peak_cts'][i] *
np.exp(-((dxs)**2 + (dys)**2) / (2. * self.seeing_gauss_width**2)))
def new_moving_object(self):
flux_lognormal_sigma = 1.0
flux_multiplier = 10000.
new_object = {'peak_cts': flux_multiplier * np.random.lognormal(sigma=flux_lognormal_sigma)}
new_object['x'] = 0.
new_object['y'] = np.random.uniform(low=1., high=self.ny - 2)
new_object['dx'] = np.random.normal(loc=20, scale=15.)
new_object['dy'] = np.random.normal(loc=0., scale=10.)
return new_object
def advance_moving_objects(self):
remaining_moving_objects = []
for cur_moving_object in self.moving_objects:
cur_moving_object['x'] += cur_moving_object['dx']
cur_moving_object['y'] += cur_moving_object['dy']
if ((cur_moving_object['x'] >= 0.) and (cur_moving_object['y'] >= 0.) and
(cur_moving_object['x'] < self.nx) and (cur_moving_object['y'] < self.ny)):
remaining_moving_objects.append(cur_moving_object)
self.moving_objects = remaining_moving_objects
def calc_one_sky(self):
return np.array([np.random.poisson(a, size=self.nx) for a in self.sky_frame_row_baseline])
def make_sky_frame(self):
self.sky_frame_row_baseline = np.random.normal(loc=self.sky_pattern_mean_cts, scale=
self.sky_pattern_row_to_row_variation_1sigma_cts,
size=self.ny)
self.sky_frame_row_baseline[self.sky_frame_row_baseline <= 0] = 0.
self.sky_frame = self.calc_one_sky() * self.flat_frame
def make_flat_frame(self):
flat = np.random.normal(loc=1.0, scale=self.flat_field_pixel_to_pixel_fractional_1sigma, size=[self.ny, self.nx])
for i in range(self.flat_field_num_dust_donuts):
x = np.random.uniform(0., self.nx)
y = np.random.uniform(0., self.ny)
radius = np.random.uniform(min(self.flat_field_dust_donuts_radius_range),
max(self.flat_field_dust_donuts_radius_range))
width = np.random.uniform(min(self.flat_field_dust_donuts_width_range),
max(self.flat_field_dust_donuts_width_range))
peak = np.random.uniform(min(self.flat_field_dust_donuts_peak_range),
max(self.flat_field_dust_donuts_peak_range))
xdist = np.outer(np.ones(flat.shape[0]), np.arange(flat.shape[1]) - x)
ydist = np.outer(np.arange(flat.shape[0]) - y, np.ones(flat.shape[1]))
dist = np.sqrt(xdist**2 + ydist**2)
flat *= 1. - (1. - peak) * np.exp(-((dist - radius)**2) / (2. * width**2))
self.flat_frame = flat
def make_data_frame(self):
im = self.bkgd_stars_frame.copy()
self.advance_moving_objects()
while len(self.moving_objects) < self.n_moving_objects:
self.moving_objects.append(self.new_moving_object())
for cur_moving_object in self.moving_objects:
dxs = np.outer(np.ones(self.ny), np.arange(self.nx)) - cur_moving_object['x']
dys = np.outer(np.arange(self.ny), np.ones(self.nx)) - cur_moving_object['y']
im += (cur_moving_object['peak_cts'] * np.exp(-((dxs)**2 + (dys)**2) / (2. * self.seeing_gauss_width**2)))
im = (im + self.calc_one_sky()) * self.flat_frame
for x in np.arange(self.nx): # has to be a better way than this dumb/slow loop
for y in np.arange(self.ny):
im[y, x] = min(np.random.poisson(max(im[y, x], 0)), self.saturation_cts)
return im
def delete_files(self):
for curfile in self.files_to_delete:
try:
os.remove(os.path.join(self.data_dir, curfile))
except OSError: # ignore error if file doesn't exist
pass
def write_to_fits_file(self, im, filename):
max_files_on_disk = 10 # play nice with space in people's /tmp/ dirs
hdu = fits.PrimaryHDU(im)
hdu.writeto(os.path.join(self.data_dir, filename), clobber=True)
if filename not in self.files_to_delete:
if filename.startswith('n'):
self.files_to_delete = ([a for a in self.files_to_delete if a.startswith('n')] +
[filename] +
[a for a in self.files_to_delete if not a.startswith('n')])
else:
self.files_to_delete.append(filename)
while len(self.files_to_delete) > max_files_on_disk:
os.remove(os.path.join(self.data_dir, self.files_to_delete.pop(0)))
if __name__ == '__main__':
f = FakeFitsMaker()
f.start() | ztv | /ztv-0.2.3.post1.tar.gz/ztv-0.2.3.post1/ztv_examples/fits_faker_panel/fake_fits_maker.py | fake_fits_maker.py |
[](https://travis-ci.org/bmwcarit/zubbi)

<br/>
The Zuul Building Blocks Index (aka Zubbi) makes it easy to search for available
jobs and roles ("Building Blocks") within a [Zuul](https://zuul-ci.org/docs/zuul/)
based CI system - even if they are spread over multiple tenants or repositories.
---
*Contents:*
**[Architecture](#architecture)** |
**[Quickstart](#quickstart)** |
**[Development](#development)** |
**[Scraper usage](#scraper-usage)** |
**[Configuration Examples](#configuration-examples)** |
**[Available Connections](#available-connections)**
---
## Architecture

Zubbi consists of two parts, **zubbi web** and **zubbi scraper**. It uses
**Elasticsearch** as storage backend and needs **Git repositories** as
source for job and role definitions.
### Zubbi web
A web frontend based on Flask that reads the data from Elasticsearch. It allows
searching for roles and jobs used within the CI system and shows the results
including their documentation, last updates, changelog and some additional meta
data.
### Zubbi scraper
A Python application that scrapes Git repositories, searches for job and
role definitions in specific files and stores them in Elasticsearch.
## Quickstart
Prerequisites: [Docker Compose](https://docs.docker.com/compose/)
Zubbi can simply be started by using the provided `docker-compose.yaml` file.
---
**NOTE**
The provided `Dockerfile` should only be used for demonstration purposes and not
in a production system. Flask is running in development mode and listens on all
public IPs to make it reachable from outside the docker container.
---
To get the whole stack up and running, do the following:
```shell
$ cd docker
$ docker-compose build
$ docker-compose up
```
This will build the docker container with the newest Zubbi version, start all
necessary services (Elasticsearch, zubbi-scraper, zubbi-web) and do a full
scrape of the `openstack-infra/zuul-jobs` repository to get an initial set of
data.
When everything is up, you can visit `http://localhost:5000` and explore the jobs
and roles from the `openstack-infra/zuul-jobs` repo.
## Development
Prerequisites: Python 3.6, [Tox](https://tox.readthedocs.io/en/latest/) and
[Pipenv](https://docs.pipenv.org/) installed.
To install necessary dependencies for development, run:
```shell
$ pipenv shell
$ pipenv install --dev
```
We are using [black](https://black.readthedocs.io/en/stable/) to ensure
well-formatted Python code. To automatically ensure this on each commit, you can
use the included pre-commit hook. To install the hook, simply run:
```shell
$ pre-commit install
```
Before submitting pull requests, run tests and static code checks using tox:
```shell
$ tox
```
### Installing & updating dependencies
New dependencies should be added to the `requires` list in the `setup.py` file:
```python
requires = [
"arrow",
"click",
...,
"<new dependency>",
]
```
Afterwards, run the following command to update the `Pipfile.lock` and install the
new dependencies in your local pipenv environment:
```shell
$ pipenv update
```
Test dependencies should be installed as development dependencies:
```shell
$ pipenv install --dev my-test-dependency
```
To update the dependencies to the latest version, or after a new dependency was
installed, run `tox -e update-requirements` and commit the changed
Pipenv and requirements files.
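A typical sequence might look like this (the exact set of regenerated files depends on the repository layout, so review `git status` before committing):
```shell
$ tox -e update-requirements
$ git add -u    # stage the regenerated dependency files
$ git commit -m "Update dependencies"
```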
### Configuring and starting Zubbi
If you followed the [Development](#development) guide so far, you should already
have a virtual environment with all required packages to run Zubbi. What's left,
are a few configuration files and a local Elasticsearch instance for testing.
#### Elasticsearch
Zubbi is currently depending on Elasticsearch as data backend. If you have
[Docker Compose](https://docs.docker.com/compose/) installed, you can use
the provided `docker-compose.yaml` file to start Elasticsearch locally.
```shell
$ cd docker
$ docker-compose up elasticsearch
```
If not, we recommend using the latest available Elasticsearch Docker image to
get a local instance up and running for development.
#### Configuration
Both - Zubbi scraper and Zubbi web - read their configuration from the file path
given via the `ZUBBI_SETTINGS` environment variable:
```shell
$ export ZUBBI_SETTINGS=$(pwd)/settings.cfg
```
In order to show jobs and roles in Zubbi, we need to provide a minimal
[tenant configuration](https://zuul-ci.org/docs/zuul/admin/tenants.html)
containing at least a single repository (which is used as source).
Therefore, put the following in a `tenant-config.yaml` file:
```yaml
- tenant:
name: openstack
source:
openstack-gerrit:
untrusted-projects:
- openstack-infra/zuul-jobs
```
Put the following in your `settings.cfg` to allow scraping based on the tenant
configuration above and store the results in the local Elasticsearch instance.
Please note that the keys in the `CONNECTIONS` dictionary must match the
`source` names in the tenant configuration.
```ini
ELASTICSEARCH = {
'host': 'localhost',
'port': 9200,
}
TENANT_SOURCES_FILE = 'tenant-config.yaml'
CONNECTIONS = {
'openstack-gerrit': {
'provider': 'git',
'git_host_url': 'https://git.openstack.org',
},
}
```
#### Running Zubbi
Now we can scrape the `openstack-infra/zuul-jobs` repository to get a first set
of jobs and roles into Elasticsearch and show them in Zubbi:
```shell
$ zubbi-scraper scrape --full
```
When the scraper run was successful, we can start Zubbi web to take a look at
our data:
```shell
$ export FLASK_APP=zubbi
$ export FLASK_DEBUG=true
$ flask run
```
### Building the syntax highlighting stylesheet with pygments
We are using a pre-build pygments stylesheet to highlight the code examples in
job and roles documentations. In case you want to rebuild this syntax highlighting
stylesheet (e.g. to try out another highlighting style) you can run the following
command:
```shell
$ pygmentize -S default -f html -a .highlight > zubbi/static/pygments.css
```
## Scraper usage
The Zubbi scraper supports two different modes: `periodic` (default) and `immediate`.
To start the scraper in periodic mode, simply run:
```shell
$ zubbi-scraper scrape
```
This should also scrape all repositories specified in the tenant configuration
for the first time.
To immediately scrape one or more repositories, you can use the following command:
```shell
# Scrape one or more repositories
$ zubbi-scraper scrape --repo 'orga1/repo1' --repo 'orga1/repo2'
# Scrape all repositories
$ zubbi-scraper scrape --full
```
Additionally, the scraper provides a `list-repos` command to list all
available repositories together with some additional information like the
last scraping timestamp and the git provider (connection type):
```shell
$ zubbi-scraper list-repos
```
## Configuration examples
Examples for all available settings can be found in `settings.cfg.example`.
### Tenant Configuration
Zubbi needs to know which projects contain the job and role definitions that
are used inside the CI system. To achieve this, it uses Zuul's
[tenant configuration](https://zuul-ci.org/docs/zuul/admin/tenants.html).
Usually, this tenant configuration is stored in a file that must be specified
in the `settings.cfg`, but it could also come from a repository.
```ini
# Use only one of the following, not both
TENANT_SOURCES_FILE = '<path_to_the_yaml_file>'
TENANT_SOURCES_REPO = '<orga>/<repo>'
```
### Elasticsearch Connection
The Elasticsearch connection can be configured in the `settings.cfg` like
the following:
```ini
ELASTICSEARCH = {
'host': '<elasticsearch_host>',
'port': 9200, # default
'user': '<user>',
'password': '<password>',
# Optional, to avoid name clashes with existing ES indices from other applications
# E.g. 'zubbi' will result in indices like 'zubbi-zuul-jobs', 'zubbi-ansible-roles', ...
    'index_prefix': '<prefix>',
# Optional, to enable SSL for the Elasticsearch connection.
# You must at least set 'enabled' to True and provide other parameters if the default
# values are not sufficient.
'tls': {
'enabled': False, # default
'check_hostname': True, # default
'verify_mode': 'CERT_REQUIRED', # default
},
}
```
## Available Connections
Currently, Zubbi supports the following connection types: **GitHub**, **Gerrit**
and **Git**. The latter one can be used for repositories that are not hosted on
either GitHub or Gerrit.
### GitHub
The GitHub connection uses GitHub's REST API to scrape the repositories. To be
able to use this connection, you need to create a GitHub App with the following
permissions:
```yaml
Repository contents: Read-only
Repository metadata: Read-only
```
If you are unsure about how to set up a GitHub App, take a look at the
[official guide](https://developer.github.com/apps/building-github-apps/creating-a-github-app/).
Once you have successfully created your GitHub App, you can define the connection
with the following parameters in your `settings.cfg` accordingly:
```ini
CONNECTIONS = {
'<name>': {
'provider': 'github',
'url': '<github_url>',
'app_id': <your_github_app_id>,
'app_key': '<path_to_keyfile>',
},
...
}
```
#### Using GitHub Webhooks
GitHub webhooks can be used to keep your Zubbi data up to date.
To activate GitHub webhooks, you have to provide a webhook URL pointing to
the `/api/webhook` endpoint of your Zubbi web installation. The generated webhook
secret must be specified in the `GITHUB_WEBHOOK_SECRET` setting in your `settings.cfg`:
**NOTE:** As of now, GitHub webhooks are not supported on a per-connection basis.
You can only have one webhook active in Zubbi.
```ini
GITHUB_WEBHOOK_SECRET = '<secret>'
```
Zubbi web receives webhook events from GitHub, validates the secret and publishes
relevant events to the scraper via [ZMQ](https://pyzmq.readthedocs.io/en/latest/).
The Zubbi scraper on the other hand subscribes to the ZMQ socket and scrapes
necessary repositories whenever an event is received. In order to make this
communication work, you need to specify the following parameters in your `settings.cfg`:
```ini
# Zubbi web (publish)
ZMQ_PUB_SOCKET_ADDRESS = 'tcp://*:5556'
# Zubbi scraper (subscribe)
ZMQ_SUB_SOCKET_ADDRESS = 'tcp://localhost:5556'
```
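For illustration only: the snippet below is not Zubbi's actual code or message format, just a minimal [pyzmq](https://pyzmq.readthedocs.io/en/latest/) sketch of the pub/sub channel described above (Zubbi web publishes on the PUB socket, the scraper connects a SUB socket and reacts to each received event):
```python
import zmq

# Minimal subscriber sketch: connect to the address configured as
# ZMQ_SUB_SOCKET_ADDRESS and print every published event.
context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect("tcp://localhost:5556")
socket.setsockopt_string(zmq.SUBSCRIBE, "")  # no topic filter: receive everything

while True:
    print(socket.recv_string())
```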
### Gerrit
In contrast to GitHub, the Gerrit connection is based on
[GitPython](https://gitpython.readthedocs.io/en/stable/) as the Gerrit REST API
does not support all use cases. To use this connection, you have
to provide the following parameters in your `settings.cfg`:
```ini
CONNECTIONS = {
'<name>': {
'provider': 'gerrit',
'url': '<git_remote_url>',
# Only necessary if different from the git_remote_url
'web_url': '<gerrit_url>',
# The web_type is necessary to build the correct URLs for Gerrit.
# Currently supported types are 'cgit' (default) and 'gitweb'.
'web_type': 'cgit|gitweb',
# Optional, if authentication is required
'user': '<username>',
'password': '<password>',
},
...
}
```
### Git
The Git connection is also based on
[GitPython](https://gitpython.readthedocs.io/en/stable/) and can be used for Git
repositories that are not hosted on either GitHub or Gerrit. To use this connection,
put the following in your `settings.cfg`:
```ini
CONNECTIONS = {
'<name>': {
'provider': 'git',
'url': '<git_host_url>',
# Optional, if authentication is required
'user': '<username>',
'password': '<password>',
},
...
}
```
*Happy coding!*
| zubbi | /zubbi-2.4.8.tar.gz/zubbi-2.4.8/README.md | README.md |
# Changelog
## 2.4.8
- Switch to the default Python date format for log messages
## 2.4.7
### Fixes
- Fixed a bug that made zubbi-scraper crash on empty repositories.
## 2.4.6
### Fixes
- Filter job files for the *.yaml extension before parsing to avoid YAML parser
errors. So far, Zubbi was parsing all files (including e.g. a README.md file)
within the jobs directory, which made the parser fail.
- Make the RepoParser more robust against files with invalid yaml content. In
case an error occurs during job or role parsing this should now only affect
the current repository but not the whole scraper run.
## 2.4.5
### Fixes
- Remove the noreferrer header from the zubbi repository link in the footer
to not leak internal URLs.
- Use pagination when fetching installation ids from GitHub. So far, Zubbi
only fetched the first 50 installations.
- Ignore a job file if there is a syntax error when parsing its YAML content.
## 2.4.4
### Fixes
- Fixed a bug that resulted in an internal server error when using a search
query that contains a '/' in zubbi-web.
## 2.4.3
### Fixes
- Fixed a bug that made the zubbi-scraper crash when a zuul config directory
contained not only files but a nested directory structure. Those config
directories are now scraped recursively.
- Fixed a bug that made zubbi-scraper crash on startup if the TENANT_SOURCES_REPO
setting referenced an unknown connection.
## 2.4.2
### Fixes
- Fixed a GraphQL query which was relying on a master branch being present in
the repository.
## 2.4.1
### Fixes
- How-to page is updated with description of supported_os and reusable directives.
## 2.4.0
### New Features
- **UI:** Improve search results ordering. The search results will be ordered by
the priority of the fields matching the query. The priority is as follows:
name -> description -> tenant/repo.
- **Scraper:** Projects (repositories) can be configured as "reusable" in settings.cfg.
When a repository is configured as "reusable", all jobs and roles scraped from this
repository are marked as "reusable".
## 2.3.0
### New Features
- **Scraper:** Support marking roles and jobs as "reusable". The scraper parses the directive
in a role's README file or a job's description and stores it in Elasticsearch.
- **UI:** Search results will display roles and jobs that are marked as "reusable"
at the top, and highlight them.
## 2.2.2
### General
- Updated dependencies to newest versions
## 2.2.1
### Fixes
- Fixed a bug where the delete outdated query never matched repositories on
GitHub. To fix this behaviour, an additional repo_name.keyword field was
introduced in the git-repos index.
- The connections are now first initialized when a valid scraper command is
invoked. Thus, running `zubbi-scraper --help` or providing wrong command line
arguments should directly run and return without unnecessarily initializing
the connections to Github, Gerrit and Elasticsearch.
## 2.2.0
### General
- Updated dependencies to newest versions
## 2.1.2
### Fixes
- **Elasticsearch:** If an index_prefix is provided, but empty, it will be ignored.
## 2.1.1
### Fixes
- **Scraper:** Unfortunately, the scraper wasn't aware of the new Elasticsearch
configuration format and the SSL options.
## 2.1.0
### New Features
- **Elasticsearch:** Support SSL options for the Elasticsearch connection. For a
list of available options, take a look at the settings.cfg.example file.
### Deprecated
- **Configuration:** Changed the format on how to specify the Elasticsearch
connection in the config file to a single dictionary (like for CONNECTIONS).
The old format (prefixing everything with "ES_") is still supported but will
be removed in future versions. Please see the settings.cfg.example for details
to the new format.
## 2.0.0
### New Features
- **Scraper:** Update repo information in Elasticsearch directly after scraping.
Previously, we updated the information for all scraped repositories in one go to
reduce the amount of requests sent to Elasticsearch. However, this had the drawback,
that none of the repo information was updated if scraping any of the repositories
failed.
### Fixes
- **Scraper:** Fix a bug were the scraper was still trying to check out GitHub
repositories, although it didn't have a valid access token.
- **Scraper:** Don't fail when trying to split the owner from invalid GitHub
repository names. Actually, the wrong name comes from a bug in the tenant scraper
which should be fixed in a future release. But for now, it's a good idea to make
this part more robust.
### Backwards incompatible changes
- **Elasticsearch:** Zubbi 2.x.x is only compatible with Elasticsearch major version 7.
## 1.3.0
### New Features
- **Experimental:** You can set an `ES_INDEX_PREFIX` in the `settings.cfg` file to
prefix all Elasticsearch indices with a custom value. This could be useful to avoid
name clashes if indices with the same name are already used by another part of the
system.
## 1.2.0
### New Features
- **UI:** Autofocus the search field on index page. You can now visit Zubbi and
directly start typing.
- **UI:** Show "last update" timestamps for jobs and roles in search results.
- **Extensions:** Allow custom tabs/contents on details page. When extending Zubbi,
someone can now add new tabs (in addition to the already existing 'Description'
and 'Changelog').
### Fixes
- **UI:** Use rendered description in search result cards to hide unparsable
Sphinx links.
## 1.1.0
### New Features
- Add support for `gitweb` and `cgit` as Gerrit web front-ends. Those are
necessary to build the correct URLs which are pointing to a job's or role's
definition file/directory in Gerrit.
### Fixes
- Make Gerrit credentials really optional
## 1.0.0
### New Features
- **Gerrit support:** Zubbi now supports scraping of Gerrit repositories. In
contrary to GitHub, most of the necessary operations (check out files, list
directories) are done via `gitpython` as the Gerrit API does not support all
use cases. This allows the usage of Git repositories that are independent of
GitHub or Gerrit.
- **Quickstart Guide:** The README file now contains a quickstart section,
explaining how to set up zubbi-web and zubbi-scraper with a local Elasticsearch
instance and get a first set of data.
### Fixes
- Reactivate markdown rendering after breaking change in `readme_renderer`
dependency.
| zubbi | /zubbi-2.4.8.tar.gz/zubbi-2.4.8/CHANGELOG.md | CHANGELOG.md |
# Zubbi Demo on Kubernetes
Prerequisites: [Kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
installed
---
**NOTE:**
This guide is based on
[How to Set Up an Nginx Ingress with Cert-Manager on DigitalOcean Kubernetes](https://www.digitalocean.com/community/tutorials/how-to-set-up-an-nginx-ingress-with-cert-manager-on-digitalocean-kubernetes).
For more detailed information take a look at the original guide.
---
To set up the zubbi demo on a Kubernetes cluster, follow these steps:
1. Set the `KUBECONFIG` environment variable to point to your configuration
file, e.g.:
```shell
$ export KUBECONFIG=~/.kube/zubbi-k8s-config.yaml
```
2. Deploy following files in the `k8s` directory via `kubectl` to create the
deployments for Elasticsearch and Zubbi:
```shell
$ kubectl apply -f k8s/elasticsearch.yaml
$ kubectl apply -f k8s/zubbi.yaml
```
3. If you want Zubbi to be reachable from the outside, you need to set up a
Kubernetes Nginx Ingress Controller, like so:
```shell
$ kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/mandatory.yaml
$ kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/cloud-generic.yaml
```
Afterwards, you need to define an Ingress for zubbi:
```shell
$ kubectl apply -f k8s/zubbi-ingress.yaml
```
If everything is set up, you should be able to see Zubbi running on http://zubbi.example.de.
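If the page does not come up, the standard `kubectl` status commands are a good first check (these are generic commands and do not depend on the exact resource names used in the manifests):
```shell
$ kubectl get pods
$ kubectl get services
$ kubectl get ingress
```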
| zubbi | /zubbi-2.4.8.tar.gz/zubbi-2.4.8/k8s/README.md | README.md |
from collections.abc import Generator
from pytz import timezone
import requests
import os
from urllib.parse import urljoin
API_KEY = os.environ.get('ZUBIE_API_KEY', None)
CLIENT_ID = os.environ.get('ZUBIE_CLIENT_ID', None)
TZ = timezone('America/New_York')
class RestAdapter:
"""Adapter for communicating with Zubie API"""
def __init__(self, api_key=API_KEY, client_id=CLIENT_ID):
self.base_url = 'https://api.zubiecar.com/api/v2/zinc/'
self._api_key = api_key
self._client_id = client_id
self._headers = {'Zubie-Api-Key': api_key}
self._params = {'client_id': client_id}
def _get(self, resource: str, **kwargs) -> dict:
"""Base get method used for all api calls"""
params = self._params.copy() # Sets default parameters
params.update(kwargs) # Adds provided keyword arguments to the call
url = urljoin(self.base_url, resource)
return requests.get(url=url, headers=self._headers, params=params).json()
def _get_pages(self, *args, **kwargs):
"""Get function for getting pagination data"""
cursor = kwargs.get('cursor') # Cursor should be set to none initially
while cursor is not False:
result = self._get(*args, **kwargs)
cursor = result.get('cursor', False)
yield result
def get_devices(self, q=None) -> dict:
"""Lists all active and pending devices (vehicle connected hardware) in account."""
resource = 'devices'
return self._get(resource, q=q)
def get_device(self, key: str) -> dict:
"""Get single device by key"""
        resource = f'device/{key}'
        return self._get(resource)
def get_groups(self, group_keys=None, show_inactive=False) -> dict:
"""Lists groups available in the account, based on the group permissions of the user.
Groups are a way to provide hierarchical structure to account vehicles and restrict user permissions."""
resource = 'groups'
return self._get(resource, group_keys=group_keys, show_inactive=show_inactive)
def get_vehicles(self, q=None, tag_keys=None, group_keys=None, cursor=None, size=None, expand=None) -> Generator:
"""Lists Vehicles in account. Restricted based on API user's group permissions. :param q: Search vehicles by
nickname or full VIN. :type q: str :param tag_keys: Restrict results to include only vehicles with these tag
keys. Multiple tag values may be provided, treated as an OR in filter. :type tag_keys: str :param group_keys:
Restrict results to include only vehicles that are members of these groups (or their descendants). :type
group_keys: str :param cursor: The cursor string used for pagination, signifying the object ID where to start
the results. :type cursor: str :param size: The number of results to return per call. Default 50 if not
provided. :type size: str :param expand: Default: [] Items Enum:"tags" "groups" "devices" "last_trip"
Optional list of expanded properties to include in results :type expand: str
"""
resource = 'vehicles'
return self._get_pages(resource, q=q, tag_keys=tag_keys, group_keys=group_keys, cursor=cursor, size=size,
expand=expand)
def get_vehicle(self, vehicle_key: str) -> dict:
"""Retrieve a single vehicle by vehicle key"""
        resource = f'vehicle/{vehicle_key}'
        return self._get(resource)
def get_nearby_vehicles(self, lat: str, long: str, cursor=None, size=None) -> Generator:
"""Get list of nearby vehicles, using a given GPS point. Restricted based on API user's group permissions.
:param lat: Latitude of the point.
:param long: Longitude of the point.
:param: cursor: The cursor string used for pagination, signifying the object ID where to start the results.
:type cursor: str
:param size: The number of results to return per call. Default 5 if not provided
:type size: str
"""
resource = 'vehicles/nearby'
return self._get_pages(resource, lat=lat, long=long, cursor=cursor, size=size)
def get_trips(self, user_key=None, vehicle_key=None, started_after=None, started_before=None, tag_keys=None,
cursor=None, size=None, expand=None) -> Generator:
"""Get a list of trips for the account. A trip is the logical grouping of all points that are recorded from
the time the vehicle’s engine is started to the time the engine is turned off. :param user_key: Filter
results to visits to a single driver. Optional. :type user_key: str :param vehicle_key: Filter results to a
single vehicle. Optional. :type vehicle_key: str :param started_after: Filter results to only include trips
that started on or after this timestamp. ISO8601 format (if no offset provided, assumed UTC). Optional. :type
started_after: str :param started_before: Filter results to only include trips that started on or before this
timestamp. ISO8601 format (if no offset provided, assumed UTC). Optional. :type started_before: str :param
tag_keys: Restrict results to include only vehicles with these tag keys. Multiple tag values may be provided.
Takes precedence over tags query param if both provided. :type tag_keys: str :param cursor: The cursor string
used for pagination, signifying the object ID where to start the results. :type cursor: str :param size: The
number of results to return per call. Default 10 if not provided. :type size: str :param expand: Default: [
"user","vehicle","tags"] Items Enum:"user" "vehicle" "tags" Optional list of expanded properties to include
in results :type expand: str
"""
resource = 'trips'
        return self._get_pages(resource, user_key=user_key, vehicle_key=vehicle_key, started_after=started_after,
started_before=started_before, tag_keys=tag_keys, cursor=cursor, size=size,
expand=expand)
def get_trip(self, trip_key: str) -> dict:
"""Get the details for a single trip."""
        resource = f'trip/{trip_key}'
        return self._get(resource)
def get_trip_points(self, trip_key: str, cursor=None, size=None) -> Generator:
"""Get the detailed GPS points and event details for a given trip.
:param trip_key: Key for individual trip.
:param cursor: The cursor string used for pagination, signifying the object ID where to start the results.
:type cursor: str
:param size: The number of results to return per call. Default 200 if not provided
:type size: str
"""
        resource = f'trip/{trip_key}/points'
        return self._get_pages(resource, cursor=cursor, size=size)
def get_visits(self, driver_key=None, vehicle_key=None, place_key=None, entry_after=None, entry_before=None,
cursor=None, size=None) -> Generator:
"""Get a a list of visits to all places for a given driver, vehicle or place. A visit begins at the trip
point a car enters a geofence, and ends at the point they exit. :param driver_key: Filter results to visits
to a single driver. Optional. :type driver_key: str :param vehicle_key: Filter results to a single vehicle.
Optional. :type vehicle_key: str :param place_key: Filter results to a single place. Optional. :type
place_key: str :param entry_after: Filter results to only include visits that started after this timestamp.
Optional. :type entry_after: str :param entry_before: Filter results to only include visits that started
before this timestamp. Optional. :type entry_before: str :param cursor: The cursor string used for
pagination, signifying the object ID where to start the results. :type cursor: str :param size: The number of
results to return per call. Default 10 if not provided. :type size: str
"""
        resource = 'visits'  # NOTE: assumed endpoint path; the original code omitted the required resource argument
        return self._get_pages(resource, driver_key=driver_key, vehicle_key=vehicle_key, place_key=place_key,
                               entry_after=entry_after, entry_before=entry_before, cursor=cursor, size=size)
if __name__ == '__main__':
RA = RestAdapter()
devices = RA.get_devices()
groups = RA.get_groups()
vehicles = RA.get_vehicles()
trips = RA.get_trips() | zubie-api | /zubie_api-0.1-py3-none-any.whl/zubie_api/client.py | client.py |
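    # Example: the pagination helpers (get_vehicles, get_trips, ...) return
    # generators of page dicts, so iterate to fetch successive pages.
    # Requires valid ZUBIE_API_KEY / ZUBIE_CLIENT_ID credentials in the environment.
    for page in vehicles:
        print(page)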
# Zubr exchange SDK
## Simple example
```python
import logging
from pprint import pprint
from zubr import ZubrSDK, OrderType, TimeInForce
zubr_sdk = ZubrSDK(
api_key='YOUR-API-KEY-HERE',
api_secret='YOUR-API-SECRET-HERE',
)
logging.basicConfig(level=logging.INFO)
context = {
'order_placed': False,
'sell_price': '0',
}
def sell_and_cancel(message):
print(f'order placed: {message}')
order_id = message['result']['value']
# Cancel order
zubr_sdk.cancel_order(
order_id=order_id,
callback=lambda x: (
print(f'Order cancelled: {x}')
),
)
def sell_and_replace(message):
print(f'order placed: {message}')
order_id = message['result']['value']
# Replace order
zubr_sdk.replace_order(
order_id=order_id,
price=context['sell_price'],
size=2,
callback=lambda x: (
print(f'Order replaced: {x}')
),
)
# Fetch orderbook
@zubr_sdk.subscribe_orderbook
def on_orderbook(message):
print('orderbook:')
pprint(message)
if context['order_placed']:
return
instrument_id, orders = list(message['value'].items())[0]
sell_price = max(x['price'] for x in orders['asks'])
context['sell_price'] = sell_price
# Place and replace
zubr_sdk.sell(
instrument_id=instrument_id,
price=sell_price,
size=1,
order_type=OrderType.LIMIT,
time_in_force=TimeInForce.GTC,
callback=sell_and_replace,
)
# Place and cancel
zubr_sdk.sell(
instrument_id=instrument_id,
price=sell_price,
size=1,
order_type=OrderType.LIMIT,
time_in_force=TimeInForce.GTC,
callback=sell_and_cancel,
)
context['order_placed'] = True
# Fetch last trades
@zubr_sdk.subscribe_last_trades
def on_last_trades(message):
print('last trades:')
pprint(message)
zubr_sdk.run_forever()
``` | zubr-temp | /zubr_temp-0.2.4.tar.gz/zubr_temp-0.2.4/README.md | README.md |
import hashlib
import json
import logging
from datetime import timezone, datetime
from decimal import Decimal
from enum import IntEnum, Enum
from hmac import HMAC
from typing import Callable, Union, Dict, Optional
from websocket import WebSocketApp
__all__ = [
    'ZubrSDK',
    'ZubrSDKError',
    'ZubrSDKLoginError',
    'OrderSide',
    'OrderType',
    'TimeInForce',
]
logger = logging.getLogger(__name__)
class ZubrSDKError(Exception):
"""
Common ZubrSDK error
"""
def __init__(self, message, code=None, response=None):
super().__init__(message)
self.message = message
self.code = code
self.response = response
class ZubrSDKLoginError(ZubrSDKError):
"""
This exception is raised when login credentials are wrong
"""
class _Method(IntEnum):
CHANNEL = 1
RPC = 9
def _decode_decimal(encoded_decimal: dict):
"""
Decodes decimal from internal format
"""
return Decimal(
encoded_decimal['mantissa']
).scaleb(
encoded_decimal['exponent']
)
def _encode_decimal(value: Union[Decimal, int, str]):
"""
Encodes decimal into internal format
"""
value = Decimal(value)
exponent = value.as_tuple().exponent
mantissa = int(value.scaleb(-exponent))
return {
'mantissa': mantissa,
'exponent': exponent
}
def _decode_response(response: Union[dict, list]) -> Union[dict, list]:
"""
Decodes internal decimal representation into decimal.Decimal()
"""
if isinstance(response, dict):
if 'mantissa' in response and 'exponent' in response:
return _decode_decimal(response)
else:
for key, value in response.items():
response[key] = _decode_response(value)
return response
elif isinstance(response, list):
for item in response:
_decode_response(item)
return response
def login_required(fn) -> Callable:
def wrap(self, *args, **kwargs):
if not (self._api_key and self._api_secret):
raise ZubrSDKError(
'Login required to perform this operation'
)
return fn(self, *args, **kwargs)
return wrap
CallbackType = Callable[[Dict], None]
class OrderType(str, Enum):
LIMIT = 'LIMIT'
POST_ONLY = 'POST_ONLY'
class OrderSide(str, Enum):
BUY = 'BUY'
SELL = 'SELL'
class TimeInForce(str, Enum):
SESSION = 'SESSION'
GTC = 'GTC'
IOC = 'IOC'
FOK = 'FOK'
class ZubrSDK:
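    """Websocket client for the Zubr exchange API: handles login, RPC requests
    (placing/replacing/cancelling orders, candle queries) and channel subscriptions."""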
def __init__(
self,
api_key: str = None,
api_secret: str = None,
api_url='wss://zubr.io/api/v1/ws',
default_callback: CallbackType = None,
):
self._api_key: str = api_key
self._api_secret: str = api_secret
self._api_url: str = api_url.rstrip('/')
self._logged_in: bool = False
self._message_id: int = 0
self._channel_callbacks: Dict[str, CallbackType] = {}
self._message_callbacks: Dict[str, CallbackType] = {}
self._error_callback: Optional[CallbackType] = None
self._default_callback: Optional[CallbackType] = default_callback
self._delayed_requests = []
self._ws_app: WebSocketApp = WebSocketApp(
api_url,
header=[
"User-Agent: ZubrSDK",
],
on_open=self._on_open,
on_message=self._on_message,
)
self._ws_open: bool = False
def _resubscribe(self):
channel_callbacks = self._channel_callbacks
self._channel_callbacks = {}
for channel, callback in channel_callbacks.items():
self._subscribe(channel, callback)
def subscribe_errors(self, callback: CallbackType):
"""
Callback will be called when server sends response errors
"""
self._error_callback = callback
def subscribe_orders(self, callback: CallbackType):
self._subscribe('orders', callback)
def subscribe_order_fills(self, callback: CallbackType):
self._subscribe('orderFills', callback)
def subscribe_instruments(self, callback: CallbackType):
self._subscribe('instruments', callback)
def subscribe_last_trades(self, callback: CallbackType):
self._subscribe('lasttrades', callback)
def subscribe_orderbook(self, callback: CallbackType):
self._subscribe(f'orderbook', callback)
def subscribe_balance(self, callback: CallbackType):
self._subscribe(f'balance', callback)
def subscribe_candles(self, instrument_id: int, resolution: str, callback: CallbackType):
self._subscribe(f'candles:{instrument_id}:{resolution}', callback)
@login_required
def place_order(
self,
instrument_id: int,
price: Union[Decimal, int, str],
size: int,
order_type: OrderType,
time_in_force: TimeInForce,
side: OrderSide,
callback: CallbackType,
):
"""
:param instrument_id: int
:param price: one of: Decimal, int, str
:param size: int
:param order_type: one of: 'LIMIT', 'POST_ONLY'
:param time_in_force: one of: 'GTC', 'IOC', 'FOK'
:param side: one of: 'BUY', 'SELL'
:param callback: function that will be called when server sends place_order response
:return: None
"""
return self._rpc(
method='placeOrder',
params={
'instrument': instrument_id,
'price': _encode_decimal(price),
'size': size,
'type': order_type,
'timeInForce': time_in_force,
'side': side,
},
callback=callback
)
@login_required
def replace_order(
self,
order_id: str,
price: Union[Decimal, int, str],
size: int,
callback: CallbackType,
):
return self._rpc(
method='replaceOrder',
params={
'orderId': order_id,
'price': _encode_decimal(price),
'size': size,
},
callback=callback
)
@login_required
def buy(
self,
instrument_id: int,
price: Union[Decimal, int, str],
size: int,
order_type: OrderType,
time_in_force: TimeInForce,
callback: CallbackType,
):
"""
Places a buy order
Shortcut for place_order method
"""
return self.place_order(
instrument_id=instrument_id,
price=price,
size=size,
order_type=order_type,
time_in_force=time_in_force,
callback=callback,
side=OrderSide.BUY,
)
@login_required
def sell(
self,
instrument_id: int,
price: Union[Decimal, int, str],
size: int,
order_type: OrderType,
time_in_force: TimeInForce,
callback: CallbackType,
):
"""
Places a sell order
Shortcut for place_order method
"""
return self.place_order(
instrument_id=instrument_id,
price=price,
size=size,
order_type=order_type,
time_in_force=time_in_force,
callback=callback,
side=OrderSide.SELL,
)
@login_required
def cancel_order(
self,
order_id: str,
callback: CallbackType
):
return self._rpc(
method='cancelOrder',
params=order_id,
callback=callback
)
def get_candles_range(
self,
instrument_id: int,
resolution: str,
timestamp_from: int,
timestamp_to: int,
callback: CallbackType
):
return self._rpc(
'getCandlesRange', {
"instrumentId": instrument_id,
"resolution": resolution,
"from": timestamp_from,
"to": timestamp_to
},
callback=callback
)
def run_forever(self):
self._ws_app.run_forever(
ping_interval=15,
suppress_origin=True
)
@staticmethod
def _encode_hmac_message(message):
buffer = []
for key in sorted(message.keys()):
buffer.append(f'{key}={message[key]}')
return ';'.join(buffer).encode('utf-8')
def _on_login(self, response: dict):
if 'error' in response:
raise ZubrSDKError(
'An error occurred when logging in',
response=response
)
result = response['result']
if result['tag'] == 'err':
raise ZubrSDKLoginError(
'Wrong credentials',
code=result.get('value', {}).get('code')
)
self._logged_in = True
self._send_delayed_requests()
def _try_login(self):
"""
Tries to log in if credentials presented
"""
if not (self._api_key and self._api_secret):
self._send_delayed_requests()
return
utc_now = datetime.now(timezone.utc)
timestamp = int(utc_now.timestamp())
auth_code = HMAC(
key=bytes.fromhex(self._api_secret),
msg=self._encode_hmac_message({
'key': self._api_key,
'time': timestamp,
}),
digestmod=hashlib.sha256
).digest().hex()
self._rpc(
'loginSessionByApiToken',
{
'apiKey': self._api_key,
'time': {
'seconds': timestamp,
'nanos': 0
},
'hmacDigest': auth_code,
},
callback=self._on_login
)
def _send_delayed_requests(self):
delayed_requests = self._delayed_requests
self._delayed_requests = []
for request in delayed_requests:
self._ws_app.send(request)
def _create_request(
self,
method: _Method,
params: dict,
) -> dict:
"""
Creates request message
"""
self._message_id += 1
return {
'id': self._message_id,
'method': method,
'params': params
}
def _subscribe(self, channel: str, callback: CallbackType):
"""
Subscribes to the given channel
"""
if channel in self._channel_callbacks:
raise Exception(
f'Already subscribed to channel {channel!r}'
)
self._channel_callbacks[channel] = callback
request = self._create_request(
_Method.CHANNEL,
{'channel': channel}
)
self._send(request)
def _send(self, request: dict):
request = json.dumps(request)
if self._ws_open:
self._ws_app.send(request)
else:
self._delayed_requests.append(request)
def _rpc(
self,
method: str,
params=None,
callback: CallbackType = None
):
"""
Sends rpc request to the server
"""
request = self._create_request(
_Method.RPC,
{
"data": {
"method": method,
"params": params or {}
}
}
)
if callback:
self._message_callbacks[request['id']] = callback
self._send(request)
def _on_open(self):
self._ws_open = True
self._try_login()
def _on_message(
self,
message: str,
):
data = json.loads(message)
data = _decode_response(data)
if 'id' in data:
callback = self._message_callbacks.pop(data['id'], None)
if callback:
callback(data)
elif 'error' in data:
error_message = data['error'].get('message')
error_code = data['error'].get('code')
if self._error_callback is None:
raise ZubrSDKError(
f'Server sent error: {error_message or error_code}',
code=error_code,
response=data
)
else:
self._error_callback(data)
elif 'channel' in data['result']:
result: dict = data['result']
channel_key = result['channel']
data = result['data']
try:
channel_handler = self._channel_callbacks[channel_key]
except KeyError:
pass
else:
channel_handler(data)
elif self._default_callback:
            self._default_callback(data)
| zubr-temp | /zubr_temp-0.2.4.tar.gz/zubr_temp-0.2.4/zubr/__init__.py | __init__.py
# Zubr exchange SDK
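## Installation
Assuming the package is published on PyPI under the same name as the import (`zubr`), it can be installed with pip:
```
pip install zubr
```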
## Simple example
```python
import logging
from pprint import pprint
from zubr import ZubrSDK, OrderType, TimeInForce
zubr_sdk = ZubrSDK(
api_key='YOUR-API-KEY-HERE',
api_secret='YOUR-API-SECRET-HERE',
)
logging.basicConfig(level=logging.INFO)
context = {
'order_placed': False,
'sell_price': '0',
}
def sell_and_cancel(message):
print(f'order placed: {message}')
order_id = message['result']['value']
# Cancel order
zubr_sdk.cancel_order(
order_id=order_id,
callback=lambda x: (
print(f'Order cancelled: {x}')
),
)
def sell_and_replace(message):
print(f'order placed: {message}')
order_id = message['result']['value']
# Replace order
zubr_sdk.replace_order(
order_id=order_id,
price=context['sell_price'],
size=2,
callback=lambda x: (
print(f'Order replaced: {x}')
),
)
# Fetch orderbook
@zubr_sdk.subscribe_orderbook
def on_orderbook(message):
print('orderbook:')
pprint(message)
if context['order_placed']:
return
instrument_id, orders = list(message['value'].items())[0]
sell_price = max(x['price'] for x in orders['asks'])
context['sell_price'] = sell_price
# Place and replace
zubr_sdk.sell(
instrument_id=instrument_id,
price=sell_price,
size=1,
order_type=OrderType.LIMIT,
time_in_force=TimeInForce.GTC,
callback=sell_and_replace,
)
# Place and cancel
zubr_sdk.sell(
instrument_id=instrument_id,
price=sell_price,
size=1,
order_type=OrderType.LIMIT,
time_in_force=TimeInForce.GTC,
callback=sell_and_cancel,
)
context['order_placed'] = True
# Fetch last trades
@zubr_sdk.subscribe_last_trades
def on_last_trades(message):
print('last trades:')
pprint(message)
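# Fetch historical candles (sketch: the instrument id, resolution string
# and timestamp range below are placeholder assumptions, not values taken
# from the exchange documentation)
zubr_sdk.get_candles_range(
    instrument_id=1,
    resolution='1m',
    timestamp_from=1577836800,
    timestamp_to=1577923200,
    callback=lambda x: (
        print(f'candles: {x}')
    ),
)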
zubr_sdk.run_forever()
```
| zubr | /zubr-0.2.2.tar.gz/zubr-0.2.2/README.md | README.md
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu=0, sigma=1):
Distribution.__init__(self, mu, sigma)
def calculate_mean(self):
"""Function to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Function to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.calculate_mean()
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def plot_histogram(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Function to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        axes[1].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Function to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}".format(self.mean, self.stdev) | zuby-test-dsnd-distributions | /zuby_test_dsnd_distributions-0.1.tar.gz/zuby_test_dsnd_distributions-0.1/zuby_test_dsnd_distributions/Gaussiandistribution.py | Gaussiandistribution.py |
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Binomial(Distribution):
""" Binomial distribution class for calculating and
visualizing a Binomial distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats to be extracted from the data file
p (float) representing the probability of an event occurring
n (int) number of trials
"""
def __init__(self, prob=.5, size=20):
self.n = size
self.p = prob
Distribution.__init__(self, self.calculate_mean(), self.calculate_stdev())
def calculate_mean(self):
"""Function to calculate the mean from p and n
Args:
None
Returns:
float: mean of the data set
"""
self.mean = self.p * self.n
return self.mean
def calculate_stdev(self):
"""Function to calculate the standard deviation from p and n.
Args:
None
Returns:
float: standard deviation of the data set
"""
self.stdev = math.sqrt(self.n * self.p * (1 - self.p))
return self.stdev
def replace_stats_with_data(self):
"""Function to calculate p and n from the data set
Args:
None
Returns:
float: the p value
float: the n value
"""
self.n = len(self.data)
self.p = 1.0 * sum(self.data) / len(self.data)
self.mean = self.calculate_mean()
self.stdev = self.calculate_stdev()
def plot_bar(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.bar(x = ['0', '1'], height = [(1 - self.p) * self.n, self.p * self.n])
plt.title('Bar Chart of Data')
plt.xlabel('outcome')
plt.ylabel('count')
def pdf(self, k):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
a = math.factorial(self.n) / (math.factorial(k) * (math.factorial(self.n - k)))
b = (self.p ** k) * (1 - self.p) ** (self.n - k)
return a * b
def plot_bar_pdf(self):
"""Function to plot the pdf of the binomial distribution
Args:
None
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
x = []
y = []
# calculate the x values to visualize
for i in range(self.n + 1):
x.append(i)
y.append(self.pdf(i))
# make the plots
plt.bar(x, y)
plt.title('Distribution of Outcomes')
plt.ylabel('Probability')
plt.xlabel('Outcome')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Binomial distributions with equal p
Args:
other (Binomial): Binomial instance
Returns:
Binomial: Binomial distribution
"""
try:
assert self.p == other.p, 'p values are not equal'
except AssertionError as error:
raise
result = Binomial()
result.n = self.n + other.n
result.p = self.p
result.calculate_mean()
result.calculate_stdev()
return result
def __repr__(self):
"""Function to output the characteristics of the Binomial instance
Args:
None
Returns:
            string: characteristics of the Binomial
"""
return "mean {}, standard deviation {}, p {}, n {}".\
            format(self.mean, self.stdev, self.p, self.n)
| zuby_test_dsnd_distributions | /zuby_test_dsnd_distributions-0.1.tar.gz/zuby_test_dsnd_distributions-0.1/zuby_test_dsnd_distributions/Binomialdistribution.py | Binomialdistribution.py
========
Zucchini
========
.. image:: https://img.shields.io/pypi/v/zucchini.svg
:target: https://pypi.python.org/pypi/zucchini
.. image:: https://travis-ci.com/zucchini/zucchini.svg?branch=master
:target: https://travis-ci.com/zucchini/zucchini
.. image:: https://readthedocs.org/projects/zucchini/badge/?version=latest
:target: https://zucchini.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
.. image:: https://pyup.io/repos/github/zucchini/zucchini/shield.svg
:target: https://pyup.io/repos/github/zucchini/zucchini/
:alt: Updates
Zucchini is an automatic grader tool for use in grading programming assignments.
* Free software: Apache Software License 2.0
* Documentation: https://zucchini.readthedocs.io.
Installation
------------
::
$ pip install --user zucchini
$ zucc --help
Getting Started with Development
--------------------------------
After cloning this repo and installing virtualenv, run
::
$ virtualenv -p python3 venv
$ . venv/bin/activate
$ pip install -r requirements.txt
$ pip install -r requirements_dev.txt
$ zucc --help
Features
--------
* Unified grading infrastructure: eliminates maintenance load of ad-hoc
per-assignment graders
* Separates test results from computed grades: graders provide test
  results which are stored on disk, and then zucchini calculates the grade
based on the weight of each test. That is, graders do not perform
grade calculation; they only gather information about students' work
* Simple configuration: update one YAML file and store your graders in
git repositories for all your TAs
* Relative weighting: no more twiddling with weights to get them to add
up to 100
* Import submissions from Gradescope, Canvas Assignments, or Canvas
Quizzes
* No more copy-and-pasting grades and comments: automated upload of
Canvas grades and gradelogs
* Flatten (extract) archived submissions
* Gradescope integration: generate a Gradescope autograder tarball for
an assignment with one command
Credits
---------
* Austin Adams (@ausbin) for creating lc3grade, which eventually became
zucchini
* Cem Gokmen (@skyman) for suggesting converting lc3grade into a
generalized autograder for more than just C and LC-3 homeworks, and
creating the initial structure of zucchini
* Patrick Tam (@pjztam) for implementing a bunch of graders, gradelogs,
and gradelog upload
* Kexin Zhang (@kexin-zhang) for exploring Canvas bulk submission
downloads and for creating the demo downloader, which changed our
lives
* Travis Adams (@travis-adams) for nothing
| zucchini | /zucchini-2.1.2.tar.gz/zucchini-2.1.2/README.rst | README.rst |
.. highlight:: shell
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every
little bit helps, and credit will always be given.
You can contribute in many ways:
Types of Contributions
----------------------
Report Bugs
~~~~~~~~~~~
Report bugs at https://github.com/zucchini/zucchini/issues.
If you are reporting a bug, please include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Fix Bugs
~~~~~~~~
Look through the GitHub issues for bugs. Anything tagged with "bug"
and "help wanted" is open to whoever wants to implement it.
Implement Features
~~~~~~~~~~~~~~~~~~
Look through the GitHub issues for features. Anything tagged with "enhancement"
and "help wanted" is open to whoever wants to implement it.
Write Documentation
~~~~~~~~~~~~~~~~~~~
Zucchini could always use more documentation, whether as part of the
official Zucchini docs, in docstrings, or even on the web in blog posts,
articles, and such.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an issue at https://github.com/zucchini/zucchini/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
are welcome :)
Get Started!
------------
Ready to contribute? Here's how to set up `zucchini` for local development.
1. Fork the `zucchini` repo on GitHub.
2. Clone your fork locally::
$ git clone [email protected]:your_name_here/zucchini.git
3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::
$ mkvirtualenv zucchini
$ cd zucchini/
$ python setup.py develop
4. Create a branch for local development::
$ git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
5. When you're done making changes, check that your changes pass flake8 and the tests, including testing other Python versions with tox::
$ flake8 zucchini tests
$ python setup.py test or py.test
$ tox
To get flake8 and tox, just pip install them into your virtualenv.
6. Commit your changes and push your branch to GitHub::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push origin name-of-your-bugfix-or-feature
7. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in README.rst.
3. The pull request should work for Python 3.4-3.8, and for PyPy. Check
https://travis-ci.org/zucchini/zucchini/pull_requests
and make sure that the tests pass for all supported Python versions.
Tips
----
To run a subset of tests::
$ python -m unittest tests.test_zucchini
| zucchini | /zucchini-2.1.2.tar.gz/zucchini-2.1.2/CONTRIBUTING.rst | CONTRIBUTING.rst |
==================
Config File Format
==================
Configuration files need to be valid YAML files that contain the following
fields:
.. code:: yaml
name: # Friendly name for the assignment
author: # Author's name (and email if possible)
components:
- name: # Friendly name for the component
weight: # Weight of the component (integer)
files: # Files that need to be copied from the submission folder
grading-files: # Files that need to be copied from the grading folder
backend: # Name of the Python class for the grader (e.g. PromptGrader)
backend-options:
# The grader backend's options come here - these are listed on the grader's docs
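A minimal filled-in example is sketched below (the component name, weight,
file names, and empty backend-options are placeholder assumptions;
PromptGrader appears only because it is mentioned above, and its actual
options are listed in that grader's own documentation):

.. code:: yaml

    name: Homework 1
    author: Jane Doe <[email protected]>
    components:
      - name: Part 1
        weight: 60
        files: [part1.c]
        grading-files: [part1_tests.c]
        backend: PromptGrader
        backend-options: {}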
| zucchini | /zucchini-2.1.2.tar.gz/zucchini-2.1.2/docs/config-files.rst | config-files.rst |