code
stringlengths 1
5.19M
| package
stringlengths 1
81
| path
stringlengths 9
304
| filename
stringlengths 4
145
|
---|---|---|---|
# -*-coding:UTF-8 -*-
#
# Copyright 2020-2021 VMware, Inc.
# SPDX-License-Identifier: EPL-2.0
#
import sys
import unittest
sys.path.append('../sgtnclient')
from sgtn_util import FileUtil, NetUtil, SysUtil
from sgtn_debug import SgtnDebug
from util import Util
import I18N
class TestClient(unittest.TestCase):
    """Unit tests for the sgtn_util helpers: FileUtil, NetUtil and SysUtil."""

    def test_file_util(self):
        """Exercise FileUtil text/json/yaml reading, a JSON write/read
        round-trip, directory listing, and SgtnDebug logging."""
        print('\n--- unittest --- %s --- python %s\n' % (
            sys._getframe().f_code.co_name, sys.version_info.major))
        # Plain UTF-8 text file read.
        text = FileUtil.read_text_file('data/data_utf8.txt')
        self.assertIn('cc=AA{x}BB{y}CC', text)
        # JSON read.
        dt = FileUtil.read_json_file('data/data.json')
        self.assertEqual(dt['aa'], 'aaa')
        print('--- json --- %s ---' % dt)
        # JSON write/read round-trip, including a non-ASCII value.
        dt['add'] = 'über'
        FileUtil.save_json_file('./log/data2.json', dt)
        dtLoad = FileUtil.read_json_file('./log/data2.json')
        self.assertEqual(dtLoad['add'], 'über')
        # YAML config read through the datatree helper.
        dt = FileUtil.read_datatree('config/sgtn_online_only.yml')
        self.assertEqual(dt['cache_type'], 'by_key')
        print('--- yaml --- %s ---' % dt['cache_type'])
        # Directory listing returns (directories, files).
        dir_list, file_list = FileUtil.get_dir_info('data')
        print('--- dir_list --- %s ---' % dir_list)
        print('--- file_list --- %s ---' % len(file_list))
        self.assertIn('http_response.txt', file_list)
        # Debug logging accepts strings and dicts; file logging stays disabled.
        #SgtnDebug.set_internal_log('./log/debug.txt')
        SgtnDebug.log_text('add', 'aaa')
        SgtnDebug.log_text('add', 'bbb')
        SgtnDebug.log_text('add', {'aa': 'aaa'})

    def test_net_util(self):
        """Exercise NetUtil HTTP helpers against simulated (canned) responses,
        including ETag-based 304 handling."""
        print('\n--- unittest --- %s --- python %s\n' % (
            sys._getframe().f_code.co_name, sys.version_info.major))
        # Route HTTP calls through canned responses instead of the network.
        NetUtil.simulate_data = Util.load_response(['data/http_response.txt'])
        dt = FileUtil.read_datatree('config/sgtn_online_only.yml')
        online_url = dt['online_service_url']
        # Keep scheme//host from the configured URL, then append the API path.
        parts = online_url.split('/')[:3]
        parts.append('i18n/api/v2/translation/products/PYTHON1/versions/1.0.0/localelist')
        url = '/'.join(parts)
        text = NetUtil.http_get_text(url)
        self.assertIn('productName', text)
        code, dt = NetUtil.http_get(url, None)
        self.assertEqual(code, 200)
        self.assertIn('data', dt['result'])
        # Re-requesting with the returned ETag must yield HTTP 304.
        etag, max_age = NetUtil.get_etag_maxage(dt['headers'])
        headers = {'If-None-Match': etag}
        code, dt = NetUtil.http_get(url, headers)
        self.assertEqual(code, 304)

    def test_sys_util(self):
        """Check locale fallback normalization for mixed-case locale tags."""
        print('\n--- unittest --- %s --- python %s\n' % (
            sys._getframe().f_code.co_name, sys.version_info.major))
        locale = SysUtil.get_fallback_locale('ZH_cn')
        print('--- locale --- %s ---' % locale)
        self.assertEqual(locale, 'zh-Hans')
        locale = SysUtil.get_fallback_locale('EN_us')
        print('--- locale --- %s ---' % locale)
        self.assertEqual(locale, 'en')
# Run the tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
zzz001
|
/zzz001-0.0.4.tar.gz/zzz001-0.0.4/test/test_sgtn_util.py
|
test_sgtn_util.py
|
# -*-coding:UTF-8 -*-
#
# Copyright 2020-2021 VMware, Inc.
# SPDX-License-Identifier: EPL-2.0
#
import sys
import unittest
import time
import threading
from threading import Thread
sys.path.append('../sgtnclient')
# Python 2 only: reinstall setdefaultencoding so UTF-8 text round-trips.
if sys.version_info.major == 2:
    # Support utf8 text
    reload(sys)
    sys.setdefaultencoding('utf-8')
import I18N

count = 0  # sequential id assigned to each WorkThread created
WAIT = 6   # seconds each worker sleeps before reading its locale back
from util import logger, Util
class WorkThread(Thread):
    """Worker that sets a per-thread locale, waits, then logs the locale it
    reads back — checking that I18N keeps the current locale thread-local."""

    def __init__(self, locale):
        # Take a sequential id from the module-level counter.
        global count
        count += 1
        Thread.__init__(self)
        self.locale = locale
        self.count = count

    def run(self):
        global logger
        I18N.set_current_locale(self.locale)
        # Sleep so many workers overlap; each must still see its own locale.
        time.sleep(WAIT)
        theLocale = I18N.get_current_locale()
        thid = threading.current_thread().ident
        logger.info('--- %s --- th%s --- %s ---' % (self.count, thid, theLocale))
class TestMultiTask(unittest.TestCase):
    """Concurrency tests: per-thread and async handling of the current locale."""

    def do_test_current_locale_in_threads(self):
        """Spawn batches of threads with rotating locales; each thread logs
        the locale it observes after a delay."""
        global logger
        locales = ['en', 'en-US', 'de', 'zh-CN', 'fr']
        for k in range(3):
            for i in range(100):
                th = WorkThread(locales[i % len(locales)])
                th.start()
            time.sleep(1)
            logger.info('>\n>\n>\n')
        # The main thread's locale must be independent of the workers'.
        I18N.set_current_locale('de')
        theLocale = I18N.get_current_locale()
        # Wait for outstanding workers to finish logging.
        time.sleep(WAIT)

    def do_test_current_locale_in_async(self):
        """Run the async variant only where asyncio support is available."""
        global logger
        if Util.is_async_supported():
            from async_util import AsyncWork
            AsyncWork().hello()

    def test_all(self):
        # Run both scenarios from a single test to keep ordering fixed.
        self.do_test_current_locale_in_threads()
        self.do_test_current_locale_in_async()
        logger.info('--- end ---')
        time.sleep(1)
# Run the tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
zzz001
|
/zzz001-0.0.4.tar.gz/zzz001-0.0.4/test/test_sgtn_multitask.py
|
test_sgtn_multitask.py
|
# -*-coding:UTF-8 -*-
#
# Copyright 2020-2021 VMware, Inc.
# SPDX-License-Identifier: EPL-2.0
#
import unittest
import os
import time
import json
import sys
sys.path.append('../sgtnclient')
import I18N
from sgtn_util import FileUtil, NetUtil
from util import Util, allTestData
# One test plan per product/configuration combination to exercise.
plans = [
    {'product': 'PYTHON1', 'config': 'config/sgtn_online_only.yml'},
    {'product': 'PYTHON2', 'config': 'config/sgtn_offline_only.yml'},
    {'product': 'PYTHON3', 'config': 'config/sgtn_online_localsource.yml'},
    {'product': 'PYTHON4', 'config': 'config/sgtn_online_default.yml'},
    {'product': 'PYTHON5', 'config': 'config/sgtn_offline_default.yml'}
]

VERSION = '1.0.0'
COMPONENT = 'about'  # default component name used by the test data
LOCALE = 'de'        # locale set as "current" throughout the scenario
class TestClient(unittest.TestCase):
    """End-to-end translation API tests, run once per product/config plan."""

    def prepare_sub_path(self, sub):
        # Ensure a subdirectory exists next to this test file.
        current_path = os.path.dirname(__file__)
        sub_path = os.path.join(current_path, sub)
        if not os.path.exists(sub_path):
            os.makedirs(sub_path)

    def show(self, text1, text2, value):
        print('--- %s --- %s --- %s ---' % (text1, text2, value))

    def check_locale(self, trans, locale):
        # Print the fallback chosen for a raw (possibly mixed-case) tag.
        fallback_locale = trans.get_locale_supported(locale)
        self.show('locale', locale, fallback_locale)

    def dict2string(self, dict):
        # NOTE(review): parameter name shadows the builtin 'dict'.
        return json.dumps(dict, ensure_ascii = False, indent = 2)

    def need_wait(self, cfg_info):
        # Mixed local+remote configurations load remote data asynchronously,
        # so the test must wait before asserting translated values.
        if (cfg_info.get('local') and cfg_info.get('remote')):
            return True
        return False

    def do_test(self, plan):
        """Run the full translation scenario for one product/config plan."""
        cfg = I18N.add_config_file(
            plan['config'],
            {'$PRODUCT': plan['product'], '$VERSION': VERSION})
        self.assertEqual(cfg.get_info()['version'], VERSION)
        start = time.time()
        # Setting the same locale twice must be harmless.
        I18N.set_current_locale(LOCALE)
        I18N.set_current_locale(LOCALE)
        current = I18N.get_current_locale()
        print('--- current --- %s ---' % current)
        self.assertEqual(current, 'de')
        rel = I18N.get_release(plan['product'], VERSION)
        cfg = rel.get_config()
        cfg_info = cfg.get_info()
        self.show('config', 'info', self.dict2string(cfg_info))
        trans = rel.get_translation()
        self.check_locale(trans, 'ZH_cn')
        self.check_locale(trans, 'EN_us')
        # Data-driven test groups defined in data/test_define.txt.
        Util.run_test_data(self, trans, 'TestGetString1')
        Util.run_test_data(self, trans, 'TestGetString1T')
        if Util.is_async_supported():
            Util.run_test_data(self, trans, 'TestGetString1A')
        Util.run_test_data(self, trans, 'TestGetString2')
        # Pick the group matching whether default and source locale differ.
        groupName = 'TestGetStringSameLocale'
        if cfg.get_info()['default_locale'] != cfg.get_info()['source_locale']:
            groupName = 'TestGetStringDifferentLocale'
        Util.run_test_data(self, trans, groupName)
        Util.run_test_data(self, trans, 'TestGetStringTemp')
        I18N.set_current_locale(LOCALE)
        # A wrong component must not crash; just report what came back.
        found = trans.get_string('TT', 'about.title')
        print('--- found --- wrong component --- %s ---' % found)
        if (self.need_wait(cfg_info)):
            time.sleep(5)
        # Positional placeholder formatting.
        found = trans.get_string(None, 'about.title', format_items = ['11', '22'])
        print('--- found --- format in array --- %s ---' % found)
        self.assertEqual(found, 'Über Version 22 of Product 11')
        # Named placeholder formatting.
        found = trans.get_string(None, 'about.title2', format_items = {'x': 'ee', 'y': 'ff'})
        print('--- found --- format in dict --- %s ---' % found)
        self.assertEqual(found, 'Über Version ee of Product ff')
        spent = time.time() - start
        data = trans.get_locale_strings('en-US', True)
        print('--- source --- en-US --- %s ---' % data)
        data = trans.get_locale_strings('en-US', False)
        print('--- translate --- en-US --- %s ---' % data)
        data = trans.get_locale_strings('de', False)
        print('--- translate --- de --- %s ---' % data)
        if (self.need_wait(cfg_info)):
            Util.run_test_data(self, trans, 'TestGetString3')
        # When recording is enabled, dump captured HTTP traffic for reuse.
        if NetUtil.record_data['enable']:
            time.sleep(5)
            FileUtil.save_json_file('data/simulate.json', NetUtil.record_data['records'])
        time.sleep(1)
        print('--- test --- end --- %s ---' % spent)

    def test_api(self):
        print('\n--- unittest --- %s --- python %s\n' % (
            sys._getframe().f_code.co_name, sys.version_info.major))
        # Serve canned HTTP responses; do not re-record them.
        NetUtil.simulate_data = Util.load_response(['data/http_response.txt'])
        NetUtil.record_data['enable'] = False
        Util.load_test_data(['data/test_define.txt'])
        print('--- test start ---')
        self.prepare_sub_path('log')
        self.prepare_sub_path('singleton')
        for i in range(len(plans)):
            self.do_test(plans[i])
# Run the tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
zzz001
|
/zzz001-0.0.4.tar.gz/zzz001-0.0.4/test/test_sgtn_client.py
|
test_sgtn_client.py
|
#!/usr/bin/env python
# coding: utf-8
from setuptools import setup
# Packaging metadata for the zzzPyPiTest demo distribution.
setup(
    name='zzzPyPiTest',
    # NOTE(review): test/__init__.py prints 'version: 0.0.1' — confirm which
    # version string is current.
    version='0.0.2',
    author='zhaozizhe',
    author_email='[email protected]',
    url='https://github.com/namezzz/PO-DRL',
    description=u'pypi测试',
    packages=['test'],
    install_requires=[],
    entry_points={
        # Console commands map to functions in test/__init__.py.
        # NOTE(review): a script literally named 'version' is very generic
        # and may collide with other installed tools — confirm intentional.
        'console_scripts': [
            'helloTest=test:hello',
            'version=test:version'
        ]
    }
)
|
zzzPyPiTest
|
/zzzPyPiTest-0.0.2.tar.gz/zzzPyPiTest-0.0.2/setup.py
|
setup.py
|
#!/usr/bin/env python
# encoding=utf-8
def hello():
    """Print the demo greeting used by the ``helloTest`` console script."""
    greeting = 'hello PyPi!!'
    print(greeting)
def version():
    """Print the demo version string used by the ``version`` console script."""
    version_text = 'version: 0.0.1!!!!!'
    print(version_text)
|
zzzPyPiTest
|
/zzzPyPiTest-0.0.2.tar.gz/zzzPyPiTest-0.0.2/test/__init__.py
|
__init__.py
|
#!/usr/bin/env python
# coding: utf-8
from setuptools import setup
# Packaging metadata for the zzzPyPiTest2 demo distribution.
setup(
    name='zzzPyPiTest2',
    # NOTE(review): test/__init__.py prints 'version: 0.0.1' — confirm which
    # version string is current.
    version='0.0.2',
    author='zhaozizhe',
    author_email='[email protected]',
    url='https://github.com/namezzz/PO-DRL',
    description=u'pypi测试',
    packages=['test'],
    install_requires=[],
    entry_points={
        # Console commands map to functions in test/__init__.py.
        'console_scripts': [
            'helloTest=test:hello',
            'version=test:version'
        ]
    }
)
|
zzzPyPiTest2
|
/zzzPyPiTest2-0.0.2.tar.gz/zzzPyPiTest2-0.0.2/setup.py
|
setup.py
|
#!/usr/bin/env python
# encoding=utf-8
def hello():
    """Print the demo greeting used by the ``helloTest`` console script."""
    greeting = 'hello PyPi!!'
    print(greeting)
def version():
    """Print the demo version string used by the ``version`` console script."""
    version_text = 'version: 0.0.1!!!!!'
    print(version_text)
|
zzzPyPiTest2
|
/zzzPyPiTest2-0.0.2.tar.gz/zzzPyPiTest2-0.0.2/test/__init__.py
|
__init__.py
|
=============
zzzeeksphinx
=============
This is zzzeek's own Sphinx layout, used by SQLAlchemy.
This layout is first and foremost pulled in for the SQLAlchemy documentation
builds (and possibly other related projects).
.. note:: The stability of zzzeeksphinx is **not** guaranteed and APIs and
behaviors can change at any time. For use in other projects, please fork
and/or adapt any portion of useful code as needed.
Features include:
* Uses Mako templates instead of Jinja, for more programmatic capabilities
inside of templates.
* Layout includes an independently scrollable sidebar
* A unique (to Sphinx) "contextual" sidebar contents that shows the
current page in context with all sibling pages (like that of MySQL's docs).
This is a form of TOC that Sphinx doesn't typically have a lot of
capability to do (well it could, with some simple feature adds), but
IMO this kind of navigation is critical for very large and nested
documentation sets, so that the navbar stays relatively small yet provides
context as to where you are in the docs and what else is locally available.
* Modifications to autodoc which illustrate inherited classes and bases;
method documentation indicates whether a method is inherited from the
base or overridden.
* A "dynamic base" feature that will, under ReadTheDocs, pull in optional
``.mako`` and ``.py`` files from the website of your choice
that will serve as an alternate base template and a source of extra
config setup, respectively, allowing the layout to be integrated into
the layout of an external site when viewing on the web.
* A "viewsource" extension that can provide highlighted sourcecode to any
Python file arbitrarily.
* SQLAlchemy-specific stuff, like the [SQL] popups, the dialect info
directives.
* scss support using pyscss.
Config
======
in conf.py, the extension is::
extensions = [
'zzzeeksphinx',
]
The theme is::
html_theme = 'zzzeeksphinx'
Other configs that SQLAlchemy has set up; these two are probably
needed::
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0.0"
release_date = "Not released"
Additional configs for the "dynamic site thing" look like::
site_base = os.environ.get("RTD_SITE_BASE", "http://www.sqlalchemy.org")
site_adapter_template = "docs_adapter.mako"
site_adapter_py = "docs_adapter.py"
Configs which do some last-minute translation of module names
when running autodoc to display API documentation::
autodocmods_convert_modname = {
"sqlalchemy.sql.sqltypes": "sqlalchemy.types",
"sqlalchemy.sql.type_api": "sqlalchemy.types",
"sqlalchemy.sql.schema": "sqlalchemy.schema",
"sqlalchemy.sql.elements": "sqlalchemy.sql.expression",
"sqlalchemy.sql.selectable": "sqlalchemy.sql.expression",
"sqlalchemy.sql.dml": "sqlalchemy.sql.expression",
"sqlalchemy.sql.ddl": "sqlalchemy.schema",
"sqlalchemy.sql.base": "sqlalchemy.sql.expression"
}
autodocmods_convert_modname_w_class = {
("sqlalchemy.engine.interfaces", "Connectable"): "sqlalchemy.engine",
("sqlalchemy.sql.base", "DialectKWArgs"): "sqlalchemy.sql.base",
}
|
zzzeeksphinx
|
/zzzeeksphinx-1.4.0.tar.gz/zzzeeksphinx-1.4.0/README.rst
|
README.rst
|
import os
import re

from setuptools import setup

# Extract __version__ from zzzeeksphinx/__init__.py without importing the
# package (importing would require its dependencies to be installed).
# Use a context manager so the handle is closed even if the regex match
# fails; the original closed it manually and leaked on error.
with open(
    os.path.join(os.path.dirname(__file__), "zzzeeksphinx", "__init__.py")
) as v:
    VERSION = (
        re.compile(r""".*__version__ = ["'](.*?)["']""", re.S)
        .match(v.read())
        .group(1)
    )

# Read the long description up front; open(...).read() in the setup() call
# would leave the file handle unclosed.
readme = os.path.join(os.path.dirname(__file__), "README.rst")
with open(readme) as f:
    long_description = f.read()

setup(
    name="zzzeeksphinx",
    version=VERSION,
    description="Zzzeek's Sphinx Layout and Utilities.",
    long_description=long_description,
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "Programming Language :: Python :: 3",
        "Topic :: Documentation",
    ],
    keywords="Sphinx",
    author="Mike Bayer",
    author_email="[email protected]",
    url="https://github.com/sqlalchemyorg/zzzeeksphinx",
    license="MIT",
    packages=["zzzeeksphinx"],
    install_requires=[
        "libsass",
        "mako",
        "requests",
        "sphinx>=5.3.0,<6.3",
        "sphinxcontrib-jquery",
    ],
    include_package_data=True,
    zip_safe=False,
    entry_points={
        "sphinx.html_themes": [
            "zsbase = zzzeeksphinx.theme",
            "zzzeeksphinx = zzzeeksphinx.theme",
            "zsmako = zzzeeksphinx.theme",
        ],
        "pygments.lexers": [
            "pycon+sql = zzzeeksphinx.sqlformatter:PyConWithSQLLexer",
            "python+sql = zzzeeksphinx.sqlformatter:PythonWithSQLLexer",
        ],
    },
)
|
zzzeeksphinx
|
/zzzeeksphinx-1.4.0.tar.gz/zzzeeksphinx-1.4.0/setup.py
|
setup.py
|
ZzzFS: dataset management à la ZFS
ZzzFS ("snooze FS") brings a set of ZFS management commands to non-ZFS volumes,
turning any directory on a traditional filesystem into a zpool-like object.
Using only the Python standard library, ZzzFS can be useful to, for example,
test tools that use ZFS functionality on a system lacking real ZFS. Of course,
ZzzFS misses all of the low-level features underpinning true ZFS volumes:
checksumming, copy-on-write, etc.
Note that this is distinct from the ZFS feature allowing a zpool to be created
using a regular file as a vdev. ZzzFS translates commands into move/copy/symlink
operations in the original filesystem; it does not manage blocks in a virtual
disk.
This is a functional work in progress; don't trust any important data to it
just yet. The test suite covers the following features:
* create/destroy/list "filesystems" and "pools"
* clone/promote, send/receive, rollback, diff snapshots
* get/set/inherit attributes
* pool command history
Example usage::
$ zzzpool create mypool /tmp/pool
$ zzzpool list
NAME SIZE ALLOC FREE CAP HEALTH ALTROOT
mypool - - - - ONLINE -
$ zzzfs create mypool/work
$ zzzfs create mypool/play
$ zzzfs snapshot mypool/work@yesterday
$ zzzfs list -t all
NAME USED AVAIL REFER MOUNTPOINT
mypool - - - /private/tmp/pool/mypool
mypool/play - - - /private/tmp/pool/mypool/play
mypool/work - - - /private/tmp/pool/mypool/work
mypool/work@yesterday - - - -
$ zzzfs send mypool/work@yesterday | zzzfs receive mypool/more_work
$ zzzpool history
History for 'mypool':
2015-01-13.22:32:38 zzzpool create mypool /tmp/pool
2015-01-13.22:32:50 zzzfs create mypool/work
2015-01-13.22:32:53 zzzfs create mypool/play
2015-01-13.22:32:56 zzzfs snapshot mypool/work@yesterday
2015-01-13.22:33:48 zzzfs receive mypool/more_work
For more details on real ZFS command usage, see the Oracle Solaris ZFS
Administration Guide (https://docs.oracle.com/cd/E26505_01/pdf/E37384.pdf).
Released under the CDDL v1.1 license. There's no original ZFS code present, but
it's only appropriate to pair "snooze" with "cuddle."
|
zzzfs
|
/zzzfs-0.1.2.tar.gz/zzzfs-0.1.2/README
|
README
|
# encoding=utf-8
from setuptools import setup

# Read the long description with a context manager; the original
# open('README').read() inside the setup() call leaked the file handle.
with open('README') as readme_file:
    long_description = readme_file.read()

setup(
    name='zzzfs',
    version='0.1.2',
    description='Dataset management à la ZFS',
    long_description=long_description,
    author='Daniel W. Steinbrook',
    author_email='[email protected]',
    url='https://github.com/steinbro/zzzfs',
    license="CDDL",
    keywords='zfs',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Topic :: System :: Filesystems',
    ],
    packages=['libzzzfs', 'libzzzfs.cmd'],
    test_suite='tests',
    entry_points={
        'console_scripts': [
            'zzzfs = libzzzfs.cmd.zzzfs:main',
            'zzzpool = libzzzfs.cmd.zzzpool:main',
        ],
    },
)
|
zzzfs
|
/zzzfs-0.1.2.tar.gz/zzzfs-0.1.2/setup.py
|
setup.py
|
#!/usr/bin/env python2.7
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License, version 1.1 (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at ./LICENSE.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at ./LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
# Copyright (c) 2015 Daniel W. Steinbrook. All rights reserved.
def validate_component_name(component_name, allow_slashes=False):
    '''Check that component name starts with an alphanumeric character, and
    disallow all non-alphanumeric characters except underscore, hyphen, colon,
    and period in component names.

    Returns True if the name is valid, False otherwise. Slashes (dataset
    hierarchy separators) are only accepted when allow_slashes is True.
    '''
    allowed = ('_', '-', ':', '.')
    if allow_slashes:
        allowed += ('/',)
    # Must be non-empty and start with an alphanumeric character.
    if len(component_name) == 0:
        return False
    if not component_name[0].isalnum():
        return False
    # Every character must be alphanumeric or explicitly allowed.
    return all(c.isalnum() or c in allowed for c in component_name)
class ZzzFSException(Exception):
    '''Generic error raised for all user-facing ZzzFS failures.'''
    pass
class PropertyList(object):
    '''Comma-separated list of property names, as given on a command line.'''

    # Fields that hold numbers; these are right-aligned when tabulated.
    numeric_types = ['alloc', 'avail', 'cap', 'free', 'refer', 'size', 'used']
    # Long-form aliases mapped to their canonical short names.
    shorthand = {'available': 'avail', 'capacity': 'cap'}

    def __init__(self, user_string):
        self.items = user_string.split(',')

    def __str__(self):
        return ','.join(self.items)

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, str(self))

    def validate_against(self, acceptable):
        '''Raise ZzzFSException for the first name not in the acceptable set.'''
        unknown = [name for name in self.names if name not in acceptable]
        if unknown:
            raise ZzzFSException('%s: unrecognized property name' % unknown[0])

    @property
    def names(self):
        '''Canonical (short) name for each requested property, in order.'''
        return (self.shorthand.get(item, item) for item in self.items)

    @property
    def types(self):
        '''int for numeric columns, str for everything else, in order.'''
        return (int if name in self.numeric_types else str
                for name in self.names)
class PropertyAssignment(object):
    '''property=value command-line argument, as used in zzzfs set command.'''

    def __init__(self, user_string):
        pieces = user_string.split('=')
        if len(pieces) != 2:
            raise ZzzFSException(
                '%r: invalid property=value format' % user_string)
        self.key, self.val = pieces
        if not validate_component_name(self.key):
            raise ZzzFSException('%s: invalid property' % self.key)
        self.user_string = user_string

    def __str__(self):
        # Echo back exactly what the user typed.
        return self.user_string
def tabulated(data, headers, scriptable_mode=False, sort_asc=None, sort_desc=None):
    '''Generates a printable table as a string given data (an array of dicts)
    and an array of field names for headers.

    data: list of dicts mapping field name -> string value; missing values
        render as '-'.
    headers: object exposing .names and .types (e.g. PropertyList).
    scriptable_mode: if True, emit bare tab-separated rows with no header
        row and no column padding.
    sort_asc / sort_desc: iterables of field names to sort rows by,
        ascending/descending. Raises ZzzFSException on an unknown field.
    '''
    # Fix of mutable-default-argument bug: use None sentinels instead of [].
    sort_asc = sort_asc or []
    sort_desc = sort_desc or []
    if len(data) == 0:
        return ''
    types = list(headers.types)
    names = list(headers.names)
    row_format = '\t'.join('%s' for i in range(len(names)))
    if not scriptable_mode:
        # For evenly-spaced columns, left-align each text field (right-align
        # each numeric field) in a cell that's big enough for the longest
        # value in each column.
        data_and_headers = data + [dict(zip(names, names))]
        cells = []
        for i in range(len(names)):
            box_width = max(
                len(r.get(names[i]) or '-') for r in data_and_headers)
            if types[i] == str:
                box_width *= -1  # negative field width means left-align
            cells.append('%%%ds' % box_width)
        row_format = '\t'.join(cells)
    # sort by specified fields, if any
    for field in sort_asc + sort_desc:
        if field not in names:
            raise ZzzFSException('%s: no such column' % field)
    for field in sort_asc:
        data = sorted(data, key=lambda row: row[field])
    for field in sort_desc:
        data = list(reversed(sorted(data, key=lambda row: row[field])))
    # Add individual data rows.
    output = '\n'.join(
        row_format % tuple(row.get(names[i]) or '-' for i in range(len(names)))
        for row in data)
    # Prepend header row in all caps.
    if not scriptable_mode:
        output = row_format % tuple(h.upper() for h in names) + '\n' + output
    return output
|
zzzfs
|
/zzzfs-0.1.2.tar.gz/zzzfs-0.1.2/libzzzfs/util.py
|
util.py
|
#!/usr/bin/env python2.7
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License, version 1.1 (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at ./LICENSE.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at ./LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
# Copyright (c) 2015 Daniel W. Steinbrook. All rights reserved.
from libzzzfs.dataset import Pool
from libzzzfs.util import tabulated, ZzzFSException
def create(pool_name, disk):
    '''Add a pool in the specified directory.'''
    # should_exist=False makes Pool raise if the name is already taken.
    pool = Pool(pool_name, should_exist=False)
    pool.create(disk)
    return pool
def destroy(pool_name):
    '''Remove a pool.'''
    # should_exist=True makes Pool raise if there is no such pool.
    Pool(pool_name, should_exist=True).destroy()
def history(pool_names, long_format):
    '''Return the command history of the named pools (all pools when no
    names are given), one event per line.'''
    pools = Pool.all()
    if pool_names:
        pools = [Pool(name, should_exist=True) for name in pool_names]
    lines = []
    for pool in pools:
        # Each pool will have at least one history record (zzzpool create).
        lines.append('History for %r:' % pool.name)
        lines.extend(pool.get_history(long_format))
    return '\n'.join(lines)
def list(pool_name, headers, scriptable_mode):
    '''List all pools.'''
    # NOTE(review): this function shadows the builtin list(); presumably the
    # CLI dispatches to command functions by name — confirm before renaming.
    headers.validate_against([
        'name', 'size', 'alloc', 'free', 'cap', 'health', 'altroot'])
    pools = Pool.all()
    if pool_name:
        pools = [Pool(pool_name, should_exist=True)]
    # Only name and health are tracked; other requested columns render as '-'.
    return tabulated(
        [{'name': p.name, 'health': 'ONLINE'} for p in pools], headers,
        scriptable_mode)
|
zzzfs
|
/zzzfs-0.1.2.tar.gz/zzzfs-0.1.2/libzzzfs/zpool.py
|
zpool.py
|
#!/usr/bin/env python2.7
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License, version 1.1 (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at ./LICENSE.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at ./LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
# Copyright (c) 2015 Daniel W. Steinbrook. All rights reserved.
#
# ZzzFS structure:
#
# <ZZZFS_ROOT>/
# <pool_name>/
# data -> <disk>
# properties/
# filesystems/
# <fs_name>/
# data -> ../data/<fs_name>/
# properties/
# snapshots/
# <snapshot_name>/
# data/
# properties/
# [...]
# <fs_name>%<sub_fs_name>/
# data -> ../data/<fs_name>/<sub_fs_name>/
# properties/
# snapshots/
# [...]
# [...]
import io
import os
import csv
import pwd
import gzip
import time
import shutil
import logging
import tarfile
import datetime
import platform
from libzzzfs.util import validate_component_name, ZzzFSException
# NOTE(review): calling basicConfig(DEBUG) at import time configures the
# process-wide root logger from a library module — confirm this is intended.
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
# Pools live under this directory unless ZZZFS_ROOT overrides it.
ZZZFS_DEFAULT_ROOT = os.path.expanduser('~/.zzzfs')
def get_dataset_by(dataset_name, should_be=None, should_exist=True):
    '''Handle user-specified dataset name, returning a Filesystem or Snapshot
    based on the name. If should_be is specified, an exception is raised if the
    dataset is not an instance of the specified class. If should_exist is
    False/True, an exception is raised if the dataset does/does not already
    exist; no check is performed if should_exist is None.
    '''
    # validate dataset identifier
    filesystem_name = dataset_name
    snapshot_name = None
    # distinguish between "fs_name" and "fs_name@snapshot"
    if dataset_name.count('@') == 1:
        filesystem_name, snapshot_name = dataset_name.split('@', 1)
    if not validate_component_name(filesystem_name, allow_slashes=True):
        raise ZzzFSException('%s: invalid dataset identifier' % dataset_name)
    obj = Filesystem(dataset_name)
    if snapshot_name:
        if not validate_component_name(snapshot_name):
            raise ZzzFSException('%s: invalid snapshot name' % snapshot_name)
        obj = Snapshot(filesystem_name, snapshot_name)
    if should_be:
        if not isinstance(obj, should_be):
            raise ZzzFSException(
                '%s: not a %s' % (dataset_name, should_be.__name__.lower()))
    if should_exist and not obj.exists():
        raise ZzzFSException('%s: no such dataset' % dataset_name)
    # '== False' is deliberate: should_exist=None means "skip this check".
    if obj.exists() and should_exist == False:
        raise ZzzFSException('%s: dataset exists' % dataset_name)
    # pool should exist, even if dataset itself shouldn't
    #logger.debug('%s, in pool %s', obj, obj.pool)
    if not obj.pool.exists():
        raise ZzzFSException('%s: no such pool' % obj.pool.name)
    return obj
def get_all_datasets(identifiers, types, recursive, max_depth):
    '''Get all datasets matching the given identifier names and dataset types,
    and optionally all or a generational subset of their descendants.

    identifiers: dataset names to select (empty means all datasets).
    types: PropertyList of 'all'/'filesystem'/'snapshot'/'snap' filters.
    recursive/max_depth: include descendants (all, or limited generations).
    '''
    types.validate_against(['all', 'filesystem', 'snapshot', 'snap'])
    # start with set of all filesystems and snapshots
    filesystems = [f for p in Pool.all() for f in p.get_filesystems()]
    snapshots = [s for f in filesystems for s in f.get_snapshots()]
    datasets = filesystems + snapshots
    # filter to specific identifiers if requested
    if identifiers:
        datasets = [get_dataset_by(i) for i in identifiers]
    # add children of specified identifiers, if requested
    if recursive or max_depth:
        children = []
        for d in datasets:
            if isinstance(d, Filesystem):
                children += d.get_children(max_depth)
        datasets += children
    # add any snapshots of identifiers and their descendants
    # it's safe to modify the list as we iterate, because we're only adding
    # snapshots, not filesystems
    for d in datasets:
        if isinstance(d, Filesystem):
            datasets += d.get_snapshots()
    # filter out filesystems, if not requested
    if not any(t in ('all', 'filesystem') for t in types.items):
        datasets = [d for d in datasets if not isinstance(d, Filesystem)]
    # filter out snapshots, if not requested
    if not any(t in ('all', 'snapshot', 'snap') for t in types.items):
        datasets = [d for d in datasets if not isinstance(d, Snapshot)]
    return datasets
class Dataset(object):
    '''Base class for Pool, Filesystem, and Snapshot. Contains methods that
    apply to all three objects.
    '''
    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.name)

    @property
    def properties(self):
        # Directory holding one file per locally-set property.
        return os.path.join(self.root, 'properties')

    @property
    def data(self):
        # Symlink (or directory) holding the dataset's actual contents.
        return os.path.join(self.root, 'data')

    @property
    def base_attrs(self):
        # Minimal attribute set; subclasses extend this.
        return {'name': self.name}

    @property
    def creation(self):
        # On POSIX systems, ctime is metadata change time, not file creation
        # time, but these should be the same value for our dataset roots.
        try:
            return time.ctime(os.path.getctime(self.root))
        except OSError:  # dataset is currently being destroyed, perhaps
            return None

    def get_parent(self):
        '''Return the containing Filesystem, or the Pool for a top-level name.'''
        if '/' in self.name:
            return Filesystem(self.name.rsplit('/', 1)[-2])
        return Pool(self.name)

    def get_local_properties(self):
        '''Return base attributes plus properties set directly on this
        dataset (one file per property under self.properties).'''
        attrs = self.base_attrs
        try:
            keys = os.listdir(self.properties)
        except OSError:
            # no local attributes
            return attrs
        for key in keys:
            with open(os.path.join(self.properties, key), 'r') as f:
                attrs[key] = f.read()
        #logger.debug('%s local attributes: %s', self.name, attrs)
        return attrs

    def get_inherited_properties(self):
        '''Return properties inherited from ancestors, excluding any that
        are set locally; nearer ancestors take precedence.'''
        attrs = {}
        local_attrs = self.get_local_properties()
        # inherit values for any attributes not overridden locally, bottom-up
        parent = self
        while parent.get_parent():
            parent = parent.get_parent()
            for key, val in parent.get_local_properties().items():
                if key not in attrs and key not in local_attrs:
                    attrs[key] = val
        return attrs

    def add_local_property(self, key, val):
        '''Set a property directly on this dataset.'''
        if not os.path.exists(self.properties):
            os.makedirs(self.properties)
        if '/' in key:
            # a slash would escape the properties directory
            raise ZzzFSException('%s: invalid property' % key)
        with open(os.path.join(self.properties, key), 'w') as f:
            f.write(val)

    def get_property_and_source(self, key):
        '''Return (value, 'local'|'inherited'), or (None, None) if unset.'''
        local = self.get_local_properties()
        if key in local:
            return (local[key], 'local')
        inherited = self.get_inherited_properties()
        if key in inherited:
            return (inherited[key], 'inherited')
        # property not found
        return (None, None)

    def get_property(self, key):
        '''Return the effective value of key, or None if unset anywhere.'''
        val, _ = self.get_property_and_source(key)
        return val

    def remove_local_property(self, key):
        '''Remove a locally-set property; return True if one was removed.'''
        if self.get_property_and_source(key)[1] == 'local':
            os.remove(os.path.join(self.properties, key))
            return True
        else:
            # property did not exist, or is not local
            return False
class Pool(Dataset):
    '''Top-level dataset: a directory under ZZZFS_ROOT whose data symlink
    points into the user-supplied disk directory.'''

    def __init__(self, name, should_exist=None):
        # should_exist: True/False raises if the pool is missing/present;
        # None performs no existence check.
        self.name = name
        zzzfs_root = os.environ.get('ZZZFS_ROOT', ZZZFS_DEFAULT_ROOT)
        if not os.path.exists(zzzfs_root):
            os.makedirs(zzzfs_root)
        self.root = os.path.join(zzzfs_root, self.name)
        self.filesystems = os.path.join(self.root, 'filesystems')
        self.history = os.path.join(self.root, 'history')
        if should_exist and not self.exists():
            raise ZzzFSException('%s: no such pool' % self.name)
        # '== False' is deliberate: None means "skip this check".
        if should_exist == False and self.exists():
            raise ZzzFSException('%s: pool exists' % self.name)

    def get_parent(self):
        # a pool is the top of the hierarchy; it has no parent
        return None

    @classmethod
    def all(self):
        # return an array of all Pool objects
        try:
            return [Pool(name) for name in os.listdir(
                os.environ.get('ZZZFS_ROOT', ZZZFS_DEFAULT_ROOT))]
        except OSError:
            # zzzfs_root doesn't exist, so no pools have been created
            return []

    def exists(self):
        # a pool exists iff its directory is present under the zzzfs root
        return self.name in os.listdir(
            os.environ.get('ZZZFS_ROOT', ZZZFS_DEFAULT_ROOT))

    def create(self, disk):
        '''Initialize the pool in the given (empty or absent) directory.'''
        if os.path.exists(disk) and len(os.listdir(disk)) != 0:
            raise ZzzFSException('%s: disk in use' % self.name)
        os.makedirs(self.root)
        pool_target = os.path.join(os.path.abspath(disk), self.name)
        os.makedirs(pool_target)
        # pool's data is a symlink into the backing disk directory
        os.symlink(pool_target, self.data)
        # create initial root filesystem for this pool
        Filesystem(self.name).create()

    def destroy(self):
        '''Delete the pool, including its data on the backing disk.'''
        if os.path.exists(os.path.realpath(self.data)):
            shutil.rmtree(os.path.realpath(self.data))
        shutil.rmtree(self.root)

    def get_filesystems(self):
        '''Yield a Filesystem for every filesystem in this pool.'''
        try:
            fs = os.listdir(self.filesystems)
        except OSError:  # dataset is currently being destroyed, perhaps
            return
        for x in fs:
            # unescape slashes when instantiating Filesystem object
            yield Filesystem(x.replace('%', '/'))

    def get_history(self, long_format=False):
        '''Yield formatted history events logged for this pool.'''
        try:
            with open(self.history, 'r') as f:
                history = csv.reader(f)
                for (date, command, user, host) in history:
                    if long_format:
                        yield '%s %s [user %s on %s]' % (date, command, user, host)
                    else:
                        yield '%s %s' % (date, command)
        except IOError:
            # no logged history
            pass

    def log_history_event(self, argv, date=None, user=None, host=None):
        '''Append a command invocation to the pool's history CSV file.'''
        command = ' '.join(argv)
        if not date:  # default date is now
            date = datetime.datetime.now()
        if not user:  # default user is user executing this script
            user = pwd.getpwuid(os.getuid()).pw_name
        if not host:  # default host is the current platform host
            host = platform.node()
        with open(self.history, 'a') as f:
            history = csv.writer(f)
            history.writerow(
                [date.strftime('%Y-%m-%d.%H:%M:%S'), command, user, host])
class Filesystem(Dataset):
    """A zzzfs filesystem: a directory under its pool's 'filesystems' area
    whose 'data' symlink points into the pool's data directory (mountpoint).
    """
    def __init__(self, filesystem):
        # slashes are escaped as '%' so the full name can be a directory name
        self.name = filesystem
        self.safe_name = self.name.replace('/', '%')
        # find the pool by walking up the dataset tree to the top
        obj = self
        while obj.get_parent():
            obj = obj.get_parent()
        self.pool = Pool(obj.name)
        # name with the leading '<pool>/' prefix stripped off
        self.poolless_name = self.name[len(self.pool.name)+1:]
        self.root = os.path.join(self.pool.root, 'filesystems', self.safe_name)
        self.snapshots = os.path.join(self.root, 'snapshots')
    @property
    def mountpoint(self):
        # before the filesystem is created, the symlink doesn't resolve, so
        # this property recomputes the real path on every access
        try:
            return os.path.realpath(self.data)
        except OSError:  # dataset is currently being destroyed, perhaps
            return None
    @property
    def base_attrs(self):
        # extend base dataset attributes with mountpoint and creation time
        data = super(Filesystem, self).base_attrs
        data['mountpoint'] = self.mountpoint
        data['creation'] = self.creation
        return data
    def exists(self):
        return os.path.exists(self.root)
    def get_children(self, max_depth=0):  # 0 = all descendants
        """Return descendant filesystems, optionally limited in depth."""
        children = [
            f for f in self.pool.get_filesystems()
            if f.name.startswith(self.name + '/')]
        #logger.debug('%s children: %s', self, children)
        if max_depth > 0:
            # use number of slashes to count depth
            depth = max_depth + self.name.count('/')
            children = [f for f in children if f.name.count('/') <= depth]
        return children
    def get_snapshots(self):
        # generator of Snapshot objects; yields nothing if the snapshots
        # directory has vanished (e.g. mid-destroy)
        try:
            snaps = os.listdir(self.snapshots)
        except OSError:  # dataset is currently being destroyed, perhaps
            return
        for x in snaps:
            yield Snapshot(self.name, x)
    def create(self, create_parents=False, from_stream=None):
        """Create this filesystem; optionally create missing parents, or
        populate it from a 'zzzfs send' stream (the receive path)."""
        if not self.get_parent().exists():
            if create_parents:
                #logger.debug('%s: need to create %s', self, self.get_parent())
                self.get_parent().create(create_parents=True)
            else:
                raise ZzzFSException(
                    '%s: parent filesystem missing' % self.name)
        # create relative symlink into pool data
        target = os.path.join('..', '..', 'data', self.poolless_name)
        try:
            # relative to self.root this resolves to <pool root>/data/<name>
            os.makedirs(os.path.join(self.root, target))
        except OSError:
            # already exists
            pass
        os.symlink(target, self.data)
        os.makedirs(self.properties)
        os.makedirs(self.snapshots)
        #logger.debug('%s: pointed %s at %s', self, self.data, target)
        if from_stream:
            # for receive command: inverse of Snapshot.to_stream
            try:
                # gzip needs a seekable object, not a stream
                #XXX this entails fitting the entire snapshot into memory
                buf = io.BytesIO(from_stream.read())
                buf.seek(0)
                with gzip.GzipFile(fileobj=buf) as g:
                    with tarfile.TarFile(fileobj=g) as t:
                        #logger.debug('files in stream: %s', t.getnames())
                        # extract into snapshots directory
                        t.extractall(self.snapshots)
                # "rollback" filesystem to snapshot just received
                self.rollback_to(
                    Snapshot(self.name, os.listdir(self.snapshots)[0]))
            except Exception as e:
                # if anything goes wrong, destroy target filesystem and exit
                self.destroy()
                raise ZzzFSException(e)
        #logger.debug(
        #    'after creating %s, filesystems in %s: %s', self, self.pool,
        #    self.pool.get_filesystems())
    def destroy(self, recursive=False):
        """Remove this filesystem; children require recursive=True."""
        dependencies = [
            f for f in self.pool.get_filesystems()
            if f.name.startswith(self.name + '/')]
        #logger.debug('%s dependencies: %s', self, dependencies)
        if len(dependencies) > 0 and not recursive:
            raise ZzzFSException(
                'cannot destroy %r: filesystem has children\n'
                'use \'-r\' to destroy the following datasets:\n'
                '%s' % (self.name, '\n'.join(f.name for f in dependencies)))
        # user may have already deleted data
        if os.path.exists(self.mountpoint):
            shutil.rmtree(self.mountpoint)
        shutil.rmtree(self.root)
        # delete any child filesystems
        for f in dependencies:
            f.destroy(recursive)
    def rollback_to(self, snapshot):
        """Replace current data (and local properties, if snapshotted) with
        the snapshot's copies."""
        shutil.rmtree(self.mountpoint)
        shutil.copytree(snapshot.data, self.mountpoint)
        # restore any local properties
        if os.path.exists(snapshot.properties):
            shutil.rmtree(self.properties)
            shutil.copytree(snapshot.properties, self.properties)
    def rename(self, new_dataset):
        """Move this filesystem's contents into new_dataset, then destroy
        the old one."""
        # re-create relative symlink into pool data
        target = os.path.join('..', '..', 'data', new_dataset.poolless_name)
        try:
            os.makedirs(os.path.join(new_dataset.root, target))
        except OSError:
            # already exists
            pass
        # move each component individually
        os.symlink(target, new_dataset.data)
        # shutil.move treats destination as parent if it is a directory
        #logger.debug(
        #    '%s: %s -> %s', self, self.mountpoint, new_dataset.mountpoint)
        os.rmdir(new_dataset.mountpoint)
        shutil.move(self.mountpoint, new_dataset.mountpoint)
        shutil.move(self.properties, new_dataset.root)
        shutil.move(self.snapshots, new_dataset.root)
        # all data has been moved
        self.destroy()
class Snapshot(Dataset):
    """A point-in-time copy of a filesystem, stored under the filesystem's
    'snapshots' directory."""
    def __init__(self, filesystem, snapshot):
        self.filesystem = Filesystem(filesystem)
        self.name = snapshot
        # canonical zfs-style identifier, e.g. 'pool/fs@snap'
        self.full_name = '%s@%s' % (filesystem, snapshot)
        self.root = os.path.join(self.filesystem.root, 'snapshots', self.name)
        self.pool = self.filesystem.pool
    @property
    def base_attrs(self):
        # report the fully-qualified name rather than the bare snapshot name
        data = super(Snapshot, self).base_attrs
        data['name'] = self.full_name
        data['creation'] = self.creation
        return data
    def exists(self):
        return os.path.exists(self.root)
    def create(self):
        """Snapshot the filesystem's current data and local properties."""
        os.makedirs(self.root)
        shutil.copytree(self.filesystem.data, self.data)
        if os.path.exists(self.filesystem.properties):
            shutil.copytree(self.filesystem.properties, self.properties)
        else:
            # no local properties associated with current working filesystem;
            # use an empty directory for the snapshot's filesystem
            os.makedirs(self.properties)
    def rename(self, new_snapshot):
        # snapshots live in one directory, so a rename is a simple move
        os.rename(self.root, new_snapshot.root)
    def clone_to(self, new_filesystem):
        """Copy this snapshot's data and properties into a new filesystem."""
        new_filesystem.create()
        #logger.debug('%s: cloning to %s', self, new_filesystem.mountpoint)
        # remove folders to be replaced by copytree
        #logger.debug(
        #    '%s: %s -> %s', self, self.data, new_filesystem.mountpoint)
        os.rmdir(new_filesystem.mountpoint)
        os.rmdir(new_filesystem.properties)
        shutil.copytree(self.data, new_filesystem.mountpoint)
        shutil.copytree(self.properties, new_filesystem.properties)
    def to_stream(self, stream):
        # write a gzipped tar of the snapshot to the stream
        with gzip.GzipFile(fileobj=stream, mode='w') as g:
            with tarfile.open(fileobj=g, mode='w') as t:
                t.add(self.root, arcname=self.name)
|
zzzfs
|
/zzzfs-0.1.2.tar.gz/zzzfs-0.1.2/libzzzfs/dataset.py
|
dataset.py
|
#!/usr/bin/env python2.7
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License, version 1.1 (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at ./LICENSE.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at ./LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
# Copyright (c) 2015 Daniel W. Steinbrook. All rights reserved.
import os
import sys
import shutil
import filecmp
from libzzzfs.dataset import (
get_all_datasets, get_dataset_by, Filesystem, Pool, Snapshot)
from libzzzfs.util import tabulated, validate_component_name, ZzzFSException
# Each method returns a string to be written to stdout, or a dataset (or list
# of datasets) affected by the command.
def clone(snapshot, filesystem):
    '''Turn a snapshot into a filesystem with a new name.'''
    src = get_dataset_by(snapshot, should_be=Snapshot)
    dest = get_dataset_by(
        filesystem, should_be=Filesystem, should_exist=False)
    src.clone_to(dest)
    # record where the clone came from, as real ZFS does
    dest.add_local_property('origin', src.full_name)
    return [src, dest]
def create(filesystem, create_parents, properties):
    '''Create a filesystem, optionally with parents and local properties.'''
    fs = get_dataset_by(
        filesystem, should_be=Filesystem, should_exist=False)
    fs.create(create_parents)
    # apply any -o property=value assignments given on the command line
    for assignment in properties:
        fs.add_local_property(assignment.key, assignment.val)
    return fs
def destroy(filesystem, recursive):
    '''Remove a filesystem (and, if recursive, its children).'''
    fs = get_dataset_by(filesystem, should_be=Filesystem)
    fs.destroy(recursive)
    return fs
def diff(identifier, other_identifier):
    '''Diff a snapshot against another snapshot in the same filesystem, or
    against the current working filesystem.
    '''
    left = get_dataset_by(identifier, should_be=Snapshot)
    if other_identifier is None:
        # no second argument: compare against the live filesystem
        right = left.filesystem
    else:
        right = get_dataset_by(other_identifier)
    # real ZFS can't diff snapshots in different filesystems; ZzzFS doesn't
    # enforce that restriction
    lines = []
    prefix_len = len(left.data) + 1
    def walk(dcmp):
        # directory path relative to the snapshot root (trim pool prefix)
        rel = dcmp.left[prefix_len:]
        for tag, names in (
                ('M', dcmp.diff_files),
                ('-', dcmp.left_only),
                ('+', dcmp.right_only)):
            for name in names:
                lines.append('%s\t%s' % (tag, os.path.join(rel, name)))
        for child in dcmp.subdirs.values():
            walk(child)
    walk(filecmp.dircmp(left.data, right.data))
    return '\n'.join(lines)
def get(properties, identifiers, headers, sources, scriptable_mode, recursive,
        max_depth, types):
    '''Get a set of properties for a set of datasets.'''
    all_headers = ['name', 'property', 'value', 'source']
    if headers.items == ['all']:
        headers.items = all_headers
    headers.validate_against(all_headers)
    sources.validate_against(['local', 'inherited'])
    rows = []
    def add_row(dataset, key, val, source):
        # one output row per (dataset, property) pair
        rows.append({
            'name': dataset.name, 'property': key, 'value': val,
            'source': source})
    for dataset in get_all_datasets(identifiers, types, recursive, max_depth):
        if properties.items == ['all']:
            # dump every property from each requested source
            if 'local' in sources.items:
                for key, val in dataset.get_local_properties().items():
                    add_row(dataset, key, val, 'local')
            if 'inherited' in sources.items:
                for key, val in dataset.get_inherited_properties().items():
                    add_row(dataset, key, val, 'inherited')
        else:
            # only the named properties, filtered by requested source
            for prop in properties.items:
                val, source = dataset.get_property_and_source(prop)
                if source in sources.items:
                    add_row(dataset, prop, val, source)
    return tabulated(rows, headers, scriptable_mode)
def inherit(property, identifiers):
    '''Remove a local property from a set of datasets.'''
    if not validate_component_name(property):
        raise ZzzFSException('%s: invalid property' % property)
    targets = [get_dataset_by(i) for i in identifiers]
    for dataset in targets:
        try:
            os.remove(os.path.join(dataset.properties, property))
        except OSError:
            pass  # property was not set locally; nothing to remove
    return targets
def list(identifiers, types, scriptable_mode, headers, recursive, max_depth,
         sort_asc, sort_desc):
    '''Tabulate a set of properties for a set of datasets.'''
    rows = [
        dict((h, d.get_property(h)) for h in headers.names)
        for d in get_all_datasets(identifiers, types, recursive, max_depth)]
    return tabulated(rows, headers, scriptable_mode, sort_asc, sort_desc)
def promote(clone_filesystem):
    '''Turn a cloned snapshot into a standalone filesystem.'''
    # ZzzFS tracks no real dependencies; dropping 'origin' is sufficient
    fs = get_dataset_by(
        clone_filesystem, should_be=Filesystem, should_exist=True)
    fs.remove_local_property('origin')
    return fs
def receive(filesystem, stream=sys.stdin):
    '''Create a new filesystem pre-populated with the contents of a snapshot
    sent via "zzzfs send" piped through stdin.
    '''
    fs = get_dataset_by(
        filesystem, should_be=Filesystem, should_exist=False)
    fs.create(from_stream=stream)
    return fs
def rename(identifier, other_identifier):
    '''Move or rename the dataset.'''
    source = get_dataset_by(identifier)
    if isinstance(source, Snapshot):
        if '@' not in other_identifier:
            # a bare snapshot name was given ("rename fs@snap new_snap");
            # qualify it with the source's filesystem
            other_identifier = '%s@%s' % (
                source.filesystem.name, other_identifier)
        # re-identify now that the name is fully qualified
        dest = get_dataset_by(
            other_identifier, should_be=Snapshot, should_exist=False)
        # snapshots may only be renamed within their own filesystem
        if source.filesystem.name != dest.filesystem.name:
            raise ZzzFSException('mismatched filesystems')
    else:
        # source is a filesystem; destination must be one too
        dest = get_dataset_by(
            other_identifier, should_be=Filesystem, should_exist=False)
        if source.pool.name != dest.pool.name:
            raise ZzzFSException('cannot rename to different pool')
    # same procedure whether filesystem or snapshot
    source.rename(dest)
    return [source, dest]
def rollback(snapshot):
    '''Replace the filesystem with the contents of the specified snapshot.'''
    snap = get_dataset_by(snapshot, should_be=Snapshot)
    snap.filesystem.rollback_to(snap)
    return snap
def send(snapshot, stream=sys.stdout):
    '''Serialize a snapshot as a gzipped tarball and write it to stdout.'''
    snap = get_dataset_by(snapshot, should_be=Snapshot)
    snap.to_stream(stream)
    return snap
def set(keyval, identifiers):
    '''Set a property value for a set of datasets.'''
    targets = [get_dataset_by(i) for i in identifiers]
    for dataset in targets:
        dataset.add_local_property(keyval.key, keyval.val)
    return targets
def snapshot(snapshots, properties):
    '''Create a snapshot of each named filesystem.

    This is a generator: each snapshot is created lazily as the result is
    iterated.
    '''
    for identifier in snapshots:
        snap = get_dataset_by(
            identifier, should_be=Snapshot, should_exist=False)
        if not snap.filesystem.exists():
            raise ZzzFSException(
                '%s: no such filesystem' % snap.filesystem.name)
        snap.create()
        for keyval in properties:
            snap.add_local_property(keyval.key, keyval.val)
        yield snap
|
zzzfs
|
/zzzfs-0.1.2.tar.gz/zzzfs-0.1.2/libzzzfs/zfs.py
|
zfs.py
|
#!/usr/bin/env python2.7
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License, version 1.1 (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at ./LICENSE.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at ./LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
# Copyright (c) 2015 Daniel W. Steinbrook. All rights reserved.
import argparse
from libzzzfs.util import PropertyAssignment, PropertyList
class CommandInterpreter(object):
    '''Base class for ZzzfsCommandInterpreter/ZzzpoolCommandInterpreter'''
    def __init__(self, argv):
        self.parser = argparse.ArgumentParser()
        # subclasses implement interpret() to register their subcommands;
        # it must run before parse_args so the subparsers exist
        self.interpret()
        # generate dict of argument keys/values
        self.args = self.parser.parse_args(argv)
        self.params = dict(self.args._get_kwargs())
        # 'command' selects the handler; the remaining params become its
        # keyword arguments
        del self.params['command']
class ZzzfsCommandInterpreter(CommandInterpreter):
    """Argument parser for the 'zzzfs' command: one subparser per
    subcommand, mirroring the signatures of the functions in libzzzfs.zfs."""
    def interpret(self):
        subparsers = self.parser.add_subparsers(
            dest='command', title='subcommands')
        # per-command arguments
        clone = subparsers.add_parser(
            'clone', help='turn a snapshot into a filesystem with a new name')
        clone.add_argument('snapshot')
        clone.add_argument('filesystem')
        create = subparsers.add_parser('create', help='create a filesystem')
        create.add_argument('filesystem')
        create.add_argument(
            '-p', action='store_true', dest='create_parents',
            help='create missing parent filesystems')
        create.add_argument(
            '-o', metavar='property=value', action='append', dest='properties',
            default=[], type=PropertyAssignment,
            help='set the specified property')
        destroy = subparsers.add_parser('destroy', help='destroy a filesystem')
        destroy.add_argument('filesystem')
        destroy.add_argument(
            '-r', action='store_true', dest='recursive',
            help='destroy child filesystems')
        diff = subparsers.add_parser(
            'diff', help='compare filesystem/snapshot against a snapshot')
        diff.add_argument('identifier', metavar='snapshot')
        diff.add_argument(
            'other_identifier', metavar='snapshot|filesystem', nargs='?')
        get = subparsers.add_parser('get', help='get dataset properties')
        # -r and -d are mutually exclusive ways to select descendants
        recursive_or_depth = get.add_mutually_exclusive_group()
        recursive_or_depth.add_argument(
            '-r', action='store_true', dest='recursive',
            help='display all children')
        recursive_or_depth.add_argument(
            '-d', metavar='depth', type=int, dest='max_depth', default=0,
            help='number of child generations to display')
        get.add_argument(
            'properties', metavar='all | property[,property...]',
            type=PropertyList, help='comma-separated list of properties')
        get.add_argument(
            'identifiers', metavar='filesystem|snapshot', nargs='+')
        get.add_argument(
            '-H', action='store_true', dest='scriptable_mode',
            help='scripted mode (no headers, tab-delimited)')
        get.add_argument(
            '-o', metavar='all | field[,field...]', type=PropertyList,
            default=PropertyList('all'), dest='headers',
            help='comma-separated list of fields (name, property, value, source)')
        get.add_argument(
            '-t', metavar='type[,type...]', dest='types', type=PropertyList,
            default=PropertyList('filesystem'),
            help='comma-separated list of types (all, filesystem, snapshot)')
        get.add_argument(
            '-s', metavar='source[,source...]', type=PropertyList,
            dest='sources', default=PropertyList('local,inherited'),
            help='comma-separated list of sources (local, inherited)')
        inherit = subparsers.add_parser(
            'inherit', help='unset a property from datasets')
        inherit.add_argument('property')
        inherit.add_argument(
            'identifiers', metavar='filesystem|snapshot', nargs='+')
        list_ = subparsers.add_parser('list', help='list datasets')
        recursive_or_depth = list_.add_mutually_exclusive_group()
        recursive_or_depth.add_argument(
            '-r', action='store_true', dest='recursive',
            help='display all children')
        recursive_or_depth.add_argument(
            '-d', metavar='depth', type=int, dest='max_depth', default=0,
            help='number of child generations to display')
        list_.add_argument(
            '-H', action='store_true', dest='scriptable_mode',
            help='scripted mode (no headers, tab-delimited)')
        list_.add_argument(
            '-o', metavar='property[,property...]', dest='headers',
            type=PropertyList, help='comma-separated list of properties',
            default=PropertyList('name,used,available,refer,mountpoint'))
        list_.add_argument(
            '-t', metavar='type[,type...]', dest='types', type=PropertyList,
            default=PropertyList('filesystem'),
            help='comma-separated list of types (all, filesystem, snapshot)')
        list_.add_argument(
            '-s', metavar='property', dest='sort_asc', action='append',
            default=[], help='sort by property (ascending)')
        list_.add_argument(
            '-S', metavar='property', dest='sort_desc', action='append',
            default=[], help='sort by property (descending)')
        list_.add_argument(
            'identifiers', metavar='filesystem|snapshot', nargs='*')
        promote = subparsers.add_parser(
            'promote',
            help='turn a cloned snapshot into a standalone filesystem')
        promote.add_argument('clone_filesystem')
        receive = subparsers.add_parser(
            'receive', help='create a new filesystem from "zzzfs send" output')
        receive.add_argument('filesystem')
        rename = subparsers.add_parser(
            'rename', help='move or rename a dataset')
        rename.add_argument('identifier', metavar='filesystem|snapshot')
        rename.add_argument('other_identifier', metavar='filesystem|snapshot')
        rollback = subparsers.add_parser(
            'rollback', help='replace a filesystem with a snapshot')
        rollback.add_argument('snapshot')
        send = subparsers.add_parser(
            'send', help='serialize snapshot into a data stream')
        send.add_argument('snapshot')
        set_ = subparsers.add_parser(
            'set', help='set a property value for a dataset')
        set_.add_argument(
            'keyval', metavar='property=value', type=PropertyAssignment)
        set_.add_argument(
            'identifiers', metavar='filesystem|snapshot', nargs='+')
        snap = subparsers.add_parser(
            'snapshot', help='create snapshots of filesystems')
        snap.add_argument('snapshots', metavar='filesystem@snapname', nargs='+')
        snap.add_argument(
            '-o', metavar='property=value', action='append', dest='properties',
            default=[], type=PropertyAssignment,
            help='set the specified property')
class ZzzpoolCommandInterpreter(CommandInterpreter):
    """Argument parser for the 'zzzpool' command: one subparser per
    subcommand, mirroring the signatures of the functions in libzzzfs.zpool."""
    def interpret(self):
        subparsers = self.parser.add_subparsers(
            dest='command', title='subcommands')
        # per-command arguments
        create = subparsers.add_parser('create', help='create a pool')
        create.add_argument('pool_name', metavar='pool', help='pool name')
        create.add_argument('disk', help='directory in which to create pool')
        destroy = subparsers.add_parser('destroy', help='destroy a pool')
        destroy.add_argument('pool_name', metavar='pool', help='pool name')
        history = subparsers.add_parser(
            'history', help='display pool command history')
        history.add_argument(
            'pool_names', metavar='pool', nargs='*', default=[],
            help='pool name')
        history.add_argument(
            '-l', action='store_true', dest='long_format',
            help='show log records in long format')
        list_ = subparsers.add_parser('list', help='list pools and properties')
        list_.add_argument(
            'pool_name', nargs='?', default=None, help='pool name')
        list_.add_argument(
            '-H', action='store_true', dest='scriptable_mode',
            help='scripted mode (no headers, tab-delimited)')
        list_.add_argument(
            '-o', metavar='property[,...]', type=PropertyList, dest='headers',
            default=PropertyList('name,size,alloc,free,cap,health,altroot'),
            help='comma-separated list of properties')
|
zzzfs
|
/zzzfs-0.1.2.tar.gz/zzzfs-0.1.2/libzzzfs/interpreter.py
|
interpreter.py
|
#!/usr/bin/env python2.7
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License, version 1.1 (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at ./LICENSE.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at ./LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
# Copyright (c) 2015 Daniel W. Steinbrook. All rights reserved.
import sys
from libzzzfs import zpool
from libzzzfs.dataset import Pool, ZzzFSException
from libzzzfs.interpreter import ZzzpoolCommandInterpreter
def zzzpool_main(argv):
    '''Dispatch a zzzpool subcommand; return its output string, if any.'''
    cmd = ZzzpoolCommandInterpreter(argv[1:])
    if cmd.args.command is None:
        # no subcommand given: show usage and bail out
        sys.exit(cmd.parser.print_usage())
    # look up the handler in libzzzfs.zpool by subcommand name
    retval = getattr(zpool, cmd.args.command)(**cmd.params)
    if type(retval) is str:
        return retval
    # pool creation is recorded in the new pool's history log
    if isinstance(retval, Pool) and cmd.args.command == 'create':
        retval.log_history_event(argv)
def main():
    '''Entry point for the zzzpool command-line tool.'''
    try:
        result = zzzpool_main(sys.argv)
    except ZzzFSException as exc:
        sys.exit('%s: %s' % (sys.argv[0], exc))
    else:
        if result:
            print(result)


if __name__ == '__main__':
    main()
|
zzzfs
|
/zzzfs-0.1.2.tar.gz/zzzfs-0.1.2/libzzzfs/cmd/zzzpool.py
|
zzzpool.py
|
#!/usr/bin/env python2.7
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License, version 1.1 (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at ./LICENSE.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at ./LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
# Copyright (c) 2015 Daniel W. Steinbrook. All rights reserved.
import sys
from libzzzfs import zfs
from libzzzfs.dataset import Dataset, Pool
from libzzzfs.interpreter import ZzzfsCommandInterpreter
from libzzzfs.util import ZzzFSException
def zzzfs_main(argv):
    '''Dispatch a zzzfs subcommand; return its output string, if any.'''
    cmd = ZzzfsCommandInterpreter(argv[1:])
    if cmd.args.command is None:
        # no subcommand given: print usage and exit
        sys.exit(cmd.parser.print_usage())
    # look up the handler in libzzzfs.zfs by subcommand name
    retval = getattr(zfs, cmd.args.command)(**cmd.params)
    if type(retval) is str:
        return retval
    elif cmd.args.command not in ('diff', 'get', 'list', 'send'):
        # pool-modifying commands; log in pool history
        if isinstance(retval, Dataset):
            retval.pool.log_history_event(argv)
        else:
            # multiple affected datasets; only log command once per pool
            # NOTE(review): for 'snapshot', retval is a generator, so this
            # iteration is what actually triggers snapshot creation
            for pool_name in list(set(dataset.pool.name for dataset in retval)):
                Pool(pool_name).log_history_event(argv)
def main():
    '''Entry point for the zzzfs command-line tool.'''
    try:
        result = zzzfs_main(sys.argv)
    except ZzzFSException as exc:
        sys.exit('%s: %s' % (sys.argv[0], exc))
    else:
        if result:
            print(result)


if __name__ == '__main__':
    main()
|
zzzfs
|
/zzzfs-0.1.2.tar.gz/zzzfs-0.1.2/libzzzfs/cmd/zzzfs.py
|
zzzfs.py
|
# zzzing CLI by xuanzhi33
|
zzzing
|
/zzzing-0.4.8.tar.gz/zzzing-0.4.8/README.md
|
README.md
|
from setuptools import setup, find_packages
# Read the README so PyPI renders it as the project's long description.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
# Package metadata for the zzzing CLI distribution.
setup(
name="zzzing",
version="0.4.8",
author="xuanzhi33",
author_email="[email protected]",
url="https://zzzing.cn",
description="zzzing CLI by xuanzhi33",
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Operating System :: OS Independent",
],
license="GPL-3.0",
install_requires=["requests", "pyquery"],
packages=find_packages(),
python_requires=">=3.6",
entry_points={
"console_scripts": [
"zzzing=zzzing.__main__:main"
]
}
)
|
zzzing
|
/zzzing-0.4.8.tar.gz/zzzing-0.4.8/setup.py
|
setup.py
|
Requests: Python utils - Time
=========================
Convert a time given in any supported input format into the requested output format, optionally applying a delay.
Usage
---------------
def get_time(ts=None, delay=0, fmt=19)
Arguments: [original_time] [delay] [output_fmt:{0,6,8,10,16,17,19}]
output_fmt:
19: '%Y-%m-%d %H:%M:%S',
8: '%Y%m%d',
6: '%Y%m',
10: '%Y-%m-%d',
16: '%Y-%m-%d %H:%M',
17: '%Y%m%d-%H:%M:%S'
Input format(Any one of them)
------------
# 201809
# 20180910
# 2018-09-10
# 2018-09-10 18:00
# 20180910-18:00:00
# 2018-09-10 18:00:00
# 1536573600(int)
# 1536573600.00(float)
Return format(Any one of them)
-------------
# 201809
# 20180910
# 2018-09-10
# 2018-09-10 18:00
# 20180910-18:00:00
# 2018-09-10 18:00:00
# 1536573600(int)
|
zzzutils
|
/zzzutils-0.1.7.tar.gz/zzzutils-0.1.7/README.rst
|
README.rst
|
#!/usr/bin/env python
from setuptools import setup, find_packages
# Package metadata for the zzzutils time-helper distribution.
setup(
name='zzzutils',
version='0.1.7',
description='Time utils for Humans.',
long_description=open('README.rst').read(),
long_description_content_type='text/x-rst',
author='ZhiZhi Zhang',
author_email='[email protected]',
url='https://github.com/zzzbit/zzz-utils',
packages=find_packages(),
license='Apache 2.0',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
)
|
zzzutils
|
/zzzutils-0.1.7.tar.gz/zzzutils-0.1.7/setup.py
|
setup.py
|
def totext():
    """Placeholder converter: announce the to-text conversion step."""
    message = "to text"
    print(message)
|
zzzymobbe
|
/zzzymobbe-1.0-py3-none-any.whl/pdfproject/to_text.py
|
to_text.py
|
def toimg():
    """Placeholder converter: announce the to-image conversion step."""
    message = "to boobs"
    print(message)
|
zzzymobbe
|
/zzzymobbe-1.0-py3-none-any.whl/pdfproject/to_image.py
|
to_image.py
|
def main():
    """Entry point: print the package's placeholder message."""
    message = 'zzzzz'
    print(message)
|
zzzz
|
/zzzz-0.0.3-py3-none-any.whl/zzzz.py
|
zzzz.py
|
def f(e):
    """Echo the given value to stdout."""
    value = e
    print(value)
|
zzzzz
|
/zzzzz-1.0.0-py3-none-any.whl/zzzzz.py
|
zzzzz.py
|
def f(e):
    """Echo the given value to stdout."""
    value = e
    print(value)
|
zzzzzzz
|
/zzzzzzz-1.0.0-py3-none-any.whl/zzzzzzz.py
|
zzzzzzz.py
|
from distutils.core import setup
# Package metadata for the zzzzzzzz test distribution.
# NOTE(review): distutils is deprecated (removed in Python 3.12);
# setuptools is the usual replacement.
setup(name='zzzzzzzz',
version='1.0.0',description='test',
author='Sifer',
author_email='[email protected]',
url='https://sife-shuo.github.io/',
py_modules = ['zzzzzzzz'],
)
|
zzzzzzzz
|
/zzzzzzzz-1.0.0.tar.gz/zzzzzzzz-1.0.0/setup.py
|
setup.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.