| code (stringlengths 2–1.05M) | repo_name (stringlengths 5–104) | path (stringlengths 4–251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int32 2–1.05M) |
---|---|---|---|---|---|
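A minimal loading sketch for a corpus with this schema, assuming it is served as a Hugging Face `datasets`-style dataset (the dataset name below is a placeholder; it is not given in this preview):

from datasets import load_dataset
ds = load_dataset("<code-dataset-name>", split="train")  # placeholder dataset name
row = ds[0]
print(row["repo_name"], row["path"], row["license"], row["size"])  # columns from the schema above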
#!/usr/bin/env python3
#
# Copyright 2013 The Flutter Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import argparse
import zipfile
import os
import sys


def _zip_dir(path, zip_file, prefix):
    path = path.rstrip('/\\')
    for root, dirs, files in os.walk(path):
        for file in files:
            zip_file.write(os.path.join(root, file),
                           os.path.join(root.replace(path, prefix), file))


def main(args):
    zip_file = zipfile.ZipFile(args.output, 'w', zipfile.ZIP_DEFLATED)
    for path, archive_name in args.input_pairs:
        if os.path.isdir(path):
            _zip_dir(path, zip_file, archive_name)
        else:
            zip_file.write(path, archive_name)
    zip_file.close()


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='This script creates zip files.')
    parser.add_argument('-o', dest='output', action='store',
                        help='The name of the output zip file.')
    parser.add_argument('-i', dest='input_pairs', nargs=2, action='append',
                        help='The input file and its destination location in the zip archive.')
    sys.exit(main(parser.parse_args()))
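# Hypothetical invocation sketch (the paths below are examples, not from the
# original source): each -i flag takes a (source path, archive name) pair and
# may be repeated; directories are walked and zipped recursively.
#   python3 zip.py -o out.zip -i build/assets assets -i LICENSE LICENSE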
| jamesr/sky_engine | build/zip.py | Python | bsd-3-clause | 1,167 |
from __future__ import absolute_import
import errno
import os
import sys
import signal
from celery import _find_option_with_arg
from celery import platforms
from celery.five import open_fqdn
from celery.platforms import (
get_fdmax,
ignore_errno,
set_process_title,
signals,
maybe_drop_privileges,
setuid,
setgid,
initgroups,
parse_uid,
parse_gid,
detached,
DaemonContext,
create_pidlock,
Pidfile,
LockFailed,
setgroups,
_setgroups_hack,
close_open_fds,
)
try:
import resource
except ImportError: # pragma: no cover
resource = None # noqa
from celery.tests.case import (
Case, WhateverIO, Mock, SkipTest,
call, override_stdouts, mock_open, patch,
)
class test_find_option_with_arg(Case):
def test_long_opt(self):
self.assertEqual(
_find_option_with_arg(['--foo=bar'], long_opts=['--foo']),
'bar'
)
def test_short_opt(self):
self.assertEqual(
_find_option_with_arg(['-f', 'bar'], short_opts=['-f']),
'bar'
)
class test_close_open_fds(Case):
def test_closes(self):
with patch('os.close') as _close:
with patch('os.closerange', create=True) as closerange:
with patch('celery.platforms.get_fdmax') as fdmax:
fdmax.return_value = 3
close_open_fds()
if not closerange.called:
_close.assert_has_calls([call(2), call(1), call(0)])
_close.side_effect = OSError()
_close.side_effect.errno = errno.EBADF
close_open_fds()
class test_ignore_errno(Case):
def test_raises_EBADF(self):
with ignore_errno('EBADF'):
exc = OSError()
exc.errno = errno.EBADF
raise exc
def test_otherwise(self):
with self.assertRaises(OSError):
with ignore_errno('EBADF'):
exc = OSError()
exc.errno = errno.ENOENT
raise exc
class test_set_process_title(Case):
def when_no_setps(self):
prev, platforms._setproctitle = platforms._setproctitle, None
try:
set_process_title('foo')
finally:
platforms._setproctitle = prev
class test_Signals(Case):
@patch('signal.getsignal')
def test_getitem(self, getsignal):
signals['SIGINT']
getsignal.assert_called_with(signal.SIGINT)
def test_supported(self):
self.assertTrue(signals.supported('INT'))
self.assertFalse(signals.supported('SIGIMAGINARY'))
def test_reset_alarm(self):
if sys.platform == 'win32':
raise SkipTest('signal.alarm not available on Windows')
with patch('signal.alarm') as _alarm:
signals.reset_alarm()
_alarm.assert_called_with(0)
def test_arm_alarm(self):
if hasattr(signal, 'setitimer'):
with patch('signal.setitimer', create=True) as seti:
signals.arm_alarm(30)
self.assertTrue(seti.called)
def test_signum(self):
self.assertEqual(signals.signum(13), 13)
self.assertEqual(signals.signum('INT'), signal.SIGINT)
self.assertEqual(signals.signum('SIGINT'), signal.SIGINT)
with self.assertRaises(TypeError):
signals.signum('int')
signals.signum(object())
@patch('signal.signal')
def test_ignore(self, set):
signals.ignore('SIGINT')
set.assert_called_with(signals.signum('INT'), signals.ignored)
signals.ignore('SIGTERM')
set.assert_called_with(signals.signum('TERM'), signals.ignored)
@patch('signal.signal')
def test_setitem(self, set):
handle = lambda *a: a
signals['INT'] = handle
set.assert_called_with(signal.SIGINT, handle)
@patch('signal.signal')
def test_setitem_raises(self, set):
set.side_effect = ValueError()
signals['INT'] = lambda *a: a
if not platforms.IS_WINDOWS:
class test_get_fdmax(Case):
@patch('resource.getrlimit')
def test_when_infinity(self, getrlimit):
with patch('os.sysconf') as sysconfig:
sysconfig.side_effect = KeyError()
getrlimit.return_value = [None, resource.RLIM_INFINITY]
default = object()
self.assertIs(get_fdmax(default), default)
@patch('resource.getrlimit')
def test_when_actual(self, getrlimit):
with patch('os.sysconf') as sysconfig:
sysconfig.side_effect = KeyError()
getrlimit.return_value = [None, 13]
self.assertEqual(get_fdmax(None), 13)
class test_maybe_drop_privileges(Case):
@patch('celery.platforms.parse_uid')
@patch('pwd.getpwuid')
@patch('celery.platforms.setgid')
@patch('celery.platforms.setuid')
@patch('celery.platforms.initgroups')
def test_with_uid(self, initgroups, setuid, setgid,
getpwuid, parse_uid):
class pw_struct(object):
pw_gid = 50001
def raise_on_second_call(*args, **kwargs):
setuid.side_effect = OSError()
setuid.side_effect.errno = errno.EPERM
setuid.side_effect = raise_on_second_call
getpwuid.return_value = pw_struct()
parse_uid.return_value = 5001
maybe_drop_privileges(uid='user')
parse_uid.assert_called_with('user')
getpwuid.assert_called_with(5001)
setgid.assert_called_with(50001)
initgroups.assert_called_with(5001, 50001)
setuid.assert_has_calls([call(5001), call(0)])
@patch('celery.platforms.parse_uid')
@patch('celery.platforms.parse_gid')
@patch('celery.platforms.setgid')
@patch('celery.platforms.setuid')
@patch('celery.platforms.initgroups')
def test_with_guid(self, initgroups, setuid, setgid,
parse_gid, parse_uid):
def raise_on_second_call(*args, **kwargs):
setuid.side_effect = OSError()
setuid.side_effect.errno = errno.EPERM
setuid.side_effect = raise_on_second_call
parse_uid.return_value = 5001
parse_gid.return_value = 50001
maybe_drop_privileges(uid='user', gid='group')
parse_uid.assert_called_with('user')
parse_gid.assert_called_with('group')
setgid.assert_called_with(50001)
initgroups.assert_called_with(5001, 50001)
setuid.assert_has_calls([call(5001), call(0)])
setuid.side_effect = None
with self.assertRaises(RuntimeError):
maybe_drop_privileges(uid='user', gid='group')
setuid.side_effect = OSError()
setuid.side_effect.errno = errno.EINVAL
with self.assertRaises(OSError):
maybe_drop_privileges(uid='user', gid='group')
@patch('celery.platforms.setuid')
@patch('celery.platforms.setgid')
@patch('celery.platforms.parse_gid')
def test_only_gid(self, parse_gid, setgid, setuid):
parse_gid.return_value = 50001
maybe_drop_privileges(gid='group')
parse_gid.assert_called_with('group')
setgid.assert_called_with(50001)
self.assertFalse(setuid.called)
class test_setget_uid_gid(Case):
@patch('celery.platforms.parse_uid')
@patch('os.setuid')
def test_setuid(self, _setuid, parse_uid):
parse_uid.return_value = 5001
setuid('user')
parse_uid.assert_called_with('user')
_setuid.assert_called_with(5001)
@patch('celery.platforms.parse_gid')
@patch('os.setgid')
def test_setgid(self, _setgid, parse_gid):
parse_gid.return_value = 50001
setgid('group')
parse_gid.assert_called_with('group')
_setgid.assert_called_with(50001)
def test_parse_uid_when_int(self):
self.assertEqual(parse_uid(5001), 5001)
@patch('pwd.getpwnam')
def test_parse_uid_when_existing_name(self, getpwnam):
class pwent(object):
pw_uid = 5001
getpwnam.return_value = pwent()
self.assertEqual(parse_uid('user'), 5001)
@patch('pwd.getpwnam')
def test_parse_uid_when_nonexisting_name(self, getpwnam):
getpwnam.side_effect = KeyError('user')
with self.assertRaises(KeyError):
parse_uid('user')
def test_parse_gid_when_int(self):
self.assertEqual(parse_gid(50001), 50001)
@patch('grp.getgrnam')
def test_parse_gid_when_existing_name(self, getgrnam):
class grent(object):
gr_gid = 50001
getgrnam.return_value = grent()
self.assertEqual(parse_gid('group'), 50001)
@patch('grp.getgrnam')
def test_parse_gid_when_nonexisting_name(self, getgrnam):
getgrnam.side_effect = KeyError('group')
with self.assertRaises(KeyError):
parse_gid('group')
class test_initgroups(Case):
@patch('pwd.getpwuid')
@patch('os.initgroups', create=True)
def test_with_initgroups(self, initgroups_, getpwuid):
getpwuid.return_value = ['user']
initgroups(5001, 50001)
initgroups_.assert_called_with('user', 50001)
@patch('celery.platforms.setgroups')
@patch('grp.getgrall')
@patch('pwd.getpwuid')
def test_without_initgroups(self, getpwuid, getgrall, setgroups):
prev = getattr(os, 'initgroups', None)
try:
delattr(os, 'initgroups')
except AttributeError:
pass
try:
getpwuid.return_value = ['user']
class grent(object):
gr_mem = ['user']
def __init__(self, gid):
self.gr_gid = gid
getgrall.return_value = [grent(1), grent(2), grent(3)]
initgroups(5001, 50001)
setgroups.assert_called_with([1, 2, 3])
finally:
if prev:
os.initgroups = prev
class test_detached(Case):
def test_without_resource(self):
prev, platforms.resource = platforms.resource, None
try:
with self.assertRaises(RuntimeError):
detached()
finally:
platforms.resource = prev
@patch('celery.platforms._create_pidlock')
@patch('celery.platforms.signals')
@patch('celery.platforms.maybe_drop_privileges')
@patch('os.geteuid')
@patch(open_fqdn)
def test_default(self, open, geteuid, maybe_drop,
signals, pidlock):
geteuid.return_value = 0
context = detached(uid='user', gid='group')
self.assertIsInstance(context, DaemonContext)
signals.reset.assert_called_with('SIGCLD')
maybe_drop.assert_called_with(uid='user', gid='group')
open.return_value = Mock()
geteuid.return_value = 5001
context = detached(uid='user', gid='group', logfile='/foo/bar')
self.assertIsInstance(context, DaemonContext)
self.assertTrue(context.after_chdir)
context.after_chdir()
open.assert_called_with('/foo/bar', 'a')
open.return_value.close.assert_called_with()
context = detached(pidfile='/foo/bar/pid')
self.assertIsInstance(context, DaemonContext)
self.assertTrue(context.after_chdir)
context.after_chdir()
pidlock.assert_called_with('/foo/bar/pid')
class test_DaemonContext(Case):
@patch('os.fork')
@patch('os.setsid')
@patch('os._exit')
@patch('os.chdir')
@patch('os.umask')
@patch('os.close')
@patch('os.closerange')
@patch('os.open')
@patch('os.dup2')
def test_open(self, dup2, open, close, closer, umask, chdir,
_exit, setsid, fork):
x = DaemonContext(workdir='/opt/workdir', umask=0o22)
x.stdfds = [0, 1, 2]
fork.return_value = 0
with x:
self.assertTrue(x._is_open)
with x:
pass
self.assertEqual(fork.call_count, 2)
setsid.assert_called_with()
self.assertFalse(_exit.called)
chdir.assert_called_with(x.workdir)
umask.assert_called_with(0o22)
self.assertTrue(dup2.called)
fork.reset_mock()
fork.return_value = 1
x = DaemonContext(workdir='/opt/workdir')
x.stdfds = [0, 1, 2]
with x:
pass
self.assertEqual(fork.call_count, 1)
_exit.assert_called_with(0)
x = DaemonContext(workdir='/opt/workdir', fake=True)
x.stdfds = [0, 1, 2]
x._detach = Mock()
with x:
pass
self.assertFalse(x._detach.called)
x.after_chdir = Mock()
with x:
pass
x.after_chdir.assert_called_with()
class test_Pidfile(Case):
@patch('celery.platforms.Pidfile')
def test_create_pidlock(self, Pidfile):
p = Pidfile.return_value = Mock()
p.is_locked.return_value = True
p.remove_if_stale.return_value = False
with override_stdouts() as (_, err):
with self.assertRaises(SystemExit):
create_pidlock('/var/pid')
self.assertIn('already exists', err.getvalue())
p.remove_if_stale.return_value = True
ret = create_pidlock('/var/pid')
self.assertIs(ret, p)
def test_context(self):
p = Pidfile('/var/pid')
p.write_pid = Mock()
p.remove = Mock()
with p as _p:
self.assertIs(_p, p)
p.write_pid.assert_called_with()
p.remove.assert_called_with()
def test_acquire_raises_LockFailed(self):
p = Pidfile('/var/pid')
p.write_pid = Mock()
p.write_pid.side_effect = OSError()
with self.assertRaises(LockFailed):
with p:
pass
@patch('os.path.exists')
def test_is_locked(self, exists):
p = Pidfile('/var/pid')
exists.return_value = True
self.assertTrue(p.is_locked())
exists.return_value = False
self.assertFalse(p.is_locked())
def test_read_pid(self):
with mock_open() as s:
s.write('1816\n')
s.seek(0)
p = Pidfile('/var/pid')
self.assertEqual(p.read_pid(), 1816)
def test_read_pid_partially_written(self):
with mock_open() as s:
s.write('1816')
s.seek(0)
p = Pidfile('/var/pid')
with self.assertRaises(ValueError):
p.read_pid()
def test_read_pid_raises_ENOENT(self):
exc = IOError()
exc.errno = errno.ENOENT
with mock_open(side_effect=exc):
p = Pidfile('/var/pid')
self.assertIsNone(p.read_pid())
def test_read_pid_raises_IOError(self):
exc = IOError()
exc.errno = errno.EAGAIN
with mock_open(side_effect=exc):
p = Pidfile('/var/pid')
with self.assertRaises(IOError):
p.read_pid()
def test_read_pid_bogus_pidfile(self):
with mock_open() as s:
s.write('eighteensixteen\n')
s.seek(0)
p = Pidfile('/var/pid')
with self.assertRaises(ValueError):
p.read_pid()
@patch('os.unlink')
def test_remove(self, unlink):
unlink.return_value = True
p = Pidfile('/var/pid')
p.remove()
unlink.assert_called_with(p.path)
@patch('os.unlink')
def test_remove_ENOENT(self, unlink):
exc = OSError()
exc.errno = errno.ENOENT
unlink.side_effect = exc
p = Pidfile('/var/pid')
p.remove()
unlink.assert_called_with(p.path)
@patch('os.unlink')
def test_remove_EACCES(self, unlink):
exc = OSError()
exc.errno = errno.EACCES
unlink.side_effect = exc
p = Pidfile('/var/pid')
p.remove()
unlink.assert_called_with(p.path)
@patch('os.unlink')
def test_remove_OSError(self, unlink):
exc = OSError()
exc.errno = errno.EAGAIN
unlink.side_effect = exc
p = Pidfile('/var/pid')
with self.assertRaises(OSError):
p.remove()
unlink.assert_called_with(p.path)
@patch('os.kill')
def test_remove_if_stale_process_alive(self, kill):
p = Pidfile('/var/pid')
p.read_pid = Mock()
p.read_pid.return_value = 1816
kill.return_value = 0
self.assertFalse(p.remove_if_stale())
kill.assert_called_with(1816, 0)
p.read_pid.assert_called_with()
kill.side_effect = OSError()
kill.side_effect.errno = errno.ENOENT
self.assertFalse(p.remove_if_stale())
@patch('os.kill')
def test_remove_if_stale_process_dead(self, kill):
with override_stdouts():
p = Pidfile('/var/pid')
p.read_pid = Mock()
p.read_pid.return_value = 1816
p.remove = Mock()
exc = OSError()
exc.errno = errno.ESRCH
kill.side_effect = exc
self.assertTrue(p.remove_if_stale())
kill.assert_called_with(1816, 0)
p.remove.assert_called_with()
def test_remove_if_stale_broken_pid(self):
with override_stdouts():
p = Pidfile('/var/pid')
p.read_pid = Mock()
p.read_pid.side_effect = ValueError()
p.remove = Mock()
self.assertTrue(p.remove_if_stale())
p.remove.assert_called_with()
def test_remove_if_stale_no_pidfile(self):
p = Pidfile('/var/pid')
p.read_pid = Mock()
p.read_pid.return_value = None
p.remove = Mock()
self.assertTrue(p.remove_if_stale())
p.remove.assert_called_with()
@patch('os.fsync')
@patch('os.getpid')
@patch('os.open')
@patch('os.fdopen')
@patch(open_fqdn)
def test_write_pid(self, open_, fdopen, osopen, getpid, fsync):
getpid.return_value = 1816
osopen.return_value = 13
w = fdopen.return_value = WhateverIO()
w.close = Mock()
r = open_.return_value = WhateverIO()
r.write('1816\n')
r.seek(0)
p = Pidfile('/var/pid')
p.write_pid()
w.seek(0)
self.assertEqual(w.readline(), '1816\n')
self.assertTrue(w.close.called)
getpid.assert_called_with()
osopen.assert_called_with(p.path, platforms.PIDFILE_FLAGS,
platforms.PIDFILE_MODE)
fdopen.assert_called_with(13, 'w')
fsync.assert_called_with(13)
open_.assert_called_with(p.path)
@patch('os.fsync')
@patch('os.getpid')
@patch('os.open')
@patch('os.fdopen')
@patch(open_fqdn)
def test_write_reread_fails(self, open_, fdopen,
osopen, getpid, fsync):
getpid.return_value = 1816
osopen.return_value = 13
w = fdopen.return_value = WhateverIO()
w.close = Mock()
r = open_.return_value = WhateverIO()
r.write('11816\n')
r.seek(0)
p = Pidfile('/var/pid')
with self.assertRaises(LockFailed):
p.write_pid()
class test_setgroups(Case):
@patch('os.setgroups', create=True)
def test_setgroups_hack_ValueError(self, setgroups):
def on_setgroups(groups):
if len(groups) <= 200:
setgroups.return_value = True
return
raise ValueError()
setgroups.side_effect = on_setgroups
_setgroups_hack(list(range(400)))
setgroups.side_effect = ValueError()
with self.assertRaises(ValueError):
_setgroups_hack(list(range(400)))
@patch('os.setgroups', create=True)
def test_setgroups_hack_OSError(self, setgroups):
exc = OSError()
exc.errno = errno.EINVAL
def on_setgroups(groups):
if len(groups) <= 200:
setgroups.return_value = True
return
raise exc
setgroups.side_effect = on_setgroups
_setgroups_hack(list(range(400)))
setgroups.side_effect = exc
with self.assertRaises(OSError):
_setgroups_hack(list(range(400)))
exc2 = OSError()
exc2.errno = errno.ESRCH
setgroups.side_effect = exc2
with self.assertRaises(OSError):
_setgroups_hack(list(range(400)))
@patch('os.sysconf')
@patch('celery.platforms._setgroups_hack')
def test_setgroups(self, hack, sysconf):
sysconf.return_value = 100
setgroups(list(range(400)))
hack.assert_called_with(list(range(100)))
@patch('os.sysconf')
@patch('celery.platforms._setgroups_hack')
def test_setgroups_sysconf_raises(self, hack, sysconf):
sysconf.side_effect = ValueError()
setgroups(list(range(400)))
hack.assert_called_with(list(range(400)))
@patch('os.getgroups')
@patch('os.sysconf')
@patch('celery.platforms._setgroups_hack')
def test_setgroups_raises_ESRCH(self, hack, sysconf, getgroups):
sysconf.side_effect = ValueError()
esrch = OSError()
esrch.errno = errno.ESRCH
hack.side_effect = esrch
with self.assertRaises(OSError):
setgroups(list(range(400)))
@patch('os.getgroups')
@patch('os.sysconf')
@patch('celery.platforms._setgroups_hack')
def test_setgroups_raises_EPERM(self, hack, sysconf, getgroups):
sysconf.side_effect = ValueError()
eperm = OSError()
eperm.errno = errno.EPERM
hack.side_effect = eperm
getgroups.return_value = list(range(400))
setgroups(list(range(400)))
getgroups.assert_called_with()
getgroups.return_value = [1000]
with self.assertRaises(OSError):
setgroups(list(range(400)))
getgroups.assert_called_with()
| sunze/py_flask | venv/lib/python3.4/site-packages/celery/tests/utils/test_platforms.py | Python | mit | 23,623 |
from functools import partial
from time import sleep
from mock import call, Mock
from scrapy.crawler import Crawler
from scrapy.http import Request
from scrapy import log, signals
from scrapy.settings import Settings
from scrapy.spider import BaseSpider
from scrapy.xlib.pydispatch import dispatcher
from twisted.internet import reactor
from scrapy_webdriver.http import WebdriverRequest
BASE_SETTINGS = dict(
    DOWNLOAD_HANDLERS={
        'http': 'scrapy_webdriver.download.WebdriverDownloadHandler',
        'https': 'scrapy_webdriver.download.WebdriverDownloadHandler',
    },
    SPIDER_MIDDLEWARES={
        'scrapy_webdriver.middlewares.WebdriverSpiderMiddleware': 543,
    })


class TestRequestQueue:
    @classmethod
    def setup_class(cls):
        cls._settings = BASE_SETTINGS

    def settings(self, **options):
        settings = self._settings.copy()
        settings.update(**options)
        return settings

    def _stop_reactor(self):
        reactor.stop()

    def _wait(self, url, *args, **kwargs):
        sleep(0.1)

    def test_priorization(self):
        webdriver = Mock()
        settings = self.settings(WEBDRIVER_BROWSER=webdriver)
        webdriver.get.side_effect = self._wait
        webdriver.page_source = u''
        dispatcher.connect(self._stop_reactor, signal=signals.spider_closed)
        crawler = Crawler(Settings(values=settings))
        crawler.configure()
        spider = self.Spider(name='test', domain='testdomain')
        crawler.crawl(spider)
        crawler.start()
        log.start(loglevel='ERROR')
        reactor.run()
        assert webdriver.get.mock_calls == [
            call('http://testdomain/path?wr=0'),
            call('http://testdomain/path?wr=0&wa=0'),
            call('http://testdomain/path?wr=0&wa=1'),
            call('http://testdomain/path?wr=1'),
            call('http://testdomain/path?wr=1&wa=0'),
            call('http://testdomain/path?wr=1&wa=1'),
            call('http://testdomain/path?wr=0&wa=0&wr=0'),
            call('http://testdomain/path?wr=0&wa=1&wr=0'),
            call('http://testdomain/path?wr=1&wa=0&wr=0'),
            call('http://testdomain/path?wr=1&wa=1&wr=0')]

    class Spider(BaseSpider):
        def start_requests(self):
            for i in xrange(2):
                yield WebdriverRequest('http://testdomain/path?wr=%d' % i)
                yield Request('http://testdomain/path?r=%d' % i)

        def parse(self, response):
            def get(url):
                response.webdriver.get(url)
            for i in xrange(2):
                fake_url = '%s&wa=%d' % (response.url, i)
                request = response.action_request(url=fake_url,
                                                  callback=self.parse_action)
                # Leave a trace in the webdriver instance mock so we can look
                # at the request processing order.
                request.actions = Mock()
                request.actions.perform.side_effect = partial(get, fake_url)
                yield request

        def parse_action(self, response):
            yield WebdriverRequest('%s&wr=%d' % (response.url, 0),
                                   callback=self.parse_nothing)

        def parse_nothing(self, response):
            pass
| yupengyan/scrapy-webdriver | scrapy_webdriver/tests/test_request_queue.py | Python | mit | 3,272 |
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2008, Frank Scholz <[email protected]>
""" simple and hopefully reusable widgets to ease
the creation of UPnP UI applications
icons taken from the Tango Desktop Project
"""
import os.path
import urllib
import traceback
import pygtk
pygtk.require("2.0")
import gtk
import gobject
import dbus
from dbus.mainloop.glib import DBusGMainLoop
DBusGMainLoop(set_as_default=True)
import dbus.service
import mimetypes
mimetypes.init()
# dbus defines
BUS_NAME = 'org.Coherence'
OBJECT_PATH = '/org/Coherence'
# gtk store defines
NAME_COLUMN = 0
ID_COLUMN = 1
UPNP_CLASS_COLUMN = 2
CHILD_COUNT_COLUMN = 3
UDN_COLUMN = 4
SERVICE_COLUMN = 5
ICON_COLUMN = 6
DIDL_COLUMN = 7
TOOLTIP_ICON_COLUMN = 8
from pkg_resources import resource_filename
class ControlPoint(object):
_instance_ = None # Singleton
def __new__(cls, *args, **kwargs):
obj = getattr(cls, '_instance_', None)
if obj is not None:
return obj
else:
obj = super(ControlPoint, cls).__new__(cls, *args, **kwargs)
cls._instance_ = obj
obj._connect(*args, **kwargs)
return obj
def __init__(self):
pass
def _connect(self):
self.bus = dbus.SessionBus()
self.coherence = self.bus.get_object(BUS_NAME,OBJECT_PATH)
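# Usage sketch for the ControlPoint singleton above (assumes a Coherence
# D-Bus service is available on the session bus; variable names are
# illustrative only):
#   cp_a = ControlPoint()
#   cp_b = ControlPoint()
#   assert cp_a is cp_b          # __new__ returns the cached _instance_
#   proxy = cp_a.coherence       # shared org.Coherence proxy object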
class DeviceExportWidget(object):
def __init__(self,name='Nautilus',standalone=True,root=None):
self.root=root
self.uuid = None
self.name = name
self.standalone=standalone
icon = resource_filename(__name__, os.path.join('icons','emblem-new.png'))
self.new_icon = gtk.gdk.pixbuf_new_from_file(icon)
icon = resource_filename(__name__, os.path.join('icons','emblem-shared.png'))
self.shared_icon = gtk.gdk.pixbuf_new_from_file(icon)
icon = resource_filename(__name__, os.path.join('icons','emblem-unreadable.png'))
self.unshared_icon = gtk.gdk.pixbuf_new_from_file(icon)
self.filestore = gtk.ListStore(str,gtk.gdk.Pixbuf)
self.coherence = ControlPoint().coherence
def build_ui(self,root=None):
if root != None:
self.root = root
self.window = gtk.VBox(homogeneous=False, spacing=0)
self.fileview = gtk.TreeView(self.filestore)
column = gtk.TreeViewColumn('Folders to share')
self.fileview.append_column(column)
icon_cell = gtk.CellRendererPixbuf()
text_cell = gtk.CellRendererText()
column.pack_start(icon_cell, False)
column.pack_start(text_cell, True)
column.set_attributes(text_cell, text=0)
column.add_attribute(icon_cell, "pixbuf",1)
self.window.pack_start(self.fileview,expand=True,fill=True)
buttonbox = gtk.HBox(homogeneous=False, spacing=0)
button = gtk.Button(stock=gtk.STOCK_ADD)
button.set_sensitive(False)
button.connect("clicked", self.new_files)
buttonbox.pack_start(button, expand=False,fill=False, padding=2)
button = gtk.Button(stock=gtk.STOCK_REMOVE)
#button.set_sensitive(False)
button.connect("clicked", self.remove_files)
buttonbox.pack_start(button, expand=False,fill=False, padding=2)
button = gtk.Button(stock=gtk.STOCK_CANCEL)
button.connect("clicked", self.share_cancel)
buttonbox.pack_start(button, expand=False,fill=False, padding=2)
button = gtk.Button(stock=gtk.STOCK_APPLY)
button.connect("clicked", self.share_files)
buttonbox.pack_start(button, expand=False,fill=False, padding=2)
self.window.pack_end(buttonbox,expand=False,fill=False)
return self.window
def share_cancel(self,button):
for row in self.filestore:
print row
if row[1] == self.new_icon:
del row
continue
if row[1] == self.unshared_icon:
row[1] = self.shared_icon
if self.standalone:
gtk.main_quit()
else:
self.root.hide()
def share_files(self,button):
print "share_files with", self.uuid
folders = []
for row in self.filestore:
if row[1] == self.unshared_icon:
del row
continue
folders.append(row[0])
if self.uuid == None:
if len(folders) > 0:
self.uuid = self.coherence.add_plugin('FSStore', {'name': self.name,
'version':'1',
'create_root': 'yes',
'import_folder': '/tmp/UPnP Imports',
'content':','.join(folders)},
dbus_interface=BUS_NAME)
#self.coherence.pin('Nautilus::MediaServer::%d'%os.getpid(),self.uuid)
else:
result = self.coherence.call_plugin(self.uuid,'update_config',{'content':','.join(folders)})
if result != self.uuid:
print "something failed", result
for row in self.filestore:
row[1] = self.shared_icon
self.root.hide()
def add_files(self,files):
print "add_files", files
for filename in files:
for row in self.filestore:
if os.path.abspath(filename) == row[0]:
break
else:
self.add_file(filename)
def add_file(self,filename):
self.filestore.append([os.path.abspath(filename),self.new_icon])
def new_files(self,button):
print "new_files"
def remove_files(self,button):
print "remove_files"
selection = self.fileview.get_selection()
print selection
model, selected_rows = selection.get_selected_rows()
for row_path in selected_rows:
#model.remove(model.get_iter(row_path))
row = model[row_path]
row[1] = self.unshared_icon
class DeviceImportWidget(object):
def __init__(self,standalone=True,root=None):
self.standalone=standalone
self.root=root
self.build_ui()
self.init_controlpoint()
def build_ui(self):
self.window = gtk.VBox(homogeneous=False, spacing=0)
self.combobox = gtk.ComboBox()
self.store = gtk.ListStore(str, # 0: friendly name
str, # 1: device udn
gtk.gdk.Pixbuf)
icon = resource_filename(__name__, os.path.join('icons','network-server.png'))
self.device_icon = gtk.gdk.pixbuf_new_from_file(icon)
# create a CellRenderers to render the data
icon_cell = gtk.CellRendererPixbuf()
text_cell = gtk.CellRendererText()
self.combobox.pack_start(icon_cell, False)
self.combobox.pack_start(text_cell, True)
self.combobox.set_attributes(text_cell, text=0)
self.combobox.add_attribute(icon_cell, "pixbuf",2)
self.combobox.set_model(self.store)
item = self.store.append(None)
self.store.set_value(item, 0, 'Select a MediaServer...')
self.store.set_value(item, 1, '')
self.store.set_value(item, 2, None)
self.combobox.set_active(0)
self.window.pack_start(self.combobox,expand=False,fill=False)
self.filestore = gtk.ListStore(str)
self.fileview = gtk.TreeView(self.filestore)
column = gtk.TreeViewColumn('Files')
self.fileview.append_column(column)
text_cell = gtk.CellRendererText()
column.pack_start(text_cell, True)
column.set_attributes(text_cell, text=0)
self.window.pack_start(self.fileview,expand=True,fill=True)
buttonbox = gtk.HBox(homogeneous=False, spacing=0)
button = gtk.Button(stock=gtk.STOCK_ADD)
button.set_sensitive(False)
button.connect("clicked", self.new_files)
buttonbox.pack_start(button, expand=False,fill=False, padding=2)
button = gtk.Button(stock=gtk.STOCK_REMOVE)
button.set_sensitive(False)
button.connect("clicked", self.remove_files)
buttonbox.pack_start(button, expand=False,fill=False, padding=2)
button = gtk.Button(stock=gtk.STOCK_CANCEL)
if self.standalone:
button.connect("clicked", gtk.main_quit)
else:
button.connect("clicked", lambda x: self.root.destroy())
buttonbox.pack_start(button, expand=False,fill=False, padding=2)
button = gtk.Button(stock=gtk.STOCK_APPLY)
button.connect("clicked", self.import_files)
buttonbox.pack_start(button, expand=False,fill=False, padding=2)
self.window.pack_end(buttonbox,expand=False,fill=False)
def add_file(self,filename):
self.filestore.append([os.path.abspath(filename)])
def new_files(self,button):
print "new_files"
def remove_files(self,button):
print "remove_files"
def import_files(self,button):
print "import_files"
active = self.combobox.get_active()
if active <= 0:
print "no MediaServer selected"
return None
friendlyname, uuid,_ = self.store[active]
try:
row = self.filestore[0]
print 'import to', friendlyname,os.path.basename(row[0])
def success(r):
print 'success',r
self.filestore.remove(self.filestore.get_iter(0))
self.import_files(None)
def reply(r):
print 'reply',r['Result'], r['ObjectID']
from coherence.upnp.core import DIDLLite
didl = DIDLLite.DIDLElement.fromString(r['Result'])
item = didl.getItems()[0]
res = item.res.get_matching(['*:*:*:*'], protocol_type='http-get')
if len(res) > 0:
print 'importURI',res[0].importUri
self.coherence.put_resource(res[0].importUri,row[0],
reply_handler=success,
error_handler=self.handle_error)
mimetype,_ = mimetypes.guess_type(row[0], strict=False)
if mimetype.startswith('image/'):
upnp_class = 'object.item.imageItem'
elif mimetype.startswith('video/'):
upnp_class = 'object.item.videoItem'
elif mimetype.startswith('audio/'):
upnp_class = 'object.item.audioItem'
else:
upnp_class = 'object.item'
self.coherence.create_object(uuid,'DLNA.ORG_AnyContainer',
{'parentID':'DLNA.ORG_AnyContainer','upnp_class':upnp_class,'title':os.path.basename(row[0])},
reply_handler=reply,
error_handler=self.handle_error)
except IndexError:
pass
def handle_error(self,error):
print error
def handle_devices_reply(self,devices):
for device in devices:
if device['device_type'].split(':')[3] == 'MediaServer':
self.media_server_found(device)
def init_controlpoint(self):
cp = ControlPoint()
self.bus = cp.bus
self.coherence = cp.coherence
self.coherence.get_devices(dbus_interface=BUS_NAME,
reply_handler=self.handle_devices_reply,
error_handler=self.handle_error)
self.coherence.connect_to_signal('UPnP_ControlPoint_MediaServer_detected', self.media_server_found, dbus_interface=BUS_NAME)
self.coherence.connect_to_signal('UPnP_ControlPoint_MediaServer_removed', self.media_server_removed, dbus_interface=BUS_NAME)
self.devices = {}
def media_server_found(self,device,udn=None):
for service in device['services']:
service_type = service.split('/')[-1]
if service_type == 'ContentDirectory':
def got_icons(r,udn,item):
print 'got_icons', r
for icon in r:
###FIXME, we shouldn't just use the first icon
icon_loader = gtk.gdk.PixbufLoader()
icon_loader.write(urllib.urlopen(str(icon['url'])).read())
icon_loader.close()
icon = icon_loader.get_pixbuf()
icon = icon.scale_simple(16,16,gtk.gdk.INTERP_BILINEAR)
self.store.set_value(item, 2, icon)
break
def reply(r,udn):
if 'CreateObject' in r:
self.devices[udn] = {'ContentDirectory':{}}
self.devices[udn]['ContentDirectory']['actions'] = r
item = self.store.append(None)
self.store.set_value(item, 0, str(device['friendly_name']))
self.store.set_value(item, 1, str(device['udn']))
self.store.set_value(item, 2, self.device_icon)
d = self.bus.get_object(BUS_NAME+'.device',device['path'])
d.get_device_icons(reply_handler=lambda x : got_icons(x,str(device['udn']),item),error_handler=self.handle_error)
s = self.bus.get_object(BUS_NAME+'.service',service)
s.get_available_actions(reply_handler=lambda x : reply(x,str(device['udn'])),error_handler=self.handle_error)
def media_server_removed(self,udn):
row_count = 0
for row in self.store:
if udn == row[1]:
self.store.remove(self.store.get_iter(row_count))
del self.devices[str(udn)]
break
row_count += 1
class TreeWidget(object):
def __init__(self,cb_item_dbl_click=None,
cb_resource_chooser=None):
self.cb_item_dbl_click = cb_item_dbl_click
self.cb_item_right_click = None
self.cb_resource_chooser = cb_resource_chooser
self.build_ui()
self.init_controlpoint()
def build_ui(self):
self.window = gtk.ScrolledWindow()
self.window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
icon = resource_filename(__name__, os.path.join('icons','network-server.png'))
self.device_icon = gtk.gdk.pixbuf_new_from_file(icon)
icon = resource_filename(__name__, os.path.join('icons','folder.png'))
self.folder_icon = gtk.gdk.pixbuf_new_from_file(icon)
icon = resource_filename(__name__, os.path.join('icons','audio-x-generic.png'))
self.audio_icon = gtk.gdk.pixbuf_new_from_file(icon)
icon = resource_filename(__name__, os.path.join('icons','video-x-generic.png'))
self.video_icon = gtk.gdk.pixbuf_new_from_file(icon)
icon = resource_filename(__name__, os.path.join('icons','image-x-generic.png'))
self.image_icon = gtk.gdk.pixbuf_new_from_file(icon)
self.store = gtk.TreeStore(str, # 0: name or title
str, # 1: id, '0' for the device
str, # 2: upnp_class, 'root' for the device
int, # 3: child count, -1 if not available
str, # 4: device udn, '' for an item
str, # 5: service path, '' for a non container item
gtk.gdk.Pixbuf,
str, # 7: DIDLLite fragment, '' for a non upnp item
gtk.gdk.Pixbuf
)
self.treeview = gtk.TreeView(self.store)
self.column = gtk.TreeViewColumn('MediaServers')
self.treeview.append_column(self.column)
# create a CellRenderers to render the data
icon_cell = gtk.CellRendererPixbuf()
text_cell = gtk.CellRendererText()
self.column.pack_start(icon_cell, False)
self.column.pack_start(text_cell, True)
self.column.set_attributes(text_cell, text=0)
self.column.add_attribute(icon_cell, "pixbuf",6)
#self.column.set_cell_data_func(self.cellpb, get_icon)
#self.treeview.insert_column_with_attributes(-1, 'MediaServers', cell, text=0)
self.treeview.connect("row-activated", self.browse)
self.treeview.connect("row-expanded", self.row_expanded)
self.treeview.connect("button_press_event", self.button_action)
self.treeview.set_property("has-tooltip", True)
self.treeview.connect("query-tooltip", self.show_tooltip)
self.tooltip_path = None
self.we_are_scrolling = None
def end_scrolling():
self.we_are_scrolling = None
def start_scrolling(w,e):
if self.we_are_scrolling != None:
gobject.source_remove(self.we_are_scrolling)
self.we_are_scrolling = gobject.timeout_add(800, end_scrolling)
self.treeview.connect('scroll-event', start_scrolling)
self.window.add(self.treeview)
def show_tooltip(self, widget, x, y, keyboard_mode, tooltip):
if self.we_are_scrolling != None:
return False
ret = False
try:
path = self.treeview.get_dest_row_at_pos(x, y)
iter = self.store.get_iter(path[0])
title,object_id,upnp_class,item = self.store.get(iter,NAME_COLUMN,ID_COLUMN,UPNP_CLASS_COLUMN,DIDL_COLUMN)
from coherence.upnp.core import DIDLLite
if upnp_class == 'object.item.videoItem':
self.tooltip_path = object_id
item = DIDLLite.DIDLElement.fromString(item).getItems()[0]
tooltip_icon, = self.store.get(iter,TOOLTIP_ICON_COLUMN)
if tooltip_icon != None:
tooltip.set_icon(tooltip_icon)
else:
tooltip.set_icon(self.video_icon)
for res in item.res:
protocol,network,content_format,additional_info = res.protocolInfo.split(':')
if(content_format == 'image/jpeg' and
'DLNA.ORG_PN=JPEG_TN' in additional_info.split(';')):
icon_loader = gtk.gdk.PixbufLoader()
icon_loader.write(urllib.urlopen(str(res.data)).read())
icon_loader.close()
icon = icon_loader.get_pixbuf()
tooltip.set_icon(icon)
self.store.set_value(iter, TOOLTIP_ICON_COLUMN, icon)
#print "got poster", icon
break
title = title.replace('&','&amp;')
try:
director = item.director.replace('&','&amp;')
except AttributeError:
director = ""
try:
description = item.description.replace('&','&amp;')
except AttributeError:
description = ""
tooltip.set_markup("<b>%s</b>\n"
"<b>Director:</b> %s\n"
"<b>Description:</b> %s" % (title,
director,
description))
ret = True
except TypeError:
#print traceback.format_exc()
pass
except Exception:
#print traceback.format_exc()
#print "something wrong"
pass
return ret
def button_action(self, widget, event):
#print "button_action", widget, event, event.button
if self.cb_item_right_click != None:
return self.cb_item_right_click(widget, event)
return 0
def handle_error(self,error):
print error
def handle_devices_reply(self,devices):
for device in devices:
if device['device_type'].split(':')[3] == 'MediaServer':
self.media_server_found(device)
def init_controlpoint(self):
cp = ControlPoint()
self.bus = cp.bus
self.coherence = cp.coherence
self.hostname = self.coherence.hostname(dbus_interface=BUS_NAME)
self.coherence.get_devices(dbus_interface=BUS_NAME,
reply_handler=self.handle_devices_reply,
error_handler=self.handle_error)
self.coherence.connect_to_signal('UPnP_ControlPoint_MediaServer_detected', self.media_server_found, dbus_interface=BUS_NAME)
self.coherence.connect_to_signal('UPnP_ControlPoint_MediaServer_removed', self.media_server_removed, dbus_interface=BUS_NAME)
self.devices = {}
def device_has_action(self,udn,service,action):
try:
self.devices[udn][service]['actions'].index(action)
return True
except:
return False
def state_variable_change( self, udn, service, variable, value):
#print "state_variable_change", udn, service, variable, 'changed to', value
if variable == 'ContainerUpdateIDs':
changes = value.split(',')
while len(changes) > 1:
container = changes.pop(0).strip()
update_id = changes.pop(0).strip()
def match_func(model, iter, data):
column, key = data # data is a tuple containing column number, key
value = model.get_value(iter, column)
return value == key
def search(model, iter, func, data):
#print "search", model, iter, data
while iter:
if func(model, iter, data):
return iter
result = search(model, model.iter_children(iter), func, data)
if result: return result
iter = model.iter_next(iter)
return None
row_count = 0
for row in self.store:
if udn == row[UDN_COLUMN]:
iter = self.store.get_iter(row_count)
match_iter = search(self.store, self.store.iter_children(iter),
match_func, (ID_COLUMN, container))
if match_iter:
print "heureka, we have a change in ", container, ", container needs a reload"
path = self.store.get_path(match_iter)
expanded = self.treeview.row_expanded(path)
child = self.store.iter_children(match_iter)
while child:
self.store.remove(child)
child = self.store.iter_children(match_iter)
self.browse(self.treeview,path,None,
starting_index=0,requested_count=0,force=True,expand=expanded)
break
row_count += 1
def media_server_found(self,device,udn=None):
#print "media_server_found", device['friendly_name']
item = self.store.append(None)
self.store.set_value(item, NAME_COLUMN, device['friendly_name'])
self.store.set_value(item, ID_COLUMN, '0')
self.store.set_value(item, UPNP_CLASS_COLUMN, 'root')
self.store.set_value(item, CHILD_COUNT_COLUMN, -1)
self.store.set_value(item, UDN_COLUMN, str(device['udn']))
self.store.set_value(item, ICON_COLUMN, self.device_icon)
self.store.set_value(item, DIDL_COLUMN, '')
self.store.set_value(item, TOOLTIP_ICON_COLUMN, None)
self.store.append(item, ('...loading...','','placeholder',-1,'','',None,'',None))
self.devices[str(device['udn'])] = {'ContentDirectory':{}}
for service in device['services']:
service_type = service.split('/')[-1]
if service_type == 'ContentDirectory':
self.store.set_value(item, SERVICE_COLUMN, service)
self.devices[str(device['udn'])]['ContentDirectory'] = {}
def reply(r,udn):
self.devices[udn]['ContentDirectory']['actions'] = r
def got_icons(r,udn,item):
#print 'got_icons', r
for icon in r:
###FIXME, we shouldn't just use the first icon
icon_loader = gtk.gdk.PixbufLoader()
icon_loader.write(urllib.urlopen(str(icon['url'])).read())
icon_loader.close()
icon = icon_loader.get_pixbuf()
icon = icon.scale_simple(16,16,gtk.gdk.INTERP_BILINEAR)
self.store.set_value(item, ICON_COLUMN, icon)
break
def reply_subscribe(udn, service, r):
for k,v in r.iteritems():
self.state_variable_change(udn,service,k,v)
s = self.bus.get_object(BUS_NAME+'.service',service)
s.connect_to_signal('StateVariableChanged', self.state_variable_change, dbus_interface=BUS_NAME+'.service')
s.get_available_actions(reply_handler=lambda x : reply(x,str(device['udn'])),error_handler=self.handle_error)
s.subscribe(reply_handler=reply_subscribe,error_handler=self.handle_error)
d = self.bus.get_object(BUS_NAME+'.device',device['path'])
d.get_device_icons(reply_handler=lambda x : got_icons(x,str(device['udn']),item),error_handler=self.handle_error)
def media_server_removed(self,udn):
#print "media_server_removed", udn
row_count = 0
for row in self.store:
if udn == row[UDN_COLUMN]:
self.store.remove(self.store.get_iter(row_count))
del self.devices[str(udn)]
break
row_count += 1
def row_expanded(self,view,iter,row_path):
#print "row_expanded", view,iter,row_path
child = self.store.iter_children(iter)
if child:
upnp_class, = self.store.get(child,UPNP_CLASS_COLUMN)
if upnp_class == 'placeholder':
self.browse(view,row_path,None)
def browse(self,view,row_path,column,starting_index=0,requested_count=0,force=False,expand=False):
#print "browse", view,row_path,column,starting_index,requested_count,force
iter = self.store.get_iter(row_path)
child = self.store.iter_children(iter)
if child:
upnp_class, = self.store.get(child,UPNP_CLASS_COLUMN)
if upnp_class != 'placeholder':
if force == False:
if view.row_expanded(row_path):
view.collapse_row(row_path)
else:
view.expand_row(row_path, False)
return
title,object_id,upnp_class = self.store.get(iter,NAME_COLUMN,ID_COLUMN,UPNP_CLASS_COLUMN)
if(not upnp_class.startswith('object.container') and
not upnp_class == 'root'):
url, = self.store.get(iter,SERVICE_COLUMN)
if url == '':
return
print "request to play:", title,object_id,url
if self.cb_item_dbl_click != None:
self.cb_item_dbl_click(url)
return
def reply(r):
#print "browse_reply - %s of %s returned" % (r['NumberReturned'],r['TotalMatches'])
from coherence.upnp.core import DIDLLite
child = self.store.iter_children(iter)
if child:
upnp_class, = self.store.get(child,UPNP_CLASS_COLUMN)
if upnp_class == 'placeholder':
self.store.remove(child)
title, = self.store.get(iter,NAME_COLUMN)
try:
title = title[:title.rindex('(')]
self.store.set_value(iter,NAME_COLUMN, "%s(%d)" % (title,int(r['TotalMatches'])))
except ValueError:
pass
didl = DIDLLite.DIDLElement.fromString(r['Result'])
for item in didl.getItems():
#print item.title, item.id, item.upnp_class
if item.upnp_class.startswith('object.container'):
icon = self.folder_icon
service, = self.store.get(iter,SERVICE_COLUMN)
child_count = item.childCount
try:
title = "%s (%d)" % (item.title,item.childCount)
except TypeError:
title = "%s (n/a)" % item.title
child_count = -1
else:
icon=None
service = ''
if callable(self.cb_resource_chooser):
service = self.cb_resource_chooser(item.res)
else:
res = item.res.get_matching(['*:%s:*:*' % self.hostname], protocol_type='internal')
if len(res) == 0:
res = item.res.get_matching(['*:*:*:*'], protocol_type='http-get')
if len(res) > 0:
res = res[0]
remote_protocol,remote_network,remote_content_format,_ = res.protocolInfo.split(':')
service = res.data
child_count = -1
title = item.title
if item.upnp_class.startswith('object.item.audioItem'):
icon = self.audio_icon
elif item.upnp_class.startswith('object.item.videoItem'):
icon = self.video_icon
elif item.upnp_class.startswith('object.item.imageItem'):
icon = self.image_icon
stored_didl = DIDLLite.DIDLElement()
stored_didl.addItem(item)
new_iter = self.store.append(iter, (title,item.id,item.upnp_class,child_count,'',service,icon,stored_didl.toString(),None))
if item.upnp_class.startswith('object.container'):
self.store.append(new_iter, ('...loading...','','placeholder',-1,'','',None,'',None))
if((int(r['TotalMatches']) > 0 and force==False) or
expand==True):
view.expand_row(row_path, False)
if(requested_count != int(r['NumberReturned']) and
int(r['NumberReturned']) < (int(r['TotalMatches'])-starting_index)):
print "seems we have been returned only a part of the result"
print "requested %d, starting at %d" % (requested_count,starting_index)
print "got %d out of %d" % (int(r['NumberReturned']), int(r['TotalMatches']))
print "requesting more starting now at %d" % (starting_index+int(r['NumberReturned']))
self.browse(view,row_path,column,
starting_index=starting_index+int(r['NumberReturned']),
force=True)
service, = self.store.get(iter,SERVICE_COLUMN)
if service == '':
return
s = self.bus.get_object(BUS_NAME+'.service',service)
s.action('browse',
{'object_id':object_id,'process_result':'no',
'starting_index':str(starting_index),'requested_count':str(requested_count)},
reply_handler=reply,error_handler=self.handle_error)
def destroy_object(self, row_path):
#print "destroy_object", row_path
iter = self.store.get_iter(row_path)
object_id, = self.store.get(iter,ID_COLUMN)
parent_iter = self.store.iter_parent(iter)
service, = self.store.get(parent_iter,SERVICE_COLUMN)
if service == '':
return
def reply(r):
#print "destroy_object reply", r
pass
s = self.bus.get_object(BUS_NAME+'.service',service)
s.action('destroy_object',
{'object_id':object_id},
reply_handler=reply,error_handler=self.handle_error)
if __name__ == '__main__':
ui=TreeWidget()
window = gtk.Window()
window.connect("delete_event", gtk.main_quit)
window.set_default_size(350, 550)
window.add(ui.window)
window.show_all()
gtk.gdk.threads_init()
gtk.main()
| sreichholf/python-coherence | coherence/ui/av_widgets.py | Python | mit | 32,894 |
# Module tests
# Copyright (c) 2014, Jouni Malinen <[email protected]>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import os
import time
import hostapd
def test_module_wpa_supplicant(dev, apdev, params):
    """wpa_supplicant module tests"""
    if "OK" not in dev[0].global_request("MODULE_TESTS"):
        raise Exception("Module tests failed")
    # allow eloop test to complete
    time.sleep(0.75)
    dev[0].relog()
    with open(os.path.join(params['logdir'], 'log0'), 'r') as f:
        res = f.read()
    if "FAIL - should not have called this function" in res:
        raise Exception("eloop test failed")


def test_module_hostapd(dev):
    """hostapd module tests"""
    hapd_global = hostapd.HostapdGlobal()
    if "OK" not in hapd_global.ctrl.request("MODULE_TESTS"):
        raise Exception("Module tests failed")
| s0lst1c3/eaphammer | local/hostapd-eaphammer/tests/hwsim/test_module_tests.py | Python | gpl-3.0 | 889 |
'''
Created on Jun 11, 2011
@author: mkiyer
'''
class Breakpoint(object):
    def __init__(self):
        self.name = None
        self.seq5p = None
        self.seq3p = None
        self.chimera_names = []

    @property
    def pos(self):
        """
        return position of break along sequence measured from 5' -> 3'
        """
        return len(self.seq5p)

    @staticmethod
    def from_list(fields):
        b = Breakpoint()
        b.name = fields[0]
        b.seq5p = fields[1]
        b.seq3p = fields[2]
        b.chimera_names = fields[3].split(',')
        return b

    def to_list(self):
        fields = [self.name, self.seq5p, self.seq3p]
        fields.append(','.join(self.chimera_names))
        return fields
| madhavsuresh/chimerascan | chimerascan/deprecated/breakpoint.py | Python | gpl-3.0 | 751 |
#!/usr/bin/python
#
# Copyright (c) 2017 Bruno Medina Bolanos Cacho <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_managed_disk
version_added: "2.4"
short_description: Manage Azure Managed Disks
description:
- Create, update and delete an Azure Managed Disk
options:
resource_group:
description:
- Name of a resource group where the managed disk exists or will be created.
required: true
name:
description:
- Name of the managed disk.
required: true
state:
description:
- Assert the state of the managed disk. Use C(present) to create or update a managed disk and C(absent) to delete a managed disk.
default: present
choices:
- absent
- present
location:
description:
- Valid Azure location. Defaults to location of the resource group.
storage_account_type:
description:
- "Type of storage for the managed disk: C(Standard_LRS) or C(Premium_LRS). If not specified the disk is created C(Standard_LRS)."
choices:
- Standard_LRS
- Premium_LRS
create_option:
description:
- "Allowed values: empty, import, copy.
- C(import) from a VHD file in I(source_uri) and C(copy) from previous managed disk I(source_uri)."
choices:
- empty
- import
- copy
source_uri:
description:
- URI to a valid VHD file to be used or the resource ID of the managed disk to copy.
aliases:
- source_resource_uri
os_type:
description:
- "Type of Operating System: C(linux) or C(windows)."
- "Used when I(create_option) is either C(copy) or C(import) and the source is an OS disk."
- "If omitted during creation, no value is set."
- "If omitted during an update, no change is made."
- "Once set, this value cannot be cleared."
choices:
- linux
- windows
disk_size_gb:
description:
- "Size in GB of the managed disk to be created."
- "If I(create_option) is C(copy) then the value must be greater than or equal to the source's size."
managed_by:
description:
- Name of an existing virtual machine with which the disk is or will be associated, this VM should be in the same resource group.
- To detach a disk from a vm, explicitly set to ''.
- If this option is unset, the value will not be changed.
version_added: 2.5
tags:
description:
- Tags to assign to the managed disk.
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Bruno Medina (@brusMX)"
'''
EXAMPLES = '''
- name: Create managed disk
azure_rm_managed_disk:
name: mymanageddisk
location: eastus
resource_group: myResourceGroup
disk_size_gb: 4
- name: Create managed operating system disk from page blob
azure_rm_managed_disk:
name: mymanageddisk
location: eastus2
resource_group: myResourceGroup
create_option: import
source_uri: https://storageaccountname.blob.core.windows.net/containername/blob-name.vhd
os_type: windows
storage_account_type: Premium_LRS
- name: Mount the managed disk to VM
azure_rm_managed_disk:
name: mymanageddisk
location: eastus
resource_group: myResourceGroup
disk_size_gb: 4
managed_by: testvm001
- name: Unmount the managed disk from VM
azure_rm_managed_disk:
name: mymanageddisk
location: eastus
resource_group: myResourceGroup
managed_by: ''
disk_size_gb: 4
- name: Delete managed disk
azure_rm_managed_disk:
name: mymanageddisk
location: eastus
resource_group: myResourceGroup
state: absent
'''
RETURN = '''
id:
description: The managed disk resource ID.
returned: always
type: dict
state:
description: Current state of the managed disk
returned: always
type: dict
changed:
description: Whether or not the resource has changed
returned: always
type: bool
'''
import re
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.tools import parse_resource_id
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
# duplicated in azure_rm_managed_disk_facts
def managed_disk_to_dict(managed_disk):
create_data = managed_disk.creation_data
return dict(
id=managed_disk.id,
name=managed_disk.name,
location=managed_disk.location,
tags=managed_disk.tags,
create_option=create_data.create_option.lower(),
source_uri=create_data.source_uri or create_data.source_resource_id,
disk_size_gb=managed_disk.disk_size_gb,
os_type=managed_disk.os_type.lower() if managed_disk.os_type else None,
storage_account_type=managed_disk.sku.name if managed_disk.sku else None,
managed_by=managed_disk.managed_by
)
class AzureRMManagedDisk(AzureRMModuleBase):
"""Configuration class for an Azure RM Managed Disk resource"""
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
state=dict(
type='str',
default='present',
choices=['present', 'absent']
),
location=dict(
type='str'
),
storage_account_type=dict(
type='str',
choices=['Standard_LRS', 'Premium_LRS']
),
create_option=dict(
type='str',
choices=['empty', 'import', 'copy']
),
source_uri=dict(
type='str',
aliases=['source_resource_uri']
),
os_type=dict(
type='str',
choices=['linux', 'windows']
),
disk_size_gb=dict(
type='int'
),
managed_by=dict(
type='str'
)
)
required_if = [
('create_option', 'import', ['source_uri']),
('create_option', 'copy', ['source_uri']),
('create_option', 'empty', ['disk_size_gb'])
]
self.results = dict(
changed=False,
state=dict())
self.resource_group = None
self.name = None
self.location = None
self.storage_account_type = None
self.create_option = None
self.source_uri = None
self.os_type = None
self.disk_size_gb = None
self.tags = None
self.managed_by = None
super(AzureRMManagedDisk, self).__init__(
derived_arg_spec=self.module_arg_spec,
required_if=required_if,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
"""Main module execution method"""
for key in list(self.module_arg_spec.keys()) + ['tags']:
setattr(self, key, kwargs[key])
result = None
changed = False
resource_group = self.get_resource_group(self.resource_group)
if not self.location:
self.location = resource_group.location
disk_instance = self.get_managed_disk()
result = disk_instance
# need create or update
if self.state == 'present':
parameter = self.generate_managed_disk_property()
if not disk_instance or self.is_different(disk_instance, parameter):
changed = True
if not self.check_mode:
result = self.create_or_update_managed_disk(parameter)
else:
result = True
# unmount from the old virtual machine and mount to the new virtual machine
if self.managed_by or self.managed_by == '':
vm_name = parse_resource_id(disk_instance.get('managed_by', '')).get('name') if disk_instance else None
vm_name = vm_name or ''
if self.managed_by != vm_name:
changed = True
if not self.check_mode:
if vm_name:
self.detach(vm_name, result)
if self.managed_by:
self.attach(self.managed_by, result)
result = self.get_managed_disk()
if self.state == 'absent' and disk_instance:
changed = True
if not self.check_mode:
self.delete_managed_disk()
result = True
self.results['changed'] = changed
self.results['state'] = result
return self.results
def attach(self, vm_name, disk):
vm = self._get_vm(vm_name)
# find the lun
luns = ([d.lun for d in vm.storage_profile.data_disks]
if vm.storage_profile.data_disks else [])
lun = max(luns) + 1 if luns else 0
# prepare the data disk
params = self.compute_models.ManagedDiskParameters(id=disk.get('id'), storage_account_type=disk.get('storage_account_type'))
data_disk = self.compute_models.DataDisk(lun=lun, create_option=self.compute_models.DiskCreateOptionTypes.attach, managed_disk=params)
vm.storage_profile.data_disks.append(data_disk)
self._update_vm(vm_name, vm)
def detach(self, vm_name, disk):
vm = self._get_vm(vm_name)
leftovers = [d for d in vm.storage_profile.data_disks if d.name.lower() != disk.get('name').lower()]
if len(vm.storage_profile.data_disks) == len(leftovers):
self.fail("No disk with the name '{0}' was found".format(disk.get('name')))
vm.storage_profile.data_disks = leftovers
self._update_vm(vm_name, vm)
def _update_vm(self, name, params):
try:
poller = self.compute_client.virtual_machines.create_or_update(self.resource_group, name, params)
self.get_poller_result(poller)
except Exception as exc:
self.fail("Error updating virtual machine {0} - {1}".format(name, str(exc)))
def _get_vm(self, name):
try:
return self.compute_client.virtual_machines.get(self.resource_group, name, expand='instanceview')
except Exception as exc:
self.fail("Error getting virtual machine {0} - {1}".format(name, str(exc)))
def generate_managed_disk_property(self):
# TODO: Add support for EncryptionSettings, DiskIOPSReadWrite, DiskMBpsReadWrite, Zones
disk_params = {}
creation_data = {}
disk_params['location'] = self.location
disk_params['tags'] = self.tags
if self.storage_account_type:
storage_account_type = self.compute_models.DiskSku(name=self.storage_account_type)
disk_params['sku'] = storage_account_type
disk_params['disk_size_gb'] = self.disk_size_gb
creation_data['create_option'] = self.compute_models.DiskCreateOption.empty
if self.create_option == 'import':
creation_data['create_option'] = self.compute_models.DiskCreateOption.import_enum
creation_data['source_uri'] = self.source_uri
elif self.create_option == 'copy':
creation_data['create_option'] = self.compute_models.DiskCreateOption.copy
creation_data['source_resource_id'] = self.source_uri
if self.os_type:
typecon = {
'linux': self.compute_models.OperatingSystemTypes.linux,
'windows': self.compute_models.OperatingSystemTypes.windows
}
disk_params['os_type'] = typecon[self.os_type]
else:
disk_params['os_type'] = None
disk_params['creation_data'] = creation_data
return disk_params
def create_or_update_managed_disk(self, parameter):
try:
poller = self.compute_client.disks.create_or_update(
self.resource_group,
self.name,
parameter)
aux = self.get_poller_result(poller)
return managed_disk_to_dict(aux)
except CloudError as e:
self.fail("Error creating the managed disk: {0}".format(str(e)))
# This method accounts for the difference in structure between the
# Azure retrieved disk and the parameters for the new disk to be created.
def is_different(self, found_disk, new_disk):
resp = False
if new_disk.get('disk_size_gb'):
if not found_disk['disk_size_gb'] == new_disk['disk_size_gb']:
resp = True
if new_disk.get('os_type'):
if not found_disk['os_type'] == new_disk['os_type']:
resp = True
if new_disk.get('sku'):
if not found_disk['storage_account_type'] == new_disk['sku'].name:
resp = True
# Check how to implement tags
if new_disk.get('tags') is not None:
if not found_disk['tags'] == new_disk['tags']:
resp = True
return resp
def delete_managed_disk(self):
try:
poller = self.compute_client.disks.delete(
self.resource_group,
self.name)
return self.get_poller_result(poller)
except CloudError as e:
self.fail("Error deleting the managed disk: {0}".format(str(e)))
def get_managed_disk(self):
try:
resp = self.compute_client.disks.get(
self.resource_group,
self.name)
return managed_disk_to_dict(resp)
except CloudError as e:
self.log('Did not find managed disk')
def main():
"""Main execution"""
AzureRMManagedDisk()
if __name__ == '__main__':
main()
| EvanK/ansible | lib/ansible/modules/cloud/azure/azure_rm_managed_disk.py | Python | gpl-3.0 | 14,499 |
#!/usr/bin/python
# James Laska ([email protected])
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: redhat_subscription
short_description: Manage registration and subscriptions to RHSM using the C(subscription-manager) command
description:
- Manage registration and subscription to the Red Hat Subscription Management entitlement platform using the C(subscription-manager) command
version_added: "1.2"
author: "Barnaby Court (@barnabycourt)"
notes:
- In order to register a system, subscription-manager requires either a username and password, or an activationkey and an Organization ID.
- Since 2.5 values for I(server_hostname), I(server_insecure), I(rhsm_baseurl),
I(server_proxy_hostname), I(server_proxy_port), I(server_proxy_user) and
I(server_proxy_password) are no longer taken from the C(/etc/rhsm/rhsm.conf)
config file and default to None.
requirements:
- subscription-manager
options:
state:
description:
- whether to register and subscribe (C(present)), or unregister (C(absent)) a system
choices: [ "present", "absent" ]
default: "present"
username:
description:
- access.redhat.com or Sat6 username
password:
description:
- access.redhat.com or Sat6 password
server_hostname:
description:
- Specify an alternative Red Hat Subscription Management or Sat6 server
server_insecure:
description:
- Enable or disable https server certificate verification when connecting to C(server_hostname)
rhsm_baseurl:
description:
- Specify CDN baseurl
server_proxy_hostname:
description:
- Specify a HTTP proxy hostname
version_added: "2.4"
server_proxy_port:
description:
- Specify a HTTP proxy port
version_added: "2.4"
server_proxy_user:
description:
- Specify a user for HTTP proxy with basic authentication
version_added: "2.4"
server_proxy_password:
description:
- Specify a password for HTTP proxy with basic authentication
version_added: "2.4"
auto_attach:
description:
- Upon successful registration, auto-consume available subscriptions
      - Added in favor of the deprecated autosubscribe option in 2.5.
type: bool
default: 'no'
version_added: "2.5"
aliases: [autosubscribe]
activationkey:
description:
- supply an activation key for use with registration
org_id:
description:
- Organization ID to use in conjunction with activationkey
version_added: "2.0"
environment:
description:
- Register with a specific environment in the destination org. Used with Red Hat Satellite 6.x or Katello
version_added: "2.2"
pool:
description:
- |
Specify a subscription pool name to consume. Regular expressions accepted. Use I(pool_ids) instead if
possible, as it is much faster. Mutually exclusive with I(pool_ids).
default: '^$'
pool_ids:
description:
- |
Specify subscription pool IDs to consume. Prefer over I(pool) when possible as it is much faster.
A pool ID may be specified as a C(string) - just the pool ID (ex. C(0123456789abcdef0123456789abcdef)),
or as a C(dict) with the pool ID as the key, and a quantity as the value (ex.
        C(0123456789abcdef0123456789abcdef: 2)). If the quantity is provided, it is used to consume multiple
entitlements from a pool (the pool must support this). Mutually exclusive with I(pool).
default: []
version_added: "2.4"
consumer_type:
description:
- The type of unit to register, defaults to system
version_added: "2.1"
consumer_name:
description:
- Name of the system to register, defaults to the hostname
version_added: "2.1"
consumer_id:
description:
- |
References an existing consumer ID to resume using a previous registration
for this system. If the system's identity certificate is lost or corrupted,
this option allows it to resume using its previous identity and subscriptions.
The default is to not specify a consumer ID so a new ID is created.
version_added: "2.1"
force_register:
description:
- Register the system even if it is already registered
type: bool
default: 'no'
version_added: "2.2"
'''
EXAMPLES = '''
- name: Register as user (joe_user) with password (somepass) and auto-subscribe to available content.
redhat_subscription:
state: present
username: joe_user
password: somepass
auto_attach: true
- name: Same as above but subscribe to a specific pool by ID.
redhat_subscription:
state: present
username: joe_user
password: somepass
pool_ids: 0123456789abcdef0123456789abcdef
- name: Register and subscribe to multiple pools.
redhat_subscription:
state: present
username: joe_user
password: somepass
pool_ids:
- 0123456789abcdef0123456789abcdef
- 1123456789abcdef0123456789abcdef
- name: Same as above but consume multiple entitlements.
redhat_subscription:
state: present
username: joe_user
password: somepass
pool_ids:
- 0123456789abcdef0123456789abcdef: 2
- 1123456789abcdef0123456789abcdef: 4
- name: Register and pull existing system data.
redhat_subscription:
state: present
username: joe_user
password: somepass
consumer_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- name: Register with activationkey and consume subscriptions matching Red Hat Enterprise Server or Red Hat Virtualization
redhat_subscription:
state: present
activationkey: 1-222333444
org_id: 222333444
pool: '^(Red Hat Enterprise Server|Red Hat Virtualization)$'
- name: Update the consumed subscriptions from the previous example (remove Red Hat Virtualization subscription)
redhat_subscription:
state: present
activationkey: 1-222333444
org_id: 222333444
pool: '^Red Hat Enterprise Server$'
- name: Register as user credentials into given environment (against Red Hat Satellite 6.x), and auto-subscribe.
redhat_subscription:
state: present
username: joe_user
password: somepass
environment: Library
auto_attach: true
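# Unregistering needs no credentials; state=absent is enough (illustrative example).
- name: Unregister the system
  redhat_subscription:
    state: absent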
'''
RETURN = '''
subscribed_pool_ids:
description: List of pool IDs to which system is now subscribed
returned: success
type: complex
contains: {
"8a85f9815ab905d3015ab928c7005de4": "1"
}
'''
import os
import re
import shutil
import tempfile
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.six.moves import configparser
SUBMAN_CMD = None
class RegistrationBase(object):
def __init__(self, module, username=None, password=None):
self.module = module
self.username = username
self.password = password
def configure(self):
raise NotImplementedError("Must be implemented by a sub-class")
def enable(self):
# Remove any existing redhat.repo
redhat_repo = '/etc/yum.repos.d/redhat.repo'
if os.path.isfile(redhat_repo):
os.unlink(redhat_repo)
def register(self):
raise NotImplementedError("Must be implemented by a sub-class")
def unregister(self):
raise NotImplementedError("Must be implemented by a sub-class")
def unsubscribe(self):
raise NotImplementedError("Must be implemented by a sub-class")
def update_plugin_conf(self, plugin, enabled=True):
plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin
if os.path.isfile(plugin_conf):
tmpfd, tmpfile = tempfile.mkstemp()
shutil.copy2(plugin_conf, tmpfile)
cfg = configparser.ConfigParser()
cfg.read([tmpfile])
            if enabled:
                # ConfigParser.set() requires string values on Python 3
                cfg.set('main', 'enabled', '1')
            else:
                cfg.set('main', 'enabled', '0')
fd = open(tmpfile, 'w+')
cfg.write(fd)
fd.close()
self.module.atomic_move(tmpfile, plugin_conf)
def subscribe(self, **kwargs):
raise NotImplementedError("Must be implemented by a sub-class")
class Rhsm(RegistrationBase):
def __init__(self, module, username=None, password=None):
RegistrationBase.__init__(self, module, username, password)
self.module = module
def enable(self):
'''
Enable the system to receive updates from subscription-manager.
This involves updating affected yum plugins and removing any
conflicting yum repositories.
'''
RegistrationBase.enable(self)
self.update_plugin_conf('rhnplugin', False)
self.update_plugin_conf('subscription-manager', True)
def configure(self, **kwargs):
'''
Configure the system as directed for registration with RHSM
Raises:
* Exception - if error occurs while running command
'''
args = [SUBMAN_CMD, 'config']
# Pass supplied **kwargs as parameters to subscription-manager. Ignore
# non-configuration parameters and replace '_' with '.'. For example,
# 'server_hostname' becomes '--server.hostname'.
for k, v in kwargs.items():
if re.search(r'^(server|rhsm)_', k) and v is not None:
args.append('--%s=%s' % (k.replace('_', '.', 1), v))
self.module.run_command(args, check_rc=True)
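        # For example (illustrative values): configure(server_hostname='rhsm.example.com',
        # server_insecure=None) runs
        #   subscription-manager config --server.hostname=rhsm.example.com
        # (None values and keys not starting with server_/rhsm_ are skipped).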
@property
def is_registered(self):
'''
            Determine whether the current system is registered to RHSM
Returns:
* Boolean - whether the current system is currently registered to
RHSM.
'''
args = [SUBMAN_CMD, 'identity']
rc, stdout, stderr = self.module.run_command(args, check_rc=False)
if rc == 0:
return True
else:
return False
def register(self, username, password, auto_attach, activationkey, org_id,
consumer_type, consumer_name, consumer_id, force_register, environment,
rhsm_baseurl, server_insecure, server_hostname, server_proxy_hostname,
server_proxy_port, server_proxy_user, server_proxy_password):
'''
Register the current system to the provided RHSM or Sat6 server
Raises:
* Exception - if error occurs while running command
'''
args = [SUBMAN_CMD, 'register']
# Generate command arguments
if force_register:
args.extend(['--force'])
if rhsm_baseurl:
args.extend(['--baseurl', rhsm_baseurl])
if server_insecure:
args.extend(['--insecure'])
if server_hostname:
args.extend(['--serverurl', server_hostname])
if org_id:
args.extend(['--org', org_id])
if activationkey:
args.extend(['--activationkey', activationkey])
else:
if auto_attach:
args.append('--auto-attach')
if username:
args.extend(['--username', username])
if password:
args.extend(['--password', password])
if consumer_type:
args.extend(['--type', consumer_type])
if consumer_name:
args.extend(['--name', consumer_name])
if consumer_id:
args.extend(['--consumerid', consumer_id])
if environment:
args.extend(['--environment', environment])
if server_proxy_hostname and server_proxy_port:
args.extend(['--proxy', server_proxy_hostname + ':' + server_proxy_port])
if server_proxy_user:
args.extend(['--proxyuser', server_proxy_user])
if server_proxy_password:
args.extend(['--proxypassword', server_proxy_password])
        rc, stdout, stderr = self.module.run_command(args, check_rc=True)
def unsubscribe(self, serials=None):
'''
Unsubscribe a system from subscribed channels
Args:
serials(list or None): list of serials to unsubscribe. If
serials is none or an empty list, then
all subscribed channels will be removed.
Raises:
* Exception - if error occurs while running command
'''
items = []
if serials is not None and serials:
items = ["--serial=%s" % s for s in serials]
if serials is None:
items = ["--all"]
if items:
args = [SUBMAN_CMD, 'unsubscribe'] + items
            rc, stdout, stderr = self.module.run_command(args, check_rc=True)
return serials
def unregister(self):
'''
Unregister a currently registered system
Raises:
* Exception - if error occurs while running command
'''
args = [SUBMAN_CMD, 'unregister']
        rc, stdout, stderr = self.module.run_command(args, check_rc=True)
self.update_plugin_conf('rhnplugin', False)
self.update_plugin_conf('subscription-manager', False)
def subscribe(self, regexp):
'''
Subscribe current system to available pools matching the specified
regular expression. It matches regexp against available pool ids first.
If any pool ids match, subscribe to those pools and return.
If no pool ids match, then match regexp against available pool product
        names. Note this can still easily match many pools. Then subscribe
to those pools.
Since a pool id is a more specific match, we only fallback to matching
against names if we didn't match pool ids.
Raises:
* Exception - if error occurs while running command
'''
# See https://github.com/ansible/ansible/issues/19466
# subscribe to pools whose pool id matches regexp (and only the pool id)
subscribed_pool_ids = self.subscribe_pool(regexp)
# If we found any matches, we are done
# Don't attempt to match pools by product name
if subscribed_pool_ids:
return subscribed_pool_ids
# We didn't match any pool ids.
# Now try subscribing to pools based on product name match
# Note: This can match lots of product names.
subscribed_by_product_pool_ids = self.subscribe_product(regexp)
if subscribed_by_product_pool_ids:
return subscribed_by_product_pool_ids
# no matches
return []
def subscribe_by_pool_ids(self, pool_ids):
for pool_id, quantity in pool_ids.items():
args = [SUBMAN_CMD, 'attach', '--pool', pool_id, '--quantity', quantity]
            rc, stdout, stderr = self.module.run_command(args, check_rc=True)
return pool_ids
def subscribe_pool(self, regexp):
'''
Subscribe current system to available pools matching the specified
regular expression
Raises:
* Exception - if error occurs while running command
'''
# Available pools ready for subscription
available_pools = RhsmPools(self.module)
subscribed_pool_ids = []
for pool in available_pools.filter_pools(regexp):
pool.subscribe()
subscribed_pool_ids.append(pool.get_pool_id())
return subscribed_pool_ids
def subscribe_product(self, regexp):
'''
Subscribe current system to available pools matching the specified
regular expression
Raises:
* Exception - if error occurs while running command
'''
# Available pools ready for subscription
available_pools = RhsmPools(self.module)
subscribed_pool_ids = []
for pool in available_pools.filter_products(regexp):
pool.subscribe()
subscribed_pool_ids.append(pool.get_pool_id())
return subscribed_pool_ids
def update_subscriptions(self, regexp):
changed = False
consumed_pools = RhsmPools(self.module, consumed=True)
pool_ids_to_keep = [p.get_pool_id() for p in consumed_pools.filter_pools(regexp)]
pool_ids_to_keep.extend([p.get_pool_id() for p in consumed_pools.filter_products(regexp)])
serials_to_remove = [p.Serial for p in consumed_pools if p.get_pool_id() not in pool_ids_to_keep]
serials = self.unsubscribe(serials=serials_to_remove)
subscribed_pool_ids = self.subscribe(regexp)
if subscribed_pool_ids or serials:
changed = True
return {'changed': changed, 'subscribed_pool_ids': subscribed_pool_ids,
'unsubscribed_serials': serials}
def update_subscriptions_by_pool_ids(self, pool_ids):
changed = False
consumed_pools = RhsmPools(self.module, consumed=True)
existing_pools = {}
for p in consumed_pools:
existing_pools[p.get_pool_id()] = p.QuantityUsed
serials_to_remove = [p.Serial for p in consumed_pools if pool_ids.get(p.get_pool_id(), 0) != p.QuantityUsed]
serials = self.unsubscribe(serials=serials_to_remove)
missing_pools = {}
for pool_id, quantity in pool_ids.items():
if existing_pools.get(pool_id, 0) != quantity:
missing_pools[pool_id] = quantity
self.subscribe_by_pool_ids(missing_pools)
if missing_pools or serials:
changed = True
return {'changed': changed, 'subscribed_pool_ids': missing_pools.keys(),
'unsubscribed_serials': serials}
class RhsmPool(object):
'''
Convenience class for housing subscription information
'''
def __init__(self, module, **kwargs):
self.module = module
for k, v in kwargs.items():
setattr(self, k, v)
def __str__(self):
return str(self.__getattribute__('_name'))
def get_pool_id(self):
return getattr(self, 'PoolId', getattr(self, 'PoolID'))
def subscribe(self):
args = "subscription-manager subscribe --pool %s" % self.get_pool_id()
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
if rc == 0:
return True
else:
return False
class RhsmPools(object):
"""
This class is used for manipulating pools subscriptions with RHSM
"""
def __init__(self, module, consumed=False):
self.module = module
self.products = self._load_product_list(consumed)
def __iter__(self):
return self.products.__iter__()
def _load_product_list(self, consumed=False):
"""
Loads list of all available or consumed pools for system in data structure
Args:
consumed(bool): if True list consumed pools, else list available pools (default False)
"""
args = "subscription-manager list"
if consumed:
args += " --consumed"
else:
args += " --available"
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
products = []
for line in stdout.split('\n'):
# Remove leading+trailing whitespace
line = line.strip()
            # An empty line implies the end of an output group
if len(line) == 0:
continue
# If a colon ':' is found, parse
elif ':' in line:
(key, value) = line.split(':', 1)
                key = key.strip().replace(" ", "")  # strip spaces so e.g. 'Pool ID' becomes 'PoolID'
value = value.strip()
if key in ['ProductName', 'SubscriptionName']:
# Remember the name for later processing
products.append(RhsmPool(self.module, _name=value, key=value))
elif products:
# Associate value with most recently recorded product
products[-1].__setattr__(key, value)
# FIXME - log some warning?
# else:
# warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value))
return products
def filter_pools(self, regexp='^$'):
'''
Return a list of RhsmPools whose pool id matches the provided regular expression
'''
r = re.compile(regexp)
for product in self.products:
if r.search(product.get_pool_id()):
yield product
def filter_products(self, regexp='^$'):
'''
Return a list of RhsmPools whose product name matches the provided regular expression
'''
r = re.compile(regexp)
for product in self.products:
if r.search(product._name):
yield product
def main():
    # Instantiate the Rhsm helper; the AnsibleModule instance is attached below
rhsm = Rhsm(None)
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present',
choices=['present', 'absent']),
username=dict(default=None,
required=False),
password=dict(default=None,
required=False,
no_log=True),
server_hostname=dict(default=None,
required=False),
server_insecure=dict(default=None,
required=False),
rhsm_baseurl=dict(default=None,
required=False),
auto_attach=dict(aliases=['autosubscribe'], default=False, type='bool'),
activationkey=dict(default=None,
required=False,
no_log=True),
org_id=dict(default=None,
required=False),
environment=dict(default=None,
required=False, type='str'),
pool=dict(default='^$',
required=False,
type='str'),
pool_ids=dict(default=[],
required=False,
type='list'),
consumer_type=dict(default=None,
required=False),
consumer_name=dict(default=None,
required=False),
consumer_id=dict(default=None,
required=False),
force_register=dict(default=False,
type='bool'),
server_proxy_hostname=dict(default=None,
required=False),
server_proxy_port=dict(default=None,
required=False),
server_proxy_user=dict(default=None,
required=False),
server_proxy_password=dict(default=None,
required=False,
no_log=True),
),
required_together=[['username', 'password'],
['server_proxy_hostname', 'server_proxy_port'],
['server_proxy_user', 'server_proxy_password']],
mutually_exclusive=[['activationkey', 'username'],
['activationkey', 'consumer_id'],
['activationkey', 'environment'],
['activationkey', 'autosubscribe'],
                            ['force_register', 'consumer_id'],
['pool', 'pool_ids']],
required_if=[['state', 'present', ['username', 'activationkey'], True]],
)
rhsm.module = module
state = module.params['state']
username = module.params['username']
password = module.params['password']
server_hostname = module.params['server_hostname']
server_insecure = module.params['server_insecure']
rhsm_baseurl = module.params['rhsm_baseurl']
auto_attach = module.params['auto_attach']
activationkey = module.params['activationkey']
org_id = module.params['org_id']
if activationkey and not org_id:
module.fail_json(msg='org_id is required when using activationkey')
environment = module.params['environment']
pool = module.params['pool']
pool_ids = {}
for value in module.params['pool_ids']:
if isinstance(value, dict):
if len(value) != 1:
module.fail_json(msg='Unable to parse pool_ids option.')
            pool_id, quantity = list(value.items())[0]  # list() keeps this working on Python 3
else:
pool_id, quantity = value, 1
pool_ids[pool_id] = str(quantity)
consumer_type = module.params["consumer_type"]
consumer_name = module.params["consumer_name"]
consumer_id = module.params["consumer_id"]
force_register = module.params["force_register"]
server_proxy_hostname = module.params['server_proxy_hostname']
server_proxy_port = module.params['server_proxy_port']
server_proxy_user = module.params['server_proxy_user']
server_proxy_password = module.params['server_proxy_password']
global SUBMAN_CMD
SUBMAN_CMD = module.get_bin_path('subscription-manager', True)
# Ensure system is registered
if state == 'present':
# Register system
if rhsm.is_registered and not force_register:
if pool != '^$' or pool_ids:
try:
if pool_ids:
result = rhsm.update_subscriptions_by_pool_ids(pool_ids)
else:
result = rhsm.update_subscriptions(pool)
except Exception as e:
module.fail_json(msg="Failed to update subscriptions for '%s': %s" % (server_hostname, to_native(e)))
else:
module.exit_json(**result)
else:
module.exit_json(changed=False, msg="System already registered.")
else:
try:
rhsm.enable()
rhsm.configure(**module.params)
rhsm.register(username, password, auto_attach, activationkey, org_id,
consumer_type, consumer_name, consumer_id, force_register,
environment, rhsm_baseurl, server_insecure, server_hostname,
server_proxy_hostname, server_proxy_port, server_proxy_user, server_proxy_password)
if pool_ids:
subscribed_pool_ids = rhsm.subscribe_by_pool_ids(pool_ids)
else:
subscribed_pool_ids = rhsm.subscribe(pool)
except Exception as e:
module.fail_json(msg="Failed to register with '%s': %s" % (server_hostname, to_native(e)))
else:
module.exit_json(changed=True,
msg="System successfully registered to '%s'." % server_hostname,
subscribed_pool_ids=subscribed_pool_ids)
# Ensure system is *not* registered
if state == 'absent':
if not rhsm.is_registered:
module.exit_json(changed=False, msg="System already unregistered.")
else:
try:
rhsm.unsubscribe()
rhsm.unregister()
except Exception as e:
module.fail_json(msg="Failed to unregister: %s" % to_native(e))
else:
module.exit_json(changed=True, msg="System successfully unregistered from %s." % server_hostname)
if __name__ == '__main__':
main()
| bregman-arie/ansible | lib/ansible/modules/packaging/os/redhat_subscription.py | Python | gpl-3.0 | 28,269 |
# -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
import six
from django.utils.translation import ugettext_lazy as _
from django.views.generic.detail import DetailView
from shoop.core.models import PaymentMethod, ShippingMethod
from shoop.utils.excs import Problem
from shoop.utils.importing import load
class _BaseMethodDetailView(DetailView):
model = None # Overridden below
title = _(u"Edit Details")
def dispatch(self, request, *args, **kwargs):
# This view only dispatches further to the method module's own detail view class
object = self.get_object()
module = object.module
if not module.admin_detail_view_class:
raise Problem("Module %s has no admin detail view" % module.name)
if isinstance(module.admin_detail_view_class, six.text_type):
view_class = load(module.admin_detail_view_class)
else:
view_class = module.admin_detail_view_class
kwargs["object"] = object
return view_class(model=self.model).dispatch(request, *args, **kwargs)
class ShippingMethodEditDetailView(_BaseMethodDetailView):
model = ShippingMethod
class PaymentMethodEditDetailView(_BaseMethodDetailView):
model = PaymentMethod
| lawzou/shoop | shoop/admin/modules/methods/views/edit_detail.py | Python | agpl-3.0 | 1,471 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Amrvis(MakefilePackage):
"""Amrvis is a visualization package specifically designed to
read and display output and profiling data from codes built
on the AMReX framework.
"""
homepage = "https://github.com/AMReX-Codes/Amrvis"
git = "https://github.com/AMReX-Codes/Amrvis.git"
version('main', tag='main')
variant(
'dims',
default='3',
values=('1', '2', '3'),
multi=False,
description='Number of spatial dimensions'
)
variant(
'prec',
default='DOUBLE',
values=('FLOAT', 'DOUBLE'),
multi=False,
description='Floating point precision'
)
variant('mpi', default=True, description='Enable MPI parallel support')
variant('debug', default=False, description='Enable debugging features')
variant('profiling', default=False,
description='Enable AMReX profiling features')
depends_on('gmake', type='build')
depends_on('mpi', when='+mpi')
depends_on('libsm')
depends_on('libice')
depends_on('libxpm')
depends_on('libx11')
depends_on('libxt')
depends_on('libxext')
depends_on('motif')
depends_on('flex')
depends_on('bison')
conflicts(
'+profiling', when='dims=1',
msg='Amrvis profiling support requires a 2D build'
)
conflicts(
'+profiling', when='dims=3',
msg='Amrvis profiling support requires a 2D build'
)
# Only doing gcc and clang at the moment.
# Intel currently fails searching for mpiicc, mpiicpc, etc.
for comp in ['%intel', '%cce', '%nag', '%pgi', '%xl', '%xl_r']:
conflicts(
comp,
msg='Amrvis currently only builds with gcc and clang'
)
# Need to clone AMReX into Amrvis because Amrvis uses AMReX's source
resource(name='amrex',
git='https://github.com/AMReX-Codes/amrex.git',
tag='development',
placement='amrex')
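    # Illustrative spec using the variants declared above (values are examples only;
    # note +profiling requires dims=2 because of the conflicts below):
    #   spack install amrvis dims=2 prec=DOUBLE +mpi +profiling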
def edit(self, spec, prefix):
        # libquadmath is only available on x86_64 and ppc64le
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85440
if self.spec.target.family not in ['x86_64', 'ppc64le']:
comps = join_path('amrex', 'Tools', 'GNUMake', 'comps')
maks = [
join_path(comps, 'gnu.mak'),
join_path(comps, 'llvm.mak'),
]
for mak in maks:
filter_file('-lquadmath', '', mak)
# Set all available makefile options to values we want
makefile = FileFilter('GNUmakefile')
makefile.filter(
r'^AMREX_HOME\s*\?=.*',
'AMREX_HOME = {0}'.format('./amrex')
)
makefile.filter(
r'^PRECISION\s*=.*',
'PRECISION = {0}'.format(spec.variants['prec'].value)
)
makefile.filter(
r'^DIM\s*=.*',
'DIM = {0}'.format(spec.variants['dims'].value)
)
makefile.filter(
r'^PROFILE\s*=.*',
'PROFILE = {0}'.format(
spec.variants['profiling'].value
).upper()
)
makefile.filter(
r'^TRACE_PROFILE\s*=.*',
'TRACE_PROFILE = {0}'.format(
spec.variants['profiling'].value
).upper()
)
makefile.filter(
r'^COMM_PROFILE\s*=.*',
'COMM_PROFILE = {0}'.format(
spec.variants['profiling'].value
).upper()
)
makefile.filter(
r'^COMP\s*=.*',
'COMP = {0}'.format(self.compiler.name)
)
makefile.filter(
r'^DEBUG\s*=.*',
'DEBUG = {0}'.format(spec.variants['debug'].value).upper()
)
makefile.filter(
r'^USE_ARRAYVIEW\s*=.*',
'USE_ARRAY_VIEW = FALSE'
)
makefile.filter(
r'^USE_MPI\s*=.*',
'USE_MPI = {0}'.format(spec.variants['mpi'].value).upper()
)
makefile.filter(
r'^USE_CXX11\s*=.*',
'USE_CXX11 = TRUE'
)
makefile.filter(
r'^USE_VOLRENDER\s*=.*',
'USE_VOLRENDER = FALSE'
)
makefile.filter(
r'^USE_PARALLELVOLRENDER\s*=.*',
'USE_PARALLELVOLRENDER = FALSE'
)
makefile.filter(
r'^USE_PROFPARSER\s*=.*',
'USE_PROFPARSER = {0}'.format(
spec.variants['profiling'].value
).upper()
)
        # Somewhat risky: remove all of the default /usr and /opt X library
        # search paths from the makefile
makefile.filter(
r'^.*\b(usr|opt)\b.*$',
'# Spack removed INCLUDE_LOCATIONS and LIBRARY_LOCATIONS'
)
# Read GNUmakefile into array
with open('GNUmakefile', 'r') as file:
contents = file.readlines()
# Edit GNUmakefile includes and libraries to point to Spack
# dependencies.
# The safest bet is to put the LIBRARY_LOCATIONS and
# INCLUDE_LOCATIONS at the beginning of the makefile.
line_offset = 0
count = 0
for lib in ['libsm', 'libice', 'libxpm', 'libx11',
'libxt', 'libxext', 'motif']:
contents.insert(
line_offset + count,
'LIBRARY_LOCATIONS += {0}\n'.format(spec[lib].prefix.lib)
)
contents.insert(
line_offset + count + 1,
'INCLUDE_LOCATIONS += {0}\n'.format(spec[lib].prefix.include)
)
count += 1
# Write GNUmakefile
with open('GNUmakefile', 'w') as file:
file.writelines(contents)
def setup_build_environment(self, env):
# We don't want an AMREX_HOME the user may have set already
env.unset('AMREX_HOME')
# Help force Amrvis to not pick up random system compilers
if '+mpi' in self.spec:
env.set('MPI_HOME', self.spec['mpi'].prefix)
env.set('CC', self.spec['mpi'].mpicc)
env.set('CXX', self.spec['mpi'].mpicxx)
env.set('F77', self.spec['mpi'].mpif77)
env.set('FC', self.spec['mpi'].mpifc)
def install(self, spec, prefix):
# Install exe manually
mkdirp(prefix.bin)
install('*.ex', prefix.bin)
| LLNL/spack | var/spack/repos/builtin/packages/amrvis/package.py | Python | lgpl-2.1 | 6,541 |
import re
from .common import InfoExtractor
class TrailerAddictIE(InfoExtractor):
_VALID_URL = r'(?:http://)?(?:www\.)?traileraddict\.com/(?:trailer|clip)/(?P<movie>.+?)/(?P<trailer_name>.+)'
_TEST = {
u'url': u'http://www.traileraddict.com/trailer/prince-avalanche/trailer',
u'file': u'76184.mp4',
u'md5': u'57e39dbcf4142ceb8e1f242ff423fd71',
u'info_dict': {
u"title": u"Prince Avalanche Trailer",
u"description": u"Trailer for Prince Avalanche.Two highway road workers spend the summer of 1988 away from their city lives. The isolated landscape becomes a place of misadventure as the men find themselves at odds with each other and the women they left behind."
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
name = mobj.group('movie') + '/' + mobj.group('trailer_name')
webpage = self._download_webpage(url, name)
title = self._search_regex(r'<title>(.+?)</title>',
webpage, 'video title').replace(' - Trailer Addict','')
view_count = self._search_regex(r'Views: (.+?)<br />',
webpage, 'Views Count')
video_id = self._og_search_property('video', webpage, 'Video id').split('=')[1]
# Presence of (no)watchplus function indicates HD quality is available
        if re.search(r'function (no)?watchplus\(\)', webpage):
fvar = "fvarhd"
else:
fvar = "fvar"
info_url = "http://www.traileraddict.com/%s.php?tid=%s" % (fvar, str(video_id))
info_webpage = self._download_webpage(info_url, video_id , "Downloading the info webpage")
final_url = self._search_regex(r'&fileurl=(.+)',
info_webpage, 'Download url').replace('%3F','?')
thumbnail_url = self._search_regex(r'&image=(.+?)&',
info_webpage, 'thumbnail url')
ext = final_url.split('.')[-1].split('?')[0]
return [{
'id' : video_id,
'url' : final_url,
'ext' : ext,
'title' : title,
'thumbnail' : thumbnail_url,
'description' : self._og_search_description(webpage),
'view_count' : view_count,
}]
| ashutosh-mishra/youtube-dl | youtube_dl/extractor/traileraddict.py | Python | unlicense | 2,278 |
# PyKinect
# Copyright(c) Microsoft Corporation
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the License); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY
# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
import os
import ctypes
from os import path
_audio_path = path.join(path.dirname(__file__), '..', 'pykinect', 'audio', 'PyKinectAudio.dll')
if not os.path.exists(_audio_path):
_audio_path = path.join(path.dirname(__file__), '..', '..', '..', '..', '..', '..', 'Binaries', 'Debug', 'PyKinectAudio.dll')
if not path.exists(_audio_path):
raise Exception('Cannot find PyKinectAudio.dll')
_PYAUDIODLL = ctypes.CDLL(_audio_path)
_CreateRecognizer = _PYAUDIODLL.CreateRecognizer
_CreateRecognizer.argtypes = [ctypes.c_voidp, ctypes.POINTER(ctypes.c_voidp)]
_CreateRecognizer.restype = ctypes.HRESULT
_SetInputFile = _PYAUDIODLL.SetInputFile
_SetInputFile.argtypes = [ctypes.c_voidp, ctypes.c_voidp]
_SetInputFile.restype = ctypes.HRESULT
_SetInputStream = _PYAUDIODLL.SetInputStream
_SetInputStream.argtypes = [ctypes.c_voidp, ctypes.c_voidp]
_SetInputStream.restype = ctypes.HRESULT
_IUnknownRelease = _PYAUDIODLL.IUnknownRelease
_IUnknownRelease.argtypes = [ctypes.c_voidp]
_IUnknownRelease.restype = None
_LoadGrammar = _PYAUDIODLL.LoadGrammar
_LoadGrammar.argtypes = [ctypes.c_wchar_p, ctypes.c_voidp, ctypes.POINTER(ctypes.c_voidp)]
_LoadGrammar.restype = ctypes.HRESULT
_EnumRecognizers = _PYAUDIODLL.EnumRecognizers
_ReadCallback = ctypes.WINFUNCTYPE(ctypes.HRESULT, ctypes.c_uint32, ctypes.c_voidp, ctypes.POINTER(ctypes.c_uint32))
_Recognize_Callback = ctypes.WINFUNCTYPE(None, ctypes.c_wchar_p)
_RecognizeOne = _PYAUDIODLL.RecognizeOne
_RecognizeOne.argtypes = [ctypes.c_voidp, ctypes.c_uint32, _Recognize_Callback, _Recognize_Callback]
_RecognizeOne.restype = ctypes.HRESULT
_RecognizeAsync = _PYAUDIODLL.RecognizeAsync
_RecognizeAsync.argtypes = [ctypes.c_voidp, ctypes.c_uint, _Recognize_Callback, _Recognize_Callback, ctypes.POINTER(ctypes.c_voidp)]
_RecognizeAsync.restype = ctypes.HRESULT
_StopRecognizeAsync = _PYAUDIODLL.StopRecognizeAsync
_StopRecognizeAsync.argtypes = [ctypes.c_voidp]
_StopRecognizeAsync.restype = ctypes.HRESULT
_EnumRecognizersCallback = ctypes.WINFUNCTYPE(None, ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_voidp)
class Grammar(object):
"""Represents a speech grammar constructed from an XML file"""
def __init__(self, filename):
self.filename = filename
def __del__(self):
#_IUnknownRelease(self._reco_ctx)
_IUnknownRelease(self._grammar)
class RecognizerInfo(object):
def __init__(self, id, description, token):
self.id = id
self.description = description
self._token = token
def __del__(self):
_IUnknownRelease(self._token)
def __repr__(self):
return 'RecognizerInfo(%r, %r, ...)' % (self.id, self.description)
class RecognitionResult(object):
def __init__(self, text, alternates = None):
self.text = text
if alternates:
self.alternates = tuple(RecognitionResult(alt) for alt in alternates)
else:
self.alternates = ()
class _event(object):
"""class used for adding/removing/invoking a set of listener functions"""
__slots__ = ['handlers']
def __init__(self):
self.handlers = []
def __iadd__(self, other):
self.handlers.append(other)
return self
def __isub__(self, other):
self.handlers.remove(other)
return self
def fire(self, *args):
for handler in self.handlers:
handler(*args)
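# Usage sketch: listeners are attached with "+=" and removed with "-=", e.g.
#   engine.speech_recognized += on_speech_recognized
#   engine.speech_recognized -= on_speech_recognized
# where on_speech_recognized is a hypothetical callable taking a RecognitionEventArgs.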
class RecognitionEventArgs(object):
"""Provides information about speech recognition events."""
def __init__(self, result):
self.result = result
class SpeechRecognitionEngine(object):
"""Provides the means to access and manage an in-process speech recognition engine."""
def __init__(self, recognizer = None):
self.speech_recognized = _event()
self._async_handle = None
if isinstance(recognizer, str):
# TODO: Lookup by ID
pass
elif isinstance(recognizer, RecognizerInfo):
rec = ctypes.c_voidp()
_CreateRecognizer(recognizer._token, ctypes.byref(rec))
self._rec = rec
elif recognizer is None:
rec = ctypes.c_voidp()
_CreateRecognizer(None, ctypes.byref(rec))
self._rec = rec
else:
raise TypeError('Bad type for recognizer: ' + repr(recognizer))
def __del__(self):
# TODO: Need to shut down any listening threads
self.recognize_async_stop()
_IUnknownRelease(self._rec)
def load_grammar(self, grammar):
if isinstance(grammar, str):
grammar_obj = Grammar(grammar)
else:
grammar_obj = grammar
comGrammar = ctypes.c_voidp()
_LoadGrammar(grammar_obj.filename, self._rec, ctypes.byref(comGrammar))
grammar_obj._grammar = comGrammar
return grammar_obj
def set_input_to_audio_file(self, stream):
"""sets the input to a Python file-like object which implements read"""
stream_obj = getattr(stream, '__ISpStreamFormat__', None)
if stream_obj is not None:
# optimization: we can avoid going through Python to do the reading by passing
# the original ISpStreamFormat object through
_SetInputStream(self._rec, stream_obj)
else:
def reader(byteCount, buffer, bytesRead):
bytes = stream.read(byteCount)
ctypes.memmove(buffer, bytes, len(bytes))
bytesRead.contents.value = len(bytes)
return 0
self._reader = _ReadCallback(reader)
_SetInputFile(self._rec, self._reader)
def recognize_sync(self, timeout = 30000):
"""attempts to recognize speech and returns the recognized text.
By default times out after 30 seconds"""
res = []
alts = []
def callback(text):
res.append(text)
def alt_callback(text):
if text is not None:
alts.append(text)
_RecognizeOne(self._rec, timeout, _Recognize_Callback(callback), _Recognize_Callback(alt_callback))
if res:
return RecognitionResult(res[0], alts)
return None
def recognize_async(self, multiple = False):
cur_result = []
def callback(text):
cur_result.append(text)
def alt_callback(text):
            if text is None:
# send the event
result = RecognitionResult(cur_result[0], cur_result[1:])
event_args = RecognitionEventArgs(result)
self.speech_recognized.fire(event_args)
del cur_result[:]
else:
cur_result.append(text)
stop_listening_handle = ctypes.c_voidp()
# keep alive our function pointers on ourselves...
        self._async_callback = async_callback = _Recognize_Callback(callback)
self._async_alt_callback = async_alt_callback = _Recognize_Callback(alt_callback)
_RecognizeAsync(self._rec, multiple, async_callback, async_alt_callback, ctypes.byref(stop_listening_handle))
self._async_handle = stop_listening_handle
def recognize_async_stop(self):
if self._async_handle is not None:
_StopRecognizeAsync(self._async_handle)
self._async_handle = None
@staticmethod
def installed_recognizers():
ids = []
def callback(id, description, token):
ids.append(RecognizerInfo(id, description, token))
_EnumRecognizers(_EnumRecognizersCallback(callback))
return ids
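# Usage sketch (illustrative only; "grammar.xml" and audio_source are hypothetical
# stand-ins for a real SRGS grammar file and a file-like object exposing read(),
# e.g. a Kinect audio stream):
#
#   engine = SpeechRecognitionEngine()
#   engine.load_grammar('grammar.xml')
#   engine.set_input_to_audio_file(audio_source)
#   result = engine.recognize_sync(timeout=10000)
#   if result is not None:
#       print(result.text)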
| DinoV/PTVS | Python/Product/PyKinect/PyKinect/winspeech/recognition.py | Python | apache-2.0 | 8,335 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from selenium.webdriver.common import by
from openstack_dashboard.test.integration_tests.pages import basepage
from openstack_dashboard.test.integration_tests.pages.project.compute.volumes.\
volumespage import VolumesPage
from openstack_dashboard.test.integration_tests.regions import forms
from openstack_dashboard.test.integration_tests.regions import tables
class VolumesnapshotsTable(tables.TableRegion):
name = 'volume_snapshots'
marker_name = 'snapshot_marker'
prev_marker_name = 'prev_snapshot_marker'
EDIT_SNAPSHOT_FORM_FIELDS = ("name", "description")
CREATE_VOLUME_FORM_FIELDS = (
"name", "description", "snapshot_source", "type", "size")
@tables.bind_table_action('delete')
def delete_volume_snapshots(self, delete_button):
"""Batch Delete table action."""
delete_button.click()
return forms.BaseFormRegion(self.driver, self.conf)
@tables.bind_row_action('delete')
def delete_volume_snapshot(self, delete_button, row):
"""Per-entity delete row action."""
delete_button.click()
return forms.BaseFormRegion(self.driver, self.conf)
@tables.bind_row_action('edit')
def edit_snapshot(self, edit_button, row):
edit_button.click()
return forms.FormRegion(self.driver, self.conf,
field_mappings=self.EDIT_SNAPSHOT_FORM_FIELDS)
@tables.bind_row_action('create_from_snapshot')
def create_volume(self, create_volume_button, row):
create_volume_button.click()
return forms.FormRegion(self.driver, self.conf,
field_mappings=self.CREATE_VOLUME_FORM_FIELDS)
class VolumesnapshotsPage(basepage.BaseNavigationPage):
SNAPSHOT_TABLE_NAME_COLUMN = 'name'
SNAPSHOT_TABLE_STATUS_COLUMN = 'status'
SNAPSHOT_TABLE_VOLUME_NAME_COLUMN = 'volume_name'
_volumes_tab_locator = (
by.By.CSS_SELECTOR,
'a[href*="tab=volumes_and_snapshots__volumes_tab"]')
def __init__(self, driver, conf):
super(VolumesnapshotsPage, self).__init__(driver, conf)
self._page_title = "Volumes"
@property
def volumesnapshots_table(self):
return VolumesnapshotsTable(self.driver, self.conf)
def switch_to_volumes_tab(self):
self._get_element(*self._volumes_tab_locator).click()
return VolumesPage(self.driver, self.conf)
def _get_row_with_volume_snapshot_name(self, name):
return self.volumesnapshots_table.get_row(
self.SNAPSHOT_TABLE_NAME_COLUMN,
name)
def is_snapshot_present(self, name):
return bool(self._get_row_with_volume_snapshot_name(name))
def delete_volume_snapshot(self, name):
row = self._get_row_with_volume_snapshot_name(name)
confirm_form = self.volumesnapshots_table.delete_volume_snapshot(row)
confirm_form.submit()
def delete_volume_snapshots(self, names):
for name in names:
row = self._get_row_with_volume_snapshot_name(name)
row.mark()
confirm_form = self.volumesnapshots_table.delete_volume_snapshots()
confirm_form.submit()
def is_volume_snapshot_deleted(self, name):
return self.volumesnapshots_table.is_row_deleted(
lambda: self._get_row_with_volume_snapshot_name(name))
def is_volume_snapshot_available(self, name):
def cell_getter():
row = self._get_row_with_volume_snapshot_name(name)
return row and row.cells[self.SNAPSHOT_TABLE_STATUS_COLUMN]
return bool(self.volumesnapshots_table.wait_cell_status(cell_getter,
'Available'))
def get_volume_name(self, snapshot_name):
row = self._get_row_with_volume_snapshot_name(snapshot_name)
return row.cells[self.SNAPSHOT_TABLE_VOLUME_NAME_COLUMN].text
def edit_snapshot(self, name, new_name=None, description=None):
row = self._get_row_with_volume_snapshot_name(name)
snapshot_edit_form = self.volumesnapshots_table.edit_snapshot(row)
if new_name:
snapshot_edit_form.name.text = new_name
if description:
snapshot_edit_form.description.text = description
snapshot_edit_form.submit()
def create_volume_from_snapshot(self, snapshot_name, volume_name=None,
description=None, volume_size=None):
row = self._get_row_with_volume_snapshot_name(snapshot_name)
volume_form = self.volumesnapshots_table.create_volume(row)
if volume_name:
volume_form.name.text = volume_name
if description:
volume_form.description.text = description
if volume_size is None:
volume_size = self.conf.volume.volume_size
volume_form.size.value = volume_size
volume_form.submit()
| Mirantis/mos-horizon | openstack_dashboard/test/integration_tests/pages/project/compute/volumes/volumesnapshotspage.py | Python | apache-2.0 | 5,448 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""An example that verifies the counts and includes best practices.
On top of the basic concepts in the wordcount example, this workflow introduces
logging to Cloud Logging, and using assertions in a Dataflow pipeline.
To execute this pipeline locally, specify a local output file or output prefix
on GCS::
--output [YOUR_LOCAL_FILE | gs://YOUR_OUTPUT_PREFIX]
To execute this pipeline using the Google Cloud Dataflow service, specify
pipeline configuration::
--project YOUR_PROJECT_ID
--staging_location gs://YOUR_STAGING_DIRECTORY
--temp_location gs://YOUR_TEMP_DIRECTORY
--job_name YOUR_JOB_NAME
--runner DataflowRunner
and an output prefix on GCS::
--output gs://YOUR_OUTPUT_PREFIX
"""
from __future__ import absolute_import
import argparse
import logging
import re
from past.builtins import unicode
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.metrics import Metrics
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
class FilterTextFn(beam.DoFn):
"""A DoFn that filters for a specific key based on a regular expression."""
def __init__(self, pattern):
super(FilterTextFn, self).__init__()
self.pattern = pattern
# A custom metric can track values in your pipeline as it runs. Those
# values will be available in the monitoring system of the runner used
# to run the pipeline. These metrics below track the number of
# matched and unmatched words.
self.matched_words = Metrics.counter(self.__class__, 'matched_words')
    self.unmatched_words = Metrics.counter(self.__class__, 'unmatched_words')
def process(self, element):
word, _ = element
if re.match(self.pattern, word):
# Log at INFO level each element we match. When executing this pipeline
# using the Dataflow service, these log lines will appear in the Cloud
# Logging UI.
logging.info('Matched %s', word)
self.matched_words.inc()
yield element
else:
# Log at the "DEBUG" level each element that is not matched. Different log
# levels can be used to control the verbosity of logging providing an
# effective mechanism to filter less important information.
# Note currently only "INFO" and higher level logs are emitted to the
# Cloud Logger. This log message will not be visible in the Cloud Logger.
logging.debug('Did not match %s', word)
      self.unmatched_words.inc()
class CountWords(beam.PTransform):
"""A transform to count the occurrences of each word.
A PTransform that converts a PCollection containing lines of text into a
PCollection of (word, count) tuples.
"""
def expand(self, pcoll):
def count_ones(word_ones):
(word, ones) = word_ones
return (word, sum(ones))
return (pcoll
| 'split' >> (beam.FlatMap(lambda x: re.findall(r'[A-Za-z\']+', x))
.with_output_types(unicode))
| 'pair_with_one' >> beam.Map(lambda x: (x, 1))
| 'group' >> beam.GroupByKey()
| 'count' >> beam.Map(count_ones))
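  # For example (illustrative): a PCollection containing the single line
  # "to be or not to be" yields, in some order,
  # [('to', 2), ('be', 2), ('or', 1), ('not', 1)].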
def run(argv=None):
"""Runs the debugging wordcount pipeline."""
parser = argparse.ArgumentParser()
parser.add_argument('--input',
dest='input',
default='gs://dataflow-samples/shakespeare/kinglear.txt',
help='Input file to process.')
parser.add_argument('--output',
dest='output',
required=True,
help='Output file to write results to.')
known_args, pipeline_args = parser.parse_known_args(argv)
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = True
with beam.Pipeline(options=pipeline_options) as p:
# Read the text file[pattern] into a PCollection, count the occurrences of
# each word and filter by a list of words.
filtered_words = (
p | 'read' >> ReadFromText(known_args.input)
| CountWords()
| 'FilterText' >> beam.ParDo(FilterTextFn('Flourish|stomach')))
# assert_that is a convenient PTransform that checks a PCollection has an
# expected value. Asserts are best used in unit tests with small data sets
# but is demonstrated here as a teaching tool.
#
# Note assert_that does not provide any output and that successful
# completion of the Pipeline implies that the expectations were met. Learn
# more at https://cloud.google.com/dataflow/pipelines/testing-your-pipeline
# on how to best test your pipeline.
assert_that(
filtered_words, equal_to([('Flourish', 3), ('stomach', 1)]))
# Format the counts into a PCollection of strings and write the output using
# a "Write" transform that has side effects.
# pylint: disable=unused-variable
def format_result(word_count):
(word, count) = word_count
return '%s: %s' % (word, count)
output = (filtered_words
| 'format' >> beam.Map(format_result)
| 'write' >> WriteToText(known_args.output))
if __name__ == '__main__':
# Cloud Logging would contain only logging.INFO and higher level logs logged
# by the root logger. All log statements emitted by the root logger will be
# visible in the Cloud Logging UI. Learn more at
# https://cloud.google.com/logging about the Cloud Logging UI.
#
# You can set the default logging level to a different level when running
# locally.
logging.getLogger().setLevel(logging.INFO)
run()
| mxm/incubator-beam | sdks/python/apache_beam/examples/wordcount_debugging.py | Python | apache-2.0 | 6,652 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A tf.distribute.Strategy for running on a single device."""
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribute_utils
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import input_util
from tensorflow.python.distribute import numpy_dataset
from tensorflow.python.distribute.v1 import input_lib as input_lib_v1
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# TODO(josh11b): Do we wrap values in types to generate errors if you are
# doing something that won't work with other DistributionStrategy
# implementations?
@tf_export("distribute.OneDeviceStrategy", v1=[])
class OneDeviceStrategy(distribute_lib.Strategy):
"""A distribution strategy for running on a single device.
Using this strategy will place any variables created in its scope on the
specified device. Input distributed through this strategy will be
prefetched to the specified device. Moreover, any functions called via
  `strategy.run` will be placed on the specified device as well.
Typical usage of this strategy could be testing your code with the
tf.distribute.Strategy API before switching to other strategies which
actually distribute to multiple devices/machines.
For example:
```
strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
with strategy.scope():
v = tf.Variable(1.0)
print(v.device) # /job:localhost/replica:0/task:0/device:GPU:0
def step_fn(x):
return x * 2
result = 0
for i in range(10):
result += strategy.run(step_fn, args=(i,))
print(result) # 90
```
"""
def __init__(self, device):
"""Creates a `OneDeviceStrategy`.
Args:
device: Device string identifier for the device on which the variables
should be placed. See class docs for more details on how the device is
used. Examples: "/cpu:0", "/gpu:0", "/device:CPU:0", "/device:GPU:0"
"""
super(OneDeviceStrategy, self).__init__(OneDeviceExtended(self, device))
distribute_lib.distribution_strategy_gauge.get_cell("V2").set(
"OneDeviceStrategy")
def experimental_distribute_dataset(self, dataset, options=None): # pylint: disable=useless-super-delegation
"""Distributes a tf.data.Dataset instance provided via dataset.
In this case, there is only one device, so this is only a thin wrapper
around the input dataset. It will, however, prefetch the input data to the
specified device. The returned distributed dataset can be iterated over
similar to how regular datasets can.
NOTE: Currently, the user cannot add any more transformations to a
distributed dataset.
Example:
```
strategy = tf.distribute.OneDeviceStrategy()
dataset = tf.data.Dataset.range(10).batch(2)
dist_dataset = strategy.experimental_distribute_dataset(dataset)
for x in dist_dataset:
print(x) # [0, 1], [2, 3],...
```
Args:
dataset: `tf.data.Dataset` to be prefetched to device.
options: `tf.distribute.InputOptions` used to control options on how this
dataset is distributed.
Returns:
A "distributed `Dataset`" that the caller can iterate over.
"""
return super(OneDeviceStrategy, self).experimental_distribute_dataset(
dataset, options)
def distribute_datasets_from_function(
self,
dataset_fn, # pylint: disable=useless-super-delegation
options=None):
"""Distributes `tf.data.Dataset` instances created by calls to `dataset_fn`.
`dataset_fn` will be called once for each worker in the strategy. In this
case, we only have one worker and one device so `dataset_fn` is called
once.
The `dataset_fn` should take an `tf.distribute.InputContext` instance where
information about batching and input replication can be accessed:
```
def dataset_fn(input_context):
batch_size = input_context.get_per_replica_batch_size(global_batch_size)
d = tf.data.Dataset.from_tensors([[1.]]).repeat().batch(batch_size)
return d.shard(
input_context.num_input_pipelines, input_context.input_pipeline_id)
inputs = strategy.distribute_datasets_from_function(dataset_fn)
for batch in inputs:
replica_results = strategy.run(replica_fn, args=(batch,))
```
IMPORTANT: The `tf.data.Dataset` returned by `dataset_fn` should have a
per-replica batch size, unlike `experimental_distribute_dataset`, which uses
the global batch size. This may be computed using
`input_context.get_per_replica_batch_size`.
Args:
dataset_fn: A function taking a `tf.distribute.InputContext` instance and
returning a `tf.data.Dataset`.
options: `tf.distribute.InputOptions` used to control options on how this
dataset is distributed.
Returns:
A "distributed `Dataset`", which the caller can iterate over like regular
datasets.
"""
return super(OneDeviceStrategy,
self).distribute_datasets_from_function(dataset_fn, options)
def experimental_local_results(self, value): # pylint: disable=useless-super-delegation
"""Returns the list of all local per-replica values contained in `value`.
In `OneDeviceStrategy`, the `value` is always expected to be a single
value, so the result is just the value in a tuple.
Args:
value: A value returned by `experimental_run()`, `run()`,
`extended.call_for_each_replica()`, or a variable created in `scope`.
Returns:
A tuple of values contained in `value`. If `value` represents a single
value, this returns `(value,).`
"""
return super(OneDeviceStrategy, self).experimental_local_results(value)
def run(self, fn, args=(), kwargs=None, options=None): # pylint: disable=useless-super-delegation
"""Run `fn` on each replica, with the given arguments.
In `OneDeviceStrategy`, `fn` is simply called within a device scope for the
given device, with the provided arguments.
Args:
fn: The function to run. The output must be a `tf.nest` of `Tensor`s.
args: (Optional) Positional arguments to `fn`.
kwargs: (Optional) Keyword arguments to `fn`.
options: (Optional) An instance of `tf.distribute.RunOptions` specifying
the options to run `fn`.
Returns:
Return value from running `fn`.
"""
return super(OneDeviceStrategy, self).run(fn, args, kwargs, options)
def reduce(self, reduce_op, value, axis): # pylint: disable=useless-super-delegation
"""Reduce `value` across replicas.
In `OneDeviceStrategy`, there is only one replica, so if axis=None, value
is simply returned. If axis is specified as something other than None,
such as axis=0, value is reduced along that axis and returned.
Example:
```
t = tf.range(10)
result = strategy.reduce(tf.distribute.ReduceOp.SUM, t, axis=None).numpy()
# result: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
result = strategy.reduce(tf.distribute.ReduceOp.SUM, t, axis=0).numpy()
# result: 45
```
Args:
reduce_op: A `tf.distribute.ReduceOp` value specifying how values should
be combined.
value: A "per replica" value, e.g. returned by `run` to
be combined into a single tensor.
axis: Specifies the dimension to reduce along within each
replica's tensor. Should typically be set to the batch dimension, or
`None` to only reduce across replicas (e.g. if the tensor has no batch
dimension).
Returns:
A `Tensor`.
"""
return super(OneDeviceStrategy, self).reduce(reduce_op, value, axis)
def scope(self): # pylint: disable=useless-super-delegation
"""Returns a context manager selecting this Strategy as current.
Inside a `with strategy.scope():` code block, this thread
will use a variable creator set by `strategy`, and will
enter its "cross-replica context".
In `OneDeviceStrategy`, all variables created inside `strategy.scope()`
will be on `device` specified at strategy construction time.
See example in the docs for this class.
Returns:
A context manager to use for creating variables with this strategy.
"""
return super(OneDeviceStrategy, self).scope()
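  # Illustrative sketch of the scope() behavior described above (assumes a
  # "/gpu:0" device is available; any valid device string works the same way):
  #
  #   strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
  #   with strategy.scope():
  #     v = tf.Variable(1.0)  # variable is placed on /gpu:0
  #     # Layers and optimizers created here place their variables there too.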
@tf_export(v1=["distribute.OneDeviceStrategy"]) # pylint: disable=empty-docstring
class OneDeviceStrategyV1(distribute_lib.StrategyV1):
__doc__ = OneDeviceStrategy.__doc__.replace(
"For example:\n ```",
"For example:\n ```\n tf.enable_eager_execution()")
def __init__(self, device):
super(OneDeviceStrategyV1, self).__init__(OneDeviceExtended(self, device))
distribute_lib.distribution_strategy_gauge.get_cell("V1").set(
"OneDeviceStrategy")
__init__.__doc__ = OneDeviceStrategy.__init__.__doc__
# TODO(josh11b): Switch to V2 after callers have been updated to only V2 APIs.
class OneDeviceExtended(distribute_lib.StrategyExtendedV1):
"""Implementation of OneDeviceStrategy."""
def __init__(self, container_strategy, device):
super(OneDeviceExtended, self).__init__(container_strategy)
self._device = device_util.resolve(device)
self._input_device = device_util.get_host_for_device(self._device)
def _input_workers_with_options(self, options=None):
if not options or options.experimental_fetch_to_device:
return input_lib.InputWorkers([(self._input_device, (self._device,))])
else:
return input_lib.InputWorkers([(self._input_device,
(self._input_device,))])
@property
def _input_workers(self):
return self._input_workers_with_options()
def _create_variable(self, next_creator, **kwargs):
colocate_with = kwargs.pop("colocate_with", None)
if colocate_with is None:
with ops.device(self._device):
return next_creator(**kwargs)
elif isinstance(colocate_with, numpy_dataset.SingleDevice):
with ops.device(colocate_with.device):
return next_creator(**kwargs)
else:
with ops.colocate_with(colocate_with):
return next_creator(**kwargs)
def _validate_colocate_with_variable(self, colocate_with_variable):
distribute_utils.validate_colocate(colocate_with_variable, self)
def _make_dataset_iterator(self, dataset):
"""Make iterator from dataset without splitting the batch."""
# Note that split_batch_by argument is not passed because it is always 1 in
# this strategy, and adding it adds unnecessary overhead to the dataset.
return input_lib_v1.DatasetIterator(dataset, self._input_workers,
self._container_strategy())
def _make_input_fn_iterator(
self,
input_fn,
replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
return input_lib_v1.InputFunctionIterator(input_fn, self._input_workers,
[distribute_lib.InputContext()],
self._container_strategy())
def _experimental_make_numpy_dataset(self, numpy_input, session):
return numpy_dataset.one_host_numpy_dataset(
numpy_input, numpy_dataset.SingleDevice(self._input_device), session)
def _broadcast_to(self, tensor, destinations):
del destinations
return tensor
def _experimental_distribute_dataset(self, dataset, options):
# Note that split_batch_by argument is not passed because it is always 1 in
# this strategy, and adding it adds unnecessary overhead to the dataset.
if (options and options.experimental_replication_mode ==
distribute_lib.InputReplicationMode.PER_REPLICA):
raise NotImplementedError(
"InputReplicationMode.PER_REPLICA "
"is only supported in "
"`experimental_distribute_datasets_from_function`."
)
return input_util.get_distributed_dataset(
dataset,
self._input_workers_with_options(options),
self._container_strategy(),
options=options)
def _distribute_datasets_from_function(self, dataset_fn, options):
if (options and options.experimental_replication_mode ==
distribute_lib.InputReplicationMode.PER_REPLICA):
raise NotImplementedError(
"InputReplicationMode.PER_REPLICA "
"is only supported in "
"`experimental_distribute_datasets_from_function` "
"of tf.distribute.MirroredStrategy")
return input_util.get_distributed_datasets_from_function(
dataset_fn,
self._input_workers_with_options(options),
[distribute_lib.InputContext()],
self._container_strategy(),
options=options)
def _experimental_distribute_values_from_function(self, value_fn):
# TODO(b/137795644): This should return a PerReplica value but other
# methods like run in OneDeviceStrategy need to be modified
# to do the same.
return value_fn(distribute_lib.ValueContext())
# TODO(priyag): Deal with OutOfRange errors once b/111349762 is fixed.
def _experimental_run_steps_on_iterator(self, fn, iterator, iterations,
initial_loop_values=None):
if initial_loop_values is None:
initial_loop_values = {}
initial_loop_values = nest.flatten(initial_loop_values)
ctx = input_lib.MultiStepContext()
def body(i, *args):
"""A wrapper around `fn` to create the while loop body."""
del args
fn_result = fn(ctx, iterator.get_next())
flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)
with ops.control_dependencies([fn_result]):
return [i + 1] + flat_last_step_outputs
# We capture the control_flow_context at this point, before we run `fn`
# inside a while_loop. This is useful in cases where we might need to exit
# these contexts and get back to the outer context to do some things, for
# e.g. create an op which should be evaluated only once at the end of the
# loop on the host. One such usage is in creating metrics' value op.
self._outer_control_flow_context = (
ops.get_default_graph()._get_control_flow_context()) # pylint: disable=protected-access
# TODO(priyag): Use max_iterations instead of an explicit counter.
cond = lambda i, *args: i < iterations
i = constant_op.constant(0)
loop_result = control_flow_ops.while_loop(
cond, body, [i] + initial_loop_values, name="",
parallel_iterations=1, back_prop=False, swap_memory=False,
return_same_structure=True)
del self._outer_control_flow_context
ctx.run_op = control_flow_ops.group(loop_result)
# Convert the last_step_outputs from a list to the original dict structure
# of last_step_outputs.
last_step_tensor_outputs = loop_result[1:]
last_step_tensor_outputs_dict = nest.pack_sequence_as(
ctx.last_step_outputs, last_step_tensor_outputs)
ctx._set_last_step_outputs(last_step_tensor_outputs_dict) # pylint: disable=protected-access
return ctx
def _call_for_each_replica(self, fn, args, kwargs):
strategy = self._container_strategy()
with ops.device(self._device), _OneDeviceReplicaContext(strategy):
return fn(*args, **kwargs)
def _reduce_to(self, reduce_op, value, destinations, options):
del reduce_op, destinations, options
return value
def _gather_to_implementation(self, value, destinations, axis, options):
del destinations, axis, options
return value
def _update(self, var, fn, args, kwargs, group):
# The implementations of _update() and _update_non_slot() are identical
# except _update() passes `var` as the first argument to `fn()`.
return self._update_non_slot(var, fn, (var,) + tuple(args), kwargs, group)
def _update_non_slot(self, colocate_with, fn, args, kwargs, group):
del colocate_with
with ops.device(self._device), distribute_lib.UpdateContext(self._device):
result = fn(*args, **kwargs)
if group:
return result
else:
return nest.map_structure(self._local_results, result)
def read_var(self, replica_local_var):
"""Read the aggregate value of a replica-local variable."""
return array_ops.identity(replica_local_var)
def _local_results(self, value):
return (value,)
def value_container(self, value):
return value
def _in_multi_worker_mode(self):
"""Whether this strategy indicates working in multi-worker settings."""
return False
@property
def _num_replicas_in_sync(self):
return 1
@property
def worker_devices(self):
return (self._device,)
@property
def parameter_devices(self):
return (self._device,)
def non_slot_devices(self, var_list):
del var_list
return (self._device,)
@property
def experimental_should_init(self):
return True
@property
def experimental_between_graph(self):
return False
@property
def should_checkpoint(self):
return True
@property
def should_save_summary(self):
return True
# TODO(priyag): Delete this once all strategies use global batch size.
@property
def _global_batch_size(self):
"""Global and per-replica batching are equivalent for OneDeviceStrategy."""
return True
@property
def _support_per_replica_values(self):
return False
def _get_local_replica_id(self, replica_id_in_sync_group):
return replica_id_in_sync_group
class _OneDeviceReplicaContext(distribute_lib.ReplicaContext):
"""ReplicaContext for OneDeviceStrategy."""
def __init__(self, strategy):
distribute_lib.ReplicaContext.__init__(
self, strategy, replica_id_in_sync_group=0)
@property
def devices(self):
return self._strategy.extended.worker_devices
| tensorflow/tensorflow | tensorflow/python/distribute/one_device_strategy.py | Python | apache-2.0 | 18,606 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pprof_profiler."""
import gzip
from proto import profile_pb2
from tensorflow.core.framework import step_stats_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.profiler import pprof_profiler
class PprofProfilerTest(test.TestCase):
def testDataEmpty(self):
output_dir = test.get_temp_dir()
run_metadata = config_pb2.RunMetadata()
graph = test.mock.MagicMock()
graph.get_operations.return_value = []
profiles = pprof_profiler.get_profiles(graph, run_metadata)
self.assertEqual(0, len(profiles))
profile_files = pprof_profiler.profile(
graph, run_metadata, output_dir)
self.assertEqual(0, len(profile_files))
def testRunMetadataEmpty(self):
output_dir = test.get_temp_dir()
run_metadata = config_pb2.RunMetadata()
graph = test.mock.MagicMock()
op1 = test.mock.MagicMock()
op1.name = 'Add/123'
op1.traceback = [('a/b/file1', 10, 'some_var')]
op1.type = 'add'
graph.get_operations.return_value = [op1]
profiles = pprof_profiler.get_profiles(graph, run_metadata)
self.assertEqual(0, len(profiles))
profile_files = pprof_profiler.profile(
graph, run_metadata, output_dir)
self.assertEqual(0, len(profile_files))
def testValidProfile(self):
output_dir = test.get_temp_dir()
run_metadata = config_pb2.RunMetadata()
node1 = step_stats_pb2.NodeExecStats(
node_name='Add/123',
op_start_rel_micros=3,
op_end_rel_micros=5,
all_end_rel_micros=4)
run_metadata = config_pb2.RunMetadata()
device1 = run_metadata.step_stats.dev_stats.add()
device1.device = 'deviceA'
device1.node_stats.extend([node1])
graph = test.mock.MagicMock()
op1 = test.mock.MagicMock()
op1.name = 'Add/123'
op1.traceback = [
('a/b/file1', 10, 'apply_op', 'abc'), ('a/c/file2', 12, 'my_op', 'def')]
op1.type = 'add'
graph.get_operations.return_value = [op1]
expected_proto = """sample_type {
type: 5
unit: 5
}
sample_type {
type: 6
unit: 7
}
sample_type {
type: 8
unit: 7
}
sample {
value: 1
value: 4
value: 2
label {
key: 1
str: 2
}
label {
key: 3
str: 4
}
}
string_table: ""
string_table: "node_name"
string_table: "Add/123"
string_table: "op_type"
string_table: "add"
string_table: "count"
string_table: "all_time"
string_table: "nanoseconds"
string_table: "op_time"
string_table: "Device 1 of 1: deviceA"
comment: 9
"""
# Test with protos
profiles = pprof_profiler.get_profiles(graph, run_metadata)
self.assertEqual(1, len(profiles))
self.assertTrue('deviceA' in profiles)
self.assertEqual(expected_proto, str(profiles['deviceA']))
# Test with files
profile_files = pprof_profiler.profile(
graph, run_metadata, output_dir)
self.assertEqual(1, len(profile_files))
with gzip.open(profile_files[0]) as profile_file:
profile_contents = profile_file.read()
profile = profile_pb2.Profile()
profile.ParseFromString(profile_contents)
self.assertEqual(expected_proto, str(profile))
@test_util.run_v1_only('b/120545219')
def testProfileWithWhileLoop(self):
options = config_pb2.RunOptions()
options.trace_level = config_pb2.RunOptions.FULL_TRACE
run_metadata = config_pb2.RunMetadata()
num_iters = 5
with self.cached_session() as sess:
i = constant_op.constant(0)
c = lambda i: math_ops.less(i, num_iters)
b = lambda i: math_ops.add(i, 1)
r = control_flow_ops.while_loop(c, b, [i])
sess.run(r, options=options, run_metadata=run_metadata)
profiles = pprof_profiler.get_profiles(sess.graph, run_metadata)
self.assertEqual(1, len(profiles))
profile = next(iter(profiles.values()))
add_samples = [] # Samples for the while/Add node
for sample in profile.sample:
if profile.string_table[sample.label[0].str] == 'while/Add':
add_samples.append(sample)
# Values for same nodes are aggregated.
self.assertEqual(1, len(add_samples))
# Value of "count" should be equal to number of iterations.
self.assertEqual(num_iters, add_samples[0].value[0])
if __name__ == '__main__':
test.main()
| tensorflow/tensorflow | tensorflow/python/profiler/pprof_profiler_test.py | Python | apache-2.0 | 5,145 |
# -*- coding: utf-8 -*-
# This file is part of pygal
#
# A python svg graph plotting library
# Copyright © 2012-2014 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pygal. If not, see <http://www.gnu.org/licenses/>.
"""
Horizontal graph base
"""
from pygal.graph.graph import Graph
from pygal.view import HorizontalView, HorizontalLogView
class HorizontalGraph(Graph):
"""Horizontal graph"""
def __init__(self, *args, **kwargs):
self.horizontal = True
super(HorizontalGraph, self).__init__(*args, **kwargs)
def _post_compute(self):
self._x_labels, self._y_labels = self._y_labels, self._x_labels
self._x_2nd_labels, self._y_2nd_labels = (
self._y_2nd_labels, self._x_2nd_labels)
def _axes(self):
self.view._force_vertical = True
super(HorizontalGraph, self)._axes()
self.view._force_vertical = False
def _set_view(self):
"""Assign a view to current graph"""
if self.logarithmic:
view_class = HorizontalLogView
else:
view_class = HorizontalView
self.view = view_class(
self.width - self.margin.x,
self.height - self.margin.y,
self._box)
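# Illustrative usage sketch (not part of this module): concrete horizontal
# chart types such as pygal.HorizontalBar build on this base class, so the
# usual add()/render() API produces a chart with swapped axes:
#
#   import pygal
#   chart = pygal.HorizontalBar()
#   chart.add('Firefox', [45, 46, 42])
#   chart.add('Chrome', [30, 35, 43])
#   svg = chart.render()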
| mytliulei/DCNRobotInstallPackages | windows/win32/pygal-1.7.0/pygal/graph/horizontal.py | Python | apache-2.0 | 1,780 |
import random
from tests.checks.common import AgentCheckTest, load_check
from utils.containers import hash_mutable
MOCK_CONFIG = {
'init_config': {},
'instances' : [{
'url': 'http://localhost:8500',
'catalog_checks': True,
}]
}
MOCK_CONFIG_SERVICE_WHITELIST = {
'init_config': {},
'instances' : [{
'url': 'http://localhost:8500',
'catalog_checks': True,
'service_whitelist': ['service_{0}'.format(k) for k in range(70)]
}]
}
MOCK_CONFIG_LEADER_CHECK = {
'init_config': {},
'instances' : [{
'url': 'http://localhost:8500',
'catalog_checks': True,
'new_leader_checks': True
}]
}
MOCK_CONFIG_SELF_LEADER_CHECK = {
'init_config': {},
'instances' : [{
'url': 'http://localhost:8500',
'catalog_checks': True,
'self_leader_check': True
}]
}
MOCK_CONFIG_NETWORK_LATENCY_CHECKS = {
'init_config': {},
'instances' : [{
'url': 'http://localhost:8500',
'catalog_checks': True,
'network_latency_checks': True
}]
}
MOCK_BAD_CONFIG = {
'init_config': {},
'instances' : [{ # Multiple instances should cause it to fail
'url': 'http://localhost:8500',
'catalog_checks': True,
'new_leader_checks': True
}, {
'url': 'http://localhost:8501',
'catalog_checks': True,
'new_leader_checks': True,
'self_leader_check': True
}]
}
def _get_random_ip():
rand_int = int(15 * random.random()) + 10
return "10.0.2.{0}".format(rand_int)
class TestCheckConsul(AgentCheckTest):
CHECK_NAME = 'consul'
def mock_get_peers_in_cluster(self, instance):
return [
"10.0.2.14:8300",
"10.0.2.15:8300",
"10.0.2.16:8300"
]
def mock_get_services_in_cluster(self, instance):
return {
"service-1": [
"az-us-east-1a"
],
"service-2": [
"az-us-east-1a"
],
"service-3": [
"az-us-east-1a"
],
"service-4": [
"az-us-east-1a"
],
"service-5": [
"az-us-east-1a"
],
"service-6": [
"az-us-east-1a"
]
}
def mock_get_n_services_in_cluster(self, n):
dct = {}
for i in range(n):
k = "service_{0}".format(i)
dct[k] = []
return dct
def mock_get_local_config(self, instance, instance_state):
return {
"Config": {
"AdvertiseAddr": "10.0.2.15",
"Datacenter": "dc1",
"Ports": {
"DNS": 8600,
"HTTP": 8500,
"HTTPS": -1,
"RPC": 8400,
"SerfLan": 8301,
"SerfWan": 8302,
"Server": 8300
},
}
}
def mock_get_nodes_in_cluster(self, instance):
return [
{
"Address": "10.0.2.15",
"Node": "node-1"
},
{
"Address": "10.0.2.25",
"Node": "node-2"
},
{
"Address": "10.0.2.35",
"Node": "node-2"
},
]
def mock_get_nodes_with_service(self, instance, service):
return [
{
"Checks": [
{
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Node": "node-1",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"Status": "passing"
},
{
"CheckID": "service:{0}".format(service),
"Name": "service check {0}".format(service),
"Node": "node-1",
"Notes": "",
"Output": "Service {0} alive".format(service),
"ServiceID": service,
"ServiceName": "",
"Status": "passing"
}
],
"Node": {
"Address": _get_random_ip(),
"Node": "node-1"
},
"Service": {
"Address": "",
"ID": service,
"Port": 80,
"Service": service,
"Tags": [
"az-us-east-1a"
]
}
}
]
def mock_get_nodes_with_service_warning(self, instance, service):
return [
{
"Checks": [
{
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Node": "node-1",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"Status": "passing"
},
{
"CheckID": "service:{0}".format(service),
"Name": "service check {0}".format(service),
"Node": "node-1",
"Notes": "",
"Output": "Service {0} alive".format(service),
"ServiceID": service,
"ServiceName": "",
"Status": "warning"
}
],
"Node": {
"Address": _get_random_ip(),
"Node": "node-1"
},
"Service": {
"Address": "",
"ID": service,
"Port": 80,
"Service": service,
"Tags": [
"az-us-east-1a"
]
}
}
]
def mock_get_nodes_with_service_critical(self, instance, service):
return [
{
"Checks": [
{
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Node": "node-1",
"Notes": "",
"Output": "Agent alive and reachable",
"ServiceID": "",
"ServiceName": "",
"Status": "passing"
},
{
"CheckID": "service:{0}".format(service),
"Name": "service check {0}".format(service),
"Node": "node-1",
"Notes": "",
"Output": "Service {0} alive".format(service),
"ServiceID": service,
"ServiceName": "",
"Status": "warning"
},
{
"CheckID": "service:{0}".format(service),
"Name": "service check {0}".format(service),
"Node": "node-1",
"Notes": "",
"Output": "Service {0} alive".format(service),
"ServiceID": service,
"ServiceName": "",
"Status": "critical"
}
],
"Node": {
"Address": _get_random_ip(),
"Node": "node-1"
},
"Service": {
"Address": "",
"ID": service,
"Port": 80,
"Service": service,
"Tags": [
"az-us-east-1a"
]
}
}
]
def mock_get_coord_datacenters(self, instance):
return [{
"Datacenter": "dc1",
"Coordinates": [
{
"Node": "host-1",
"Coord": {
"Vec": [
0.036520147625677804,
-0.00453289164613373,
-0.020523210880196232,
-0.02699760529719879,
-0.02689207977655939,
-0.01993826834797845,
-0.013022029942846501,
-0.002101656069659926
],
"Error": 0.11137306578107628,
"Adjustment": -0.00021065907491393056,
"Height": 1.1109163532378512e-05
}
}]
}, {
"Datacenter": "dc2",
"Coordinates": [
{
"Node": "host-2",
"Coord": {
"Vec": [
0.03548568620505946,
-0.0038202417296129025,
-0.01987440114252717,
-0.026223108843980016,
-0.026581965209197853,
-0.01891384862245717,
-0.013677323575279184,
-0.0014257906933581217
],
"Error": 0.06388569381495224,
"Adjustment": -0.00036731776343708724,
"Height": 8.962823816793629e-05
}
}]
}]
def mock_get_coord_nodes(self, instance):
return [{
"Node": "host-1",
"Coord": {
"Vec": [
0.007682993877165208,
0.002411059340215172,
0.0016420746641640123,
0.0037411046929292906,
0.004541946058965728,
0.0032195622863890523,
-0.0039447666794166095,
-0.0021767019427297815
],
"Error": 0.28019529748212335,
"Adjustment": -9.966407036439966e-05,
"Height": 0.00011777098790169723
}
}, {
"Node": "host-2",
"Coord": {
"Vec": [
0.007725239390196322,
0.0025160987581685982,
0.0017412811939227935,
0.003740935739394932,
0.004628794642643524,
0.003190871896051593,
-0.004058197296573195,
-0.002108437352702053
],
"Error": 0.31518043241386984,
"Adjustment": -0.00012274366490350246,
"Height": 0.00015006836008626717
}
}]
def mock_get_cluster_leader_A(self, instance):
return '10.0.2.15:8300'
def mock_get_cluster_leader_B(self, instance):
return 'My New Leader'
def _get_consul_mocks(self):
return {
'get_services_in_cluster': self.mock_get_services_in_cluster,
'get_nodes_with_service': self.mock_get_nodes_with_service,
'get_peers_in_cluster': self.mock_get_peers_in_cluster,
'_get_local_config': self.mock_get_local_config,
'_get_cluster_leader': self.mock_get_cluster_leader_A,
'_get_coord_datacenters': self.mock_get_coord_datacenters,
'_get_coord_nodes': self.mock_get_coord_nodes,
}
def test_get_nodes_with_service(self):
self.run_check(MOCK_CONFIG, mocks=self._get_consul_mocks())
self.assertMetric('consul.catalog.nodes_up', value=1, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
self.assertMetric('consul.catalog.nodes_passing', value=1, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
self.assertMetric('consul.catalog.nodes_warning', value=0, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
self.assertMetric('consul.catalog.nodes_critical', value=0, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
self.assertMetric('consul.catalog.services_up', value=6, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
self.assertMetric('consul.catalog.services_passing', value=6, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
self.assertMetric('consul.catalog.services_warning', value=0, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
self.assertMetric('consul.catalog.services_critical', value=0, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
def test_get_nodes_with_service_warning(self):
my_mocks = self._get_consul_mocks()
my_mocks['get_nodes_with_service'] = self.mock_get_nodes_with_service_warning
self.run_check(MOCK_CONFIG, mocks=my_mocks)
self.assertMetric('consul.catalog.nodes_up', value=1, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
self.assertMetric('consul.catalog.nodes_passing', value=0, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
self.assertMetric('consul.catalog.nodes_warning', value=1, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
self.assertMetric('consul.catalog.nodes_critical', value=0, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
self.assertMetric('consul.catalog.services_up', value=6, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
self.assertMetric('consul.catalog.services_passing', value=0, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
self.assertMetric('consul.catalog.services_warning', value=6, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
self.assertMetric('consul.catalog.services_critical', value=0, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
def test_get_nodes_with_service_critical(self):
my_mocks = self._get_consul_mocks()
my_mocks['get_nodes_with_service'] = self.mock_get_nodes_with_service_critical
self.run_check(MOCK_CONFIG, mocks=my_mocks)
self.assertMetric('consul.catalog.nodes_up', value=1, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
self.assertMetric('consul.catalog.nodes_passing', value=0, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
self.assertMetric('consul.catalog.nodes_warning', value=0, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
self.assertMetric('consul.catalog.nodes_critical', value=1, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
self.assertMetric('consul.catalog.services_up', value=6, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
self.assertMetric('consul.catalog.services_passing', value=0, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
self.assertMetric('consul.catalog.services_warning', value=0, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
self.assertMetric('consul.catalog.services_critical', value=6, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
def test_get_peers_in_cluster(self):
mocks = self._get_consul_mocks()
# When node is leader
self.run_check(MOCK_CONFIG, mocks=mocks)
self.assertMetric('consul.peers', value=3, tags=['consul_datacenter:dc1', 'mode:leader'])
mocks['_get_cluster_leader'] = self.mock_get_cluster_leader_B
# When node is follower
self.run_check(MOCK_CONFIG, mocks=mocks)
self.assertMetric('consul.peers', value=3, tags=['consul_datacenter:dc1', 'mode:follower'])
def test_cull_services_list(self):
self.check = load_check(self.CHECK_NAME, MOCK_CONFIG_LEADER_CHECK, self.DEFAULT_AGENT_CONFIG)
# Pad num_services to kick in truncation logic
num_services = self.check.MAX_SERVICES + 20
# Big whitelist
services = self.mock_get_n_services_in_cluster(num_services)
whitelist = ['service_{0}'.format(k) for k in range(num_services)]
self.assertEqual(len(self.check._cull_services_list(services, whitelist)), self.check.MAX_SERVICES)
# Whitelist < MAX_SERVICES should spit out the whitelist
services = self.mock_get_n_services_in_cluster(num_services)
whitelist = ['service_{0}'.format(k) for k in range(self.check.MAX_SERVICES-1)]
self.assertEqual(set(self.check._cull_services_list(services, whitelist)), set(whitelist))
# No whitelist, still triggers truncation
whitelist = []
self.assertEqual(len(self.check._cull_services_list(services, whitelist)), self.check.MAX_SERVICES)
# Num. services < MAX_SERVICES should be no-op in absence of whitelist
num_services = self.check.MAX_SERVICES - 1
services = self.mock_get_n_services_in_cluster(num_services)
self.assertEqual(len(self.check._cull_services_list(services, whitelist)), num_services)
# Num. services < MAX_SERVICES should spit out only the whitelist when one is defined
num_services = self.check.MAX_SERVICES - 1
whitelist = ['service_1', 'service_2', 'service_3']
services = self.mock_get_n_services_in_cluster(num_services)
self.assertEqual(set(self.check._cull_services_list(services, whitelist)), set(whitelist))
def test_new_leader_event(self):
self.check = load_check(self.CHECK_NAME, MOCK_CONFIG_LEADER_CHECK, self.DEFAULT_AGENT_CONFIG)
instance_hash = hash_mutable(MOCK_CONFIG_LEADER_CHECK['instances'][0])
self.check._instance_states[instance_hash].last_known_leader = 'My Old Leader'
mocks = self._get_consul_mocks()
mocks['_get_cluster_leader'] = self.mock_get_cluster_leader_B
self.run_check(MOCK_CONFIG_LEADER_CHECK, mocks=mocks)
self.assertEqual(len(self.events), 1)
event = self.events[0]
self.assertEqual(event['event_type'], 'consul.new_leader')
self.assertIn('prev_consul_leader:My Old Leader', event['tags'])
self.assertIn('curr_consul_leader:My New Leader', event['tags'])
def test_self_leader_event(self):
self.check = load_check(self.CHECK_NAME, MOCK_CONFIG_SELF_LEADER_CHECK, self.DEFAULT_AGENT_CONFIG)
instance_hash = hash_mutable(MOCK_CONFIG_SELF_LEADER_CHECK['instances'][0])
self.check._instance_states[instance_hash].last_known_leader = 'My Old Leader'
mocks = self._get_consul_mocks()
our_url = self.mock_get_cluster_leader_A(None)
other_url = self.mock_get_cluster_leader_B(None)
# We become the leader
mocks['_get_cluster_leader'] = self.mock_get_cluster_leader_A
self.run_check(MOCK_CONFIG_SELF_LEADER_CHECK, mocks=mocks)
self.assertEqual(len(self.events), 1)
self.assertEqual(our_url, self.check._instance_states[instance_hash].last_known_leader)
event = self.events[0]
self.assertEqual(event['event_type'], 'consul.new_leader')
self.assertIn('prev_consul_leader:My Old Leader', event['tags'])
self.assertIn('curr_consul_leader:%s' % our_url, event['tags'])
# We are already the leader, no new events
self.run_check(MOCK_CONFIG_SELF_LEADER_CHECK, mocks=mocks)
self.assertEqual(len(self.events), 0)
# We lose the leader, no new events
mocks['_get_cluster_leader'] = self.mock_get_cluster_leader_B
self.run_check(MOCK_CONFIG_SELF_LEADER_CHECK, mocks=mocks)
self.assertEqual(len(self.events), 0)
self.assertEqual(other_url, self.check._instance_states[instance_hash].last_known_leader)
# We regain the leadership
mocks['_get_cluster_leader'] = self.mock_get_cluster_leader_A
self.run_check(MOCK_CONFIG_SELF_LEADER_CHECK, mocks=mocks)
self.assertEqual(len(self.events), 1)
self.assertEqual(our_url, self.check._instance_states[instance_hash].last_known_leader)
event = self.events[0]
self.assertEqual(event['event_type'], 'consul.new_leader')
self.assertIn('prev_consul_leader:%s' % other_url, event['tags'])
self.assertIn('curr_consul_leader:%s' % our_url, event['tags'])
def test_network_latency_checks(self):
self.check = load_check(self.CHECK_NAME, MOCK_CONFIG_NETWORK_LATENCY_CHECKS,
self.DEFAULT_AGENT_CONFIG)
mocks = self._get_consul_mocks()
# We start out as the leader, and stay that way
instance_hash = hash_mutable(MOCK_CONFIG_NETWORK_LATENCY_CHECKS['instances'][0])
self.check._instance_states[instance_hash].last_known_leader = self.mock_get_cluster_leader_A(None)
self.run_check(MOCK_CONFIG_NETWORK_LATENCY_CHECKS, mocks=mocks)
latency = [m for m in self.metrics if m[0].startswith('consul.net.')]
latency.sort()
# Make sure we have the expected number of metrics
self.assertEquals(19, len(latency))
# Only 3 dc-latency metrics since we only do source = self
dc = [m for m in latency if '.dc.latency.' in m[0]]
self.assertEquals(3, len(dc))
self.assertEquals(1.6746410750238774, dc[0][2])
# 16 latency metrics, 2 nodes * 8 metrics each
node = [m for m in latency if '.node.latency.' in m[0]]
self.assertEquals(16, len(node))
self.assertEquals(0.26577747932995816, node[0][2])
| Wattpad/dd-agent | tests/checks/mock/test_consul.py | Python | bsd-3-clause | 21,895 |
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.core import management
from django.test import TestCase
from django.utils.six import StringIO
from .models import (
Car, CarDriver, Driver, Group, Membership, Person, UserMembership,
)
class M2MThroughTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.bob = Person.objects.create(name="Bob")
cls.jim = Person.objects.create(name="Jim")
cls.rock = Group.objects.create(name="Rock")
cls.roll = Group.objects.create(name="Roll")
cls.frank = User.objects.create_user("frank", "[email protected]", "password")
cls.jane = User.objects.create_user("jane", "[email protected]", "password")
# normal intermediate model
cls.bob_rock = Membership.objects.create(person=cls.bob, group=cls.rock)
cls.bob_roll = Membership.objects.create(person=cls.bob, group=cls.roll, price=50)
cls.jim_rock = Membership.objects.create(person=cls.jim, group=cls.rock, price=50)
# intermediate model with custom id column
cls.frank_rock = UserMembership.objects.create(user=cls.frank, group=cls.rock)
cls.frank_roll = UserMembership.objects.create(user=cls.frank, group=cls.roll)
cls.jane_rock = UserMembership.objects.create(user=cls.jane, group=cls.rock)
def test_retrieve_reverse_m2m_items(self):
self.assertQuerysetEqual(
self.bob.group_set.all(), [
"<Group: Rock>",
"<Group: Roll>",
],
ordered=False
)
def test_retrieve_forward_m2m_items(self):
self.assertQuerysetEqual(
self.roll.members.all(), [
"<Person: Bob>",
]
)
def test_cannot_use_setattr_on_reverse_m2m_with_intermediary_model(self):
msg = (
"Cannot set values on a ManyToManyField which specifies an "
"intermediary model. Use m2m_through_regress.Membership's Manager "
"instead."
)
with self.assertRaisesMessage(AttributeError, msg):
self.bob.group_set.set([])
def test_cannot_use_setattr_on_forward_m2m_with_intermediary_model(self):
msg = (
"Cannot set values on a ManyToManyField which specifies an "
"intermediary model. Use m2m_through_regress.Membership's Manager "
"instead."
)
with self.assertRaisesMessage(AttributeError, msg):
self.roll.members.set([])
def test_cannot_use_create_on_m2m_with_intermediary_model(self):
with self.assertRaises(AttributeError):
self.rock.members.create(name="Anne")
def test_cannot_use_create_on_reverse_m2m_with_intermediary_model(self):
with self.assertRaises(AttributeError):
self.bob.group_set.create(name="Funk")
def test_retrieve_reverse_m2m_items_via_custom_id_intermediary(self):
self.assertQuerysetEqual(
self.frank.group_set.all(), [
"<Group: Rock>",
"<Group: Roll>",
],
ordered=False
)
def test_retrieve_forward_m2m_items_via_custom_id_intermediary(self):
self.assertQuerysetEqual(
self.roll.user_members.all(), [
"<User: frank>",
]
)
def test_join_trimming_forwards(self):
"""
Too many copies of the intermediate table aren't involved when doing a
join (#8046, #8254).
"""
self.assertQuerysetEqual(
self.rock.members.filter(membership__price=50), [
"<Person: Jim>",
]
)
def test_join_trimming_reverse(self):
self.assertQuerysetEqual(
self.bob.group_set.filter(membership__price=50), [
"<Group: Roll>",
]
)
class M2MThroughSerializationTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.bob = Person.objects.create(name="Bob")
cls.roll = Group.objects.create(name="Roll")
cls.bob_roll = Membership.objects.create(person=cls.bob, group=cls.roll)
def test_serialization(self):
"m2m-through models aren't serialized as m2m fields. Refs #8134"
pks = {"p_pk": self.bob.pk, "g_pk": self.roll.pk, "m_pk": self.bob_roll.pk}
out = StringIO()
management.call_command("dumpdata", "m2m_through_regress", format="json", stdout=out)
self.assertJSONEqual(
out.getvalue().strip(),
'[{"pk": %(m_pk)s, "model": "m2m_through_regress.membership", "fields": {"person": %(p_pk)s, "price": '
'100, "group": %(g_pk)s}}, {"pk": %(p_pk)s, "model": "m2m_through_regress.person", "fields": {"name": '
'"Bob"}}, {"pk": %(g_pk)s, "model": "m2m_through_regress.group", "fields": {"name": "Roll"}}]'
% pks
)
out = StringIO()
management.call_command("dumpdata", "m2m_through_regress", format="xml", indent=2, stdout=out)
self.assertXMLEqual(out.getvalue().strip(), """
<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object pk="%(m_pk)s" model="m2m_through_regress.membership">
<field to="m2m_through_regress.person" name="person" rel="ManyToOneRel">%(p_pk)s</field>
<field to="m2m_through_regress.group" name="group" rel="ManyToOneRel">%(g_pk)s</field>
<field type="IntegerField" name="price">100</field>
</object>
<object pk="%(p_pk)s" model="m2m_through_regress.person">
<field type="CharField" name="name">Bob</field>
</object>
<object pk="%(g_pk)s" model="m2m_through_regress.group">
<field type="CharField" name="name">Roll</field>
</object>
</django-objects>
""".strip() % pks)
class ToFieldThroughTests(TestCase):
def setUp(self):
self.car = Car.objects.create(make="Toyota")
self.driver = Driver.objects.create(name="Ryan Briscoe")
CarDriver.objects.create(car=self.car, driver=self.driver)
# We are testing if wrong objects get deleted due to using wrong
# field value in m2m queries. So, it is essential that the pk
# numberings do not match.
# Create one intentionally unused driver to mix up the autonumbering
self.unused_driver = Driver.objects.create(name="Barney Gumble")
# And two intentionally unused cars.
self.unused_car1 = Car.objects.create(make="Trabant")
self.unused_car2 = Car.objects.create(make="Wartburg")
def test_to_field(self):
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Ryan Briscoe>"]
)
def test_to_field_reverse(self):
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>"]
)
def test_to_field_clear_reverse(self):
self.driver.car_set.clear()
self.assertQuerysetEqual(
self.driver.car_set.all(), [])
def test_to_field_clear(self):
self.car.drivers.clear()
self.assertQuerysetEqual(
self.car.drivers.all(), [])
# Low level tests for _add_items and _remove_items. We test these methods
# because .add/.remove aren't available for m2m fields with through, but
# through is the only way to set to_field currently. We do want to make
# sure these methods are ready if the ability to use .add or .remove with
# to_field relations is added some day.
def test_add(self):
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Ryan Briscoe>"]
)
# Yikes - barney is going to drive...
self.car.drivers._add_items('car', 'driver', self.unused_driver)
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Barney Gumble>", "<Driver: Ryan Briscoe>"]
)
def test_add_null(self):
nullcar = Car.objects.create(make=None)
with self.assertRaises(ValueError):
nullcar.drivers._add_items('car', 'driver', self.unused_driver)
def test_add_related_null(self):
nulldriver = Driver.objects.create(name=None)
with self.assertRaises(ValueError):
self.car.drivers._add_items('car', 'driver', nulldriver)
def test_add_reverse(self):
car2 = Car.objects.create(make="Honda")
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>"]
)
self.driver.car_set._add_items('driver', 'car', car2)
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>", "<Car: Honda>"],
ordered=False
)
def test_add_null_reverse(self):
nullcar = Car.objects.create(make=None)
with self.assertRaises(ValueError):
self.driver.car_set._add_items('driver', 'car', nullcar)
def test_add_null_reverse_related(self):
nulldriver = Driver.objects.create(name=None)
with self.assertRaises(ValueError):
nulldriver.car_set._add_items('driver', 'car', self.car)
def test_remove(self):
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Ryan Briscoe>"]
)
self.car.drivers._remove_items('car', 'driver', self.driver)
self.assertQuerysetEqual(
self.car.drivers.all(), [])
def test_remove_reverse(self):
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>"]
)
self.driver.car_set._remove_items('driver', 'car', self.car)
self.assertQuerysetEqual(
self.driver.car_set.all(), [])
class ThroughLoadDataTestCase(TestCase):
fixtures = ["m2m_through"]
def test_sequence_creation(self):
"""
Sequences on an m2m_through are created for the through model, not a
phantom auto-generated m2m table (#11107).
"""
out = StringIO()
management.call_command("dumpdata", "m2m_through_regress", format="json", stdout=out)
self.assertJSONEqual(
out.getvalue().strip(),
'[{"pk": 1, "model": "m2m_through_regress.usermembership", "fields": {"price": 100, "group": 1, "user"'
': 1}}, {"pk": 1, "model": "m2m_through_regress.person", "fields": {"name": "Guido"}}, {"pk": 1, '
'"model": "m2m_through_regress.group", "fields": {"name": "Python Core Group"}}]'
)
| frishberg/django | tests/m2m_through_regress/tests.py | Python | bsd-3-clause | 10,478 |
'''"Executable documentation" for the pickle module.
Extensive comments about the pickle protocols and pickle-machine opcodes
can be found here. Some functions meant for external use:
genops(pickle)
Generate all the opcodes in a pickle, as (opcode, arg, position) triples.
dis(pickle, out=None, memo=None, indentlevel=4)
Print a symbolic disassembly of a pickle.
'''
import codecs
import io
import pickle
import re
import sys
__all__ = ['dis', 'genops', 'optimize']
bytes_types = pickle.bytes_types
# Other ideas:
#
# - A pickle verifier: read a pickle and check it exhaustively for
# well-formedness. dis() does a lot of this already.
#
# - A protocol identifier: examine a pickle and return its protocol number
# (== the highest .proto attr value among all the opcodes in the pickle).
# dis() already prints this info at the end.
#
# - A pickle optimizer: for example, tuple-building code is sometimes more
# elaborate than necessary, catering for the possibility that the tuple
# is recursive. Or lots of times a PUT is generated that's never accessed
# by a later GET.
# "A pickle" is a program for a virtual pickle machine (PM, but more accurately
# called an unpickling machine). It's a sequence of opcodes, interpreted by the
# PM, building an arbitrarily complex Python object.
#
# For the most part, the PM is very simple: there are no looping, testing, or
# conditional instructions, no arithmetic and no function calls. Opcodes are
# executed once each, from first to last, until a STOP opcode is reached.
#
# The PM has two data areas, "the stack" and "the memo".
#
# Many opcodes push Python objects onto the stack; e.g., INT pushes a Python
# integer object on the stack, whose value is gotten from a decimal string
# literal immediately following the INT opcode in the pickle bytestream. Other
# opcodes take Python objects off the stack. The result of unpickling is
# whatever object is left on the stack when the final STOP opcode is executed.
#
# The memo is simply an array of objects, or it can be implemented as a dict
# mapping little integers to objects. The memo serves as the PM's "long term
# memory", and the little integers indexing the memo are akin to variable
# names. Some opcodes pop a stack object into the memo at a given index,
# and others push a memo object at a given index onto the stack again.
#
# At heart, that's all the PM has. Subtleties arise for these reasons:
#
# + Object identity. Objects can be arbitrarily complex, and subobjects
# may be shared (for example, the list [a, a] refers to the same object a
# twice). It can be vital that unpickling recreate an isomorphic object
# graph, faithfully reproducing sharing.
#
# + Recursive objects. For example, after "L = []; L.append(L)", L is a
# list, and L[0] is the same list. This is related to the object identity
# point, and some sequences of pickle opcodes are subtle in order to
# get the right result in all cases.
#
# + Things pickle doesn't know everything about. Examples of things pickle
# does know everything about are Python's builtin scalar and container
# types, like ints and tuples. They generally have opcodes dedicated to
# them. For things like module references and instances of user-defined
# classes, pickle's knowledge is limited. Historically, many enhancements
# have been made to the pickle protocol in order to do a better (faster,
# and/or more compact) job on those.
#
# + Backward compatibility and micro-optimization. As explained below,
# pickle opcodes never go away, not even when better ways to do a thing
# get invented. The repertoire of the PM just keeps growing over time.
# For example, protocol 0 had two opcodes for building Python integers (INT
# and LONG), protocol 1 added three more for more-efficient pickling of short
# integers, and protocol 2 added two more for more-efficient pickling of
# long integers (before protocol 2, the only ways to pickle a Python long
# took time quadratic in the number of digits, for both pickling and
# unpickling). "Opcode bloat" isn't so much a subtlety as a source of
# wearying complication.
#
#
# Pickle protocols:
#
# For compatibility, the meaning of a pickle opcode never changes. Instead new
# pickle opcodes get added, and each version's unpickler can handle all the
# pickle opcodes in all protocol versions to date. So old pickles continue to
# be readable forever. The pickler can generally be told to restrict itself to
# the subset of opcodes available under previous protocol versions too, so that
# users can create pickles under the current version readable by older
# versions. However, a pickle does not contain its version number embedded
# within it. If an older unpickler tries to read a pickle using a later
# protocol, the result is most likely an exception due to seeing an unknown (in
# the older unpickler) opcode.
#
# The original pickle used what's now called "protocol 0", and what was called
# "text mode" before Python 2.3. The entire pickle bytestream is made up of
# printable 7-bit ASCII characters, plus the newline character, in protocol 0.
# That's why it was called text mode. Protocol 0 is small and elegant, but
# sometimes painfully inefficient.
#
# The second major set of additions is now called "protocol 1", and was called
# "binary mode" before Python 2.3. This added many opcodes with arguments
# consisting of arbitrary bytes, including NUL bytes and unprintable "high bit"
# bytes. Binary mode pickles can be substantially smaller than equivalent
# text mode pickles, and sometimes faster too; e.g., BININT represents a 4-byte
# int as 4 bytes following the opcode, which is cheaper to unpickle than the
# (perhaps) 11-character decimal string attached to INT. Protocol 1 also added
# a number of opcodes that operate on many stack elements at once (like APPENDS
# and SETITEMS), and "shortcut" opcodes (like EMPTY_DICT and EMPTY_TUPLE).
#
# The third major set of additions came in Python 2.3, and is called "protocol
# 2". This added:
#
# - A better way to pickle instances of new-style classes (NEWOBJ).
#
# - A way for a pickle to identify its protocol (PROTO).
#
# - Time- and space- efficient pickling of long ints (LONG{1,4}).
#
# - Shortcuts for small tuples (TUPLE{1,2,3}}.
#
# - Dedicated opcodes for bools (NEWTRUE, NEWFALSE).
#
# - The "extension registry", a vector of popular objects that can be pushed
# efficiently by index (EXT{1,2,4}). This is akin to the memo and GET, but
# the registry contents are predefined (there's nothing akin to the memo's
# PUT).
#
# Another independent change with Python 2.3 is the abandonment of any
# pretense that it might be safe to load pickles received from untrusted
# parties -- no sufficient security analysis has been done to guarantee
# this and there isn't a use case that warrants the expense of such an
# analysis.
#
# To this end, all tests for __safe_for_unpickling__ or for
# copyreg.safe_constructors are removed from the unpickling code.
# References to these variables in the descriptions below are to be seen
# as describing unpickling in Python 2.2 and before.
# Meta-rule: Descriptions are stored in instances of descriptor objects,
# with plain constructors. No meta-language is defined from which
# descriptors could be constructed. If you want, e.g., XML, write a little
# program to generate XML from the objects.
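# For illustration, the stack-and-memo machinery described above can be seen
# by disassembling a small pickle with dis() (the output shown is approximate
# and depends on the pickle protocol used):
#
#     >>> import pickle, pickletools
#     >>> pickletools.dis(pickle.dumps((1, 2), protocol=2))
#         0: \x80 PROTO      2
#         2: K    BININT1    1
#         4: K    BININT1    2
#         6: \x86 TUPLE2
#         7: .    STOP
#     highest protocol among opcodes = 2
#
# Each output line corresponds to one (opcode, argument, position) triple,
# the same information genops() yields programmatically.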
##############################################################################
# Some pickle opcodes have an argument, following the opcode in the
# bytestream. An argument is of a specific type, described by an instance
# of ArgumentDescriptor. These are not to be confused with arguments taken
# off the stack -- ArgumentDescriptor applies only to arguments embedded in
# the opcode stream, immediately following an opcode.
# Represents the number of bytes consumed by an argument delimited by the
# next newline character.
UP_TO_NEWLINE = -1
# Represents the number of bytes consumed by a two-argument opcode where
# the first argument gives the number of bytes in the second argument.
TAKEN_FROM_ARGUMENT1 = -2 # num bytes is 1-byte unsigned int
TAKEN_FROM_ARGUMENT4 = -3 # num bytes is 4-byte signed little-endian int
TAKEN_FROM_ARGUMENT4U = -4 # num bytes is 4-byte unsigned little-endian int
TAKEN_FROM_ARGUMENT8U = -5 # num bytes is 8-byte unsigned little-endian int
class ArgumentDescriptor(object):
__slots__ = (
# name of descriptor record, also a module global name; a string
'name',
# length of argument, in bytes; an int; UP_TO_NEWLINE and
# TAKEN_FROM_ARGUMENT{1,4,8} are negative values for variable-length
# cases
'n',
# a function taking a file-like object, reading this kind of argument
# from the object at the current position, advancing the current
# position by n bytes, and returning the value of the argument
'reader',
# human-readable docs for this arg descriptor; a string
'doc',
)
def __init__(self, name, n, reader, doc):
assert isinstance(name, str)
self.name = name
assert isinstance(n, int) and (n >= 0 or
n in (UP_TO_NEWLINE,
TAKEN_FROM_ARGUMENT1,
TAKEN_FROM_ARGUMENT4,
TAKEN_FROM_ARGUMENT4U,
TAKEN_FROM_ARGUMENT8U))
self.n = n
self.reader = reader
assert isinstance(doc, str)
self.doc = doc
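# A small usage sketch (illustrative only; it relies on the `uint2` descriptor
# defined further below and the standard io module): each descriptor's
# `reader` consumes its argument directly from a binary stream, leaving the
# stream positioned just past the argument.
#
#     >>> import io
#     >>> uint2.reader(io.BytesIO(b"\x01\x02"))  # little-endian, so 0x0201
#     513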
from struct import unpack as _unpack
def read_uint1(f):
r"""
>>> import io
>>> read_uint1(io.BytesIO(b'\xff'))
255
"""
data = f.read(1)
if data:
return data[0]
raise ValueError("not enough data in stream to read uint1")
uint1 = ArgumentDescriptor(
name='uint1',
n=1,
reader=read_uint1,
doc="One-byte unsigned integer.")
def read_uint2(f):
r"""
>>> import io
>>> read_uint2(io.BytesIO(b'\xff\x00'))
255
>>> read_uint2(io.BytesIO(b'\xff\xff'))
65535
"""
data = f.read(2)
if len(data) == 2:
return _unpack("<H", data)[0]
raise ValueError("not enough data in stream to read uint2")
uint2 = ArgumentDescriptor(
name='uint2',
n=2,
reader=read_uint2,
doc="Two-byte unsigned integer, little-endian.")
def read_int4(f):
r"""
>>> import io
>>> read_int4(io.BytesIO(b'\xff\x00\x00\x00'))
255
>>> read_int4(io.BytesIO(b'\x00\x00\x00\x80')) == -(2**31)
True
"""
data = f.read(4)
if len(data) == 4:
return _unpack("<i", data)[0]
raise ValueError("not enough data in stream to read int4")
int4 = ArgumentDescriptor(
name='int4',
n=4,
reader=read_int4,
doc="Four-byte signed integer, little-endian, 2's complement.")
def read_uint4(f):
r"""
>>> import io
>>> read_uint4(io.BytesIO(b'\xff\x00\x00\x00'))
255
>>> read_uint4(io.BytesIO(b'\x00\x00\x00\x80')) == 2**31
True
"""
data = f.read(4)
if len(data) == 4:
return _unpack("<I", data)[0]
raise ValueError("not enough data in stream to read uint4")
uint4 = ArgumentDescriptor(
name='uint4',
n=4,
reader=read_uint4,
doc="Four-byte unsigned integer, little-endian.")
def read_uint8(f):
r"""
>>> import io
>>> read_uint8(io.BytesIO(b'\xff\x00\x00\x00\x00\x00\x00\x00'))
255
>>> read_uint8(io.BytesIO(b'\xff' * 8)) == 2**64-1
True
"""
data = f.read(8)
if len(data) == 8:
return _unpack("<Q", data)[0]
raise ValueError("not enough data in stream to read uint8")
uint8 = ArgumentDescriptor(
name='uint8',
n=8,
reader=read_uint8,
doc="Eight-byte unsigned integer, little-endian.")
def read_stringnl(f, decode=True, stripquotes=True):
r"""
>>> import io
>>> read_stringnl(io.BytesIO(b"'abcd'\nefg\n"))
'abcd'
>>> read_stringnl(io.BytesIO(b"\n"))
Traceback (most recent call last):
...
ValueError: no string quotes around b''
>>> read_stringnl(io.BytesIO(b"\n"), stripquotes=False)
''
>>> read_stringnl(io.BytesIO(b"''\n"))
''
>>> read_stringnl(io.BytesIO(b'"abcd"'))
Traceback (most recent call last):
...
ValueError: no newline found when trying to read stringnl
Embedded escapes are undone in the result.
>>> read_stringnl(io.BytesIO(br"'a\n\\b\x00c\td'" + b"\n'e'"))
'a\n\\b\x00c\td'
"""
data = f.readline()
if not data.endswith(b'\n'):
raise ValueError("no newline found when trying to read stringnl")
data = data[:-1] # lose the newline
if stripquotes:
for q in (b'"', b"'"):
if data.startswith(q):
if not data.endswith(q):
raise ValueError("strinq quote %r not found at both "
"ends of %r" % (q, data))
data = data[1:-1]
break
else:
raise ValueError("no string quotes around %r" % data)
if decode:
data = codecs.escape_decode(data)[0].decode("ascii")
return data
stringnl = ArgumentDescriptor(
name='stringnl',
n=UP_TO_NEWLINE,
reader=read_stringnl,
doc="""A newline-terminated string.
This is a repr-style string, with embedded escapes, and
bracketing quotes.
""")
def read_stringnl_noescape(f):
return read_stringnl(f, stripquotes=False)
stringnl_noescape = ArgumentDescriptor(
name='stringnl_noescape',
n=UP_TO_NEWLINE,
reader=read_stringnl_noescape,
doc="""A newline-terminated string.
This is a str-style string, without embedded escapes,
or bracketing quotes. It should consist solely of
printable ASCII characters.
""")
def read_stringnl_noescape_pair(f):
r"""
>>> import io
>>> read_stringnl_noescape_pair(io.BytesIO(b"Queue\nEmpty\njunk"))
'Queue Empty'
"""
return "%s %s" % (read_stringnl_noescape(f), read_stringnl_noescape(f))
stringnl_noescape_pair = ArgumentDescriptor(
name='stringnl_noescape_pair',
n=UP_TO_NEWLINE,
reader=read_stringnl_noescape_pair,
doc="""A pair of newline-terminated strings.
These are str-style strings, without embedded
escapes, or bracketing quotes. They should
consist solely of printable ASCII characters.
The pair is returned as a single string, with
a single blank separating the two strings.
""")
def read_string1(f):
r"""
>>> import io
>>> read_string1(io.BytesIO(b"\x00"))
''
>>> read_string1(io.BytesIO(b"\x03abcdef"))
'abc'
"""
n = read_uint1(f)
assert n >= 0
data = f.read(n)
if len(data) == n:
return data.decode("latin-1")
raise ValueError("expected %d bytes in a string1, but only %d remain" %
(n, len(data)))
string1 = ArgumentDescriptor(
name="string1",
n=TAKEN_FROM_ARGUMENT1,
reader=read_string1,
doc="""A counted string.
The first argument is a 1-byte unsigned int giving the number
of bytes in the string, and the second argument is that many
bytes.
""")
def read_string4(f):
r"""
>>> import io
>>> read_string4(io.BytesIO(b"\x00\x00\x00\x00abc"))
''
>>> read_string4(io.BytesIO(b"\x03\x00\x00\x00abcdef"))
'abc'
>>> read_string4(io.BytesIO(b"\x00\x00\x00\x03abcdef"))
Traceback (most recent call last):
...
ValueError: expected 50331648 bytes in a string4, but only 6 remain
"""
n = read_int4(f)
if n < 0:
raise ValueError("string4 byte count < 0: %d" % n)
data = f.read(n)
if len(data) == n:
return data.decode("latin-1")
raise ValueError("expected %d bytes in a string4, but only %d remain" %
(n, len(data)))
string4 = ArgumentDescriptor(
name="string4",
n=TAKEN_FROM_ARGUMENT4,
reader=read_string4,
doc="""A counted string.
The first argument is a 4-byte little-endian signed int giving
the number of bytes in the string, and the second argument is
that many bytes.
""")
def read_bytes1(f):
r"""
>>> import io
>>> read_bytes1(io.BytesIO(b"\x00"))
b''
>>> read_bytes1(io.BytesIO(b"\x03abcdef"))
b'abc'
"""
n = read_uint1(f)
assert n >= 0
data = f.read(n)
if len(data) == n:
return data
raise ValueError("expected %d bytes in a bytes1, but only %d remain" %
(n, len(data)))
bytes1 = ArgumentDescriptor(
name="bytes1",
n=TAKEN_FROM_ARGUMENT1,
reader=read_bytes1,
doc="""A counted bytes string.
The first argument is a 1-byte unsigned int giving the number
of bytes, and the second argument is that many bytes.
""")
def read_bytes4(f):
r"""
>>> import io
>>> read_bytes4(io.BytesIO(b"\x00\x00\x00\x00abc"))
b''
>>> read_bytes4(io.BytesIO(b"\x03\x00\x00\x00abcdef"))
b'abc'
>>> read_bytes4(io.BytesIO(b"\x00\x00\x00\x03abcdef"))
Traceback (most recent call last):
...
ValueError: expected 50331648 bytes in a bytes4, but only 6 remain
"""
n = read_uint4(f)
assert n >= 0
if n > sys.maxsize:
raise ValueError("bytes4 byte count > sys.maxsize: %d" % n)
data = f.read(n)
if len(data) == n:
return data
raise ValueError("expected %d bytes in a bytes4, but only %d remain" %
(n, len(data)))
bytes4 = ArgumentDescriptor(
name="bytes4",
n=TAKEN_FROM_ARGUMENT4U,
reader=read_bytes4,
doc="""A counted bytes string.
The first argument is a 4-byte little-endian unsigned int giving
the number of bytes, and the second argument is that many bytes.
""")
def read_bytes8(f):
r"""
>>> import io, struct, sys
>>> read_bytes8(io.BytesIO(b"\x00\x00\x00\x00\x00\x00\x00\x00abc"))
b''
>>> read_bytes8(io.BytesIO(b"\x03\x00\x00\x00\x00\x00\x00\x00abcdef"))
b'abc'
>>> bigsize8 = struct.pack("<Q", sys.maxsize//3)
>>> read_bytes8(io.BytesIO(bigsize8 + b"abcdef")) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: expected ... bytes in a bytes8, but only 6 remain
"""
n = read_uint8(f)
assert n >= 0
if n > sys.maxsize:
raise ValueError("bytes8 byte count > sys.maxsize: %d" % n)
data = f.read(n)
if len(data) == n:
return data
raise ValueError("expected %d bytes in a bytes8, but only %d remain" %
(n, len(data)))
bytes8 = ArgumentDescriptor(
name="bytes8",
n=TAKEN_FROM_ARGUMENT8U,
reader=read_bytes8,
doc="""A counted bytes string.
The first argument is an 8-byte little-endian unsigned int giving
the number of bytes, and the second argument is that many bytes.
""")
def read_unicodestringnl(f):
r"""
>>> import io
>>> read_unicodestringnl(io.BytesIO(b"abc\\uabcd\njunk")) == 'abc\uabcd'
True
"""
data = f.readline()
if not data.endswith(b'\n'):
raise ValueError("no newline found when trying to read "
"unicodestringnl")
data = data[:-1] # lose the newline
return str(data, 'raw-unicode-escape')
unicodestringnl = ArgumentDescriptor(
name='unicodestringnl',
n=UP_TO_NEWLINE,
reader=read_unicodestringnl,
doc="""A newline-terminated Unicode string.
This is raw-unicode-escape encoded, so consists of
printable ASCII characters, and may contain embedded
escape sequences.
""")
def read_unicodestring1(f):
r"""
>>> import io
>>> s = 'abcd\uabcd'
>>> enc = s.encode('utf-8')
>>> enc
b'abcd\xea\xaf\x8d'
>>> n = bytes([len(enc)]) # little-endian 1-byte length
>>> t = read_unicodestring1(io.BytesIO(n + enc + b'junk'))
>>> s == t
True
>>> read_unicodestring1(io.BytesIO(n + enc[:-1]))
Traceback (most recent call last):
...
ValueError: expected 7 bytes in a unicodestring1, but only 6 remain
"""
n = read_uint1(f)
assert n >= 0
data = f.read(n)
if len(data) == n:
return str(data, 'utf-8', 'surrogatepass')
raise ValueError("expected %d bytes in a unicodestring1, but only %d "
"remain" % (n, len(data)))
unicodestring1 = ArgumentDescriptor(
name="unicodestring1",
n=TAKEN_FROM_ARGUMENT1,
reader=read_unicodestring1,
doc="""A counted Unicode string.
      The first argument is a 1-byte unsigned int
giving the number of bytes in the string, and the second
argument-- the UTF-8 encoding of the Unicode string --
contains that many bytes.
""")
def read_unicodestring4(f):
r"""
>>> import io
>>> s = 'abcd\uabcd'
>>> enc = s.encode('utf-8')
>>> enc
b'abcd\xea\xaf\x8d'
>>> n = bytes([len(enc), 0, 0, 0]) # little-endian 4-byte length
>>> t = read_unicodestring4(io.BytesIO(n + enc + b'junk'))
>>> s == t
True
>>> read_unicodestring4(io.BytesIO(n + enc[:-1]))
Traceback (most recent call last):
...
ValueError: expected 7 bytes in a unicodestring4, but only 6 remain
"""
n = read_uint4(f)
assert n >= 0
if n > sys.maxsize:
raise ValueError("unicodestring4 byte count > sys.maxsize: %d" % n)
data = f.read(n)
if len(data) == n:
return str(data, 'utf-8', 'surrogatepass')
raise ValueError("expected %d bytes in a unicodestring4, but only %d "
"remain" % (n, len(data)))
unicodestring4 = ArgumentDescriptor(
name="unicodestring4",
n=TAKEN_FROM_ARGUMENT4U,
reader=read_unicodestring4,
doc="""A counted Unicode string.
      The first argument is a 4-byte little-endian unsigned int
giving the number of bytes in the string, and the second
argument-- the UTF-8 encoding of the Unicode string --
contains that many bytes.
""")
def read_unicodestring8(f):
r"""
>>> import io
>>> s = 'abcd\uabcd'
>>> enc = s.encode('utf-8')
>>> enc
b'abcd\xea\xaf\x8d'
>>> n = bytes([len(enc)]) + b'\0' * 7 # little-endian 8-byte length
>>> t = read_unicodestring8(io.BytesIO(n + enc + b'junk'))
>>> s == t
True
>>> read_unicodestring8(io.BytesIO(n + enc[:-1]))
Traceback (most recent call last):
...
ValueError: expected 7 bytes in a unicodestring8, but only 6 remain
"""
n = read_uint8(f)
assert n >= 0
if n > sys.maxsize:
raise ValueError("unicodestring8 byte count > sys.maxsize: %d" % n)
data = f.read(n)
if len(data) == n:
return str(data, 'utf-8', 'surrogatepass')
raise ValueError("expected %d bytes in a unicodestring8, but only %d "
"remain" % (n, len(data)))
unicodestring8 = ArgumentDescriptor(
name="unicodestring8",
n=TAKEN_FROM_ARGUMENT8U,
reader=read_unicodestring8,
doc="""A counted Unicode string.
      The first argument is an 8-byte little-endian unsigned int
giving the number of bytes in the string, and the second
argument-- the UTF-8 encoding of the Unicode string --
contains that many bytes.
""")
def read_decimalnl_short(f):
r"""
>>> import io
>>> read_decimalnl_short(io.BytesIO(b"1234\n56"))
1234
>>> read_decimalnl_short(io.BytesIO(b"1234L\n56"))
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: b'1234L'
"""
s = read_stringnl(f, decode=False, stripquotes=False)
# There's a hack for True and False here.
if s == b"00":
return False
elif s == b"01":
return True
return int(s)
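# Editorial illustration (not part of the original module): protocol 0 spells
# booleans with the INT opcode and the literal strings "01"/"00", which
# read_decimalnl_short special-cases before falling back to int() -- see the
# INT opcode documentation further below.
def _read_decimalnl_short_bool_examples():
    r"""
    >>> import io
    >>> read_decimalnl_short(io.BytesIO(b"00\n"))
    False
    >>> read_decimalnl_short(io.BytesIO(b"01\n"))
    True
    """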
def read_decimalnl_long(f):
r"""
>>> import io
>>> read_decimalnl_long(io.BytesIO(b"1234L\n56"))
1234
>>> read_decimalnl_long(io.BytesIO(b"123456789012345678901234L\n6"))
123456789012345678901234
"""
s = read_stringnl(f, decode=False, stripquotes=False)
if s[-1:] == b'L':
s = s[:-1]
return int(s)
decimalnl_short = ArgumentDescriptor(
name='decimalnl_short',
n=UP_TO_NEWLINE,
reader=read_decimalnl_short,
doc="""A newline-terminated decimal integer literal.
This never has a trailing 'L', and the integer fit
in a short Python int on the box where the pickle
was written -- but there's no guarantee it will fit
in a short Python int on the box where the pickle
is read.
""")
decimalnl_long = ArgumentDescriptor(
name='decimalnl_long',
n=UP_TO_NEWLINE,
reader=read_decimalnl_long,
doc="""A newline-terminated decimal integer literal.
This has a trailing 'L', and can represent integers
of any size.
""")
def read_floatnl(f):
r"""
>>> import io
>>> read_floatnl(io.BytesIO(b"-1.25\n6"))
-1.25
"""
s = read_stringnl(f, decode=False, stripquotes=False)
return float(s)
floatnl = ArgumentDescriptor(
name='floatnl',
n=UP_TO_NEWLINE,
reader=read_floatnl,
doc="""A newline-terminated decimal floating literal.
In general this requires 17 significant digits for roundtrip
identity, and pickling then unpickling infinities, NaNs, and
minus zero doesn't work across boxes, or on some boxes even
on itself (e.g., Windows can't read the strings it produces
for infinities or NaNs).
""")
def read_float8(f):
r"""
>>> import io, struct
>>> raw = struct.pack(">d", -1.25)
>>> raw
b'\xbf\xf4\x00\x00\x00\x00\x00\x00'
>>> read_float8(io.BytesIO(raw + b"\n"))
-1.25
"""
data = f.read(8)
if len(data) == 8:
return _unpack(">d", data)[0]
raise ValueError("not enough data in stream to read float8")
float8 = ArgumentDescriptor(
name='float8',
n=8,
reader=read_float8,
doc="""An 8-byte binary representation of a float, big-endian.
The format is unique to Python, and shared with the struct
module (format string '>d') "in theory" (the struct and pickle
implementations don't share the code -- they should). It's
strongly related to the IEEE-754 double format, and, in normal
cases, is in fact identical to the big-endian 754 double format.
On other boxes the dynamic range is limited to that of a 754
double, and "add a half and chop" rounding is used to reduce
the precision to 53 bits. However, even on a 754 box,
infinities, NaNs, and minus zero may not be handled correctly
(may not survive roundtrip pickling intact).
""")
# Protocol 2 formats
from pickle import decode_long
def read_long1(f):
r"""
>>> import io
>>> read_long1(io.BytesIO(b"\x00"))
0
>>> read_long1(io.BytesIO(b"\x02\xff\x00"))
255
>>> read_long1(io.BytesIO(b"\x02\xff\x7f"))
32767
>>> read_long1(io.BytesIO(b"\x02\x00\xff"))
-256
>>> read_long1(io.BytesIO(b"\x02\x00\x80"))
-32768
"""
n = read_uint1(f)
data = f.read(n)
if len(data) != n:
raise ValueError("not enough data in stream to read long1")
return decode_long(data)
long1 = ArgumentDescriptor(
name="long1",
n=TAKEN_FROM_ARGUMENT1,
reader=read_long1,
doc="""A binary long, little-endian, using 1-byte size.
This first reads one byte as an unsigned size, then reads that
many bytes and interprets them as a little-endian 2's-complement long.
If the size is 0, that's taken as a shortcut for the long 0L.
""")
def read_long4(f):
r"""
>>> import io
>>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\xff\x00"))
255
>>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\xff\x7f"))
32767
>>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\x00\xff"))
-256
>>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\x00\x80"))
-32768
    >>> read_long4(io.BytesIO(b"\x00\x00\x00\x00"))
0
"""
n = read_int4(f)
if n < 0:
raise ValueError("long4 byte count < 0: %d" % n)
data = f.read(n)
if len(data) != n:
raise ValueError("not enough data in stream to read long4")
return decode_long(data)
long4 = ArgumentDescriptor(
name="long4",
n=TAKEN_FROM_ARGUMENT4,
reader=read_long4,
doc="""A binary representation of a long, little-endian.
This first reads four bytes as a signed size (but requires the
size to be >= 0), then reads that many bytes and interprets them
as a little-endian 2's-complement long. If the size is 0, that's taken
as a shortcut for the int 0, although LONG1 should really be used
then instead (and in any case where # of bytes < 256).
""")
##############################################################################
# Object descriptors. The stack used by the pickle machine holds objects,
# and in the stack_before and stack_after attributes of OpcodeInfo
# descriptors we need names to describe the various types of objects that can
# appear on the stack.
class StackObject(object):
__slots__ = (
# name of descriptor record, for info only
'name',
# type of object, or tuple of type objects (meaning the object can
# be of any type in the tuple)
'obtype',
# human-readable docs for this kind of stack object; a string
'doc',
)
def __init__(self, name, obtype, doc):
assert isinstance(name, str)
self.name = name
assert isinstance(obtype, type) or isinstance(obtype, tuple)
if isinstance(obtype, tuple):
for contained in obtype:
assert isinstance(contained, type)
self.obtype = obtype
assert isinstance(doc, str)
self.doc = doc
def __repr__(self):
return self.name
pyint = pylong = StackObject(
name='int',
obtype=int,
doc="A Python integer object.")
pyinteger_or_bool = StackObject(
name='int_or_bool',
obtype=(int, bool),
doc="A Python integer or boolean object.")
pybool = StackObject(
name='bool',
obtype=bool,
doc="A Python boolean object.")
pyfloat = StackObject(
name='float',
obtype=float,
doc="A Python float object.")
pybytes_or_str = pystring = StackObject(
name='bytes_or_str',
obtype=(bytes, str),
doc="A Python bytes or (Unicode) string object.")
pybytes = StackObject(
name='bytes',
obtype=bytes,
doc="A Python bytes object.")
pyunicode = StackObject(
name='str',
obtype=str,
doc="A Python (Unicode) string object.")
pynone = StackObject(
name="None",
obtype=type(None),
doc="The Python None object.")
pytuple = StackObject(
name="tuple",
obtype=tuple,
doc="A Python tuple object.")
pylist = StackObject(
name="list",
obtype=list,
doc="A Python list object.")
pydict = StackObject(
name="dict",
obtype=dict,
doc="A Python dict object.")
pyset = StackObject(
name="set",
obtype=set,
doc="A Python set object.")
pyfrozenset = StackObject(
name="frozenset",
obtype=set,
doc="A Python frozenset object.")
anyobject = StackObject(
name='any',
obtype=object,
doc="Any kind of object whatsoever.")
markobject = StackObject(
name="mark",
obtype=StackObject,
doc="""'The mark' is a unique object.
Opcodes that operate on a variable number of objects
generally don't embed the count of objects in the opcode,
or pull it off the stack. Instead the MARK opcode is used
to push a special marker object on the stack, and then
some other opcodes grab all the objects from the top of
the stack down to (but not including) the topmost marker
object.
""")
stackslice = StackObject(
name="stackslice",
obtype=StackObject,
doc="""An object representing a contiguous slice of the stack.
This is used in conjunction with markobject, to represent all
of the stack following the topmost markobject. For example,
the POP_MARK opcode changes the stack from
[..., markobject, stackslice]
to
[...]
      No matter how many objects are on the stack after the topmost
markobject, POP_MARK gets rid of all of them (including the
topmost markobject too).
""")
##############################################################################
# Descriptors for pickle opcodes.
class OpcodeInfo(object):
__slots__ = (
# symbolic name of opcode; a string
'name',
# the code used in a bytestream to represent the opcode; a
# one-character string
'code',
# If the opcode has an argument embedded in the byte string, an
# instance of ArgumentDescriptor specifying its type. Note that
# arg.reader(s) can be used to read and decode the argument from
# the bytestream s, and arg.doc documents the format of the raw
# argument bytes. If the opcode doesn't have an argument embedded
# in the bytestream, arg should be None.
'arg',
# what the stack looks like before this opcode runs; a list
'stack_before',
# what the stack looks like after this opcode runs; a list
'stack_after',
# the protocol number in which this opcode was introduced; an int
'proto',
# human-readable docs for this opcode; a string
'doc',
)
def __init__(self, name, code, arg,
stack_before, stack_after, proto, doc):
assert isinstance(name, str)
self.name = name
assert isinstance(code, str)
assert len(code) == 1
self.code = code
assert arg is None or isinstance(arg, ArgumentDescriptor)
self.arg = arg
assert isinstance(stack_before, list)
for x in stack_before:
assert isinstance(x, StackObject)
self.stack_before = stack_before
assert isinstance(stack_after, list)
for x in stack_after:
assert isinstance(x, StackObject)
self.stack_after = stack_after
assert isinstance(proto, int) and 0 <= proto <= pickle.HIGHEST_PROTOCOL
self.proto = proto
assert isinstance(doc, str)
self.doc = doc
I = OpcodeInfo
opcodes = [
# Ways to spell integers.
I(name='INT',
code='I',
arg=decimalnl_short,
stack_before=[],
stack_after=[pyinteger_or_bool],
proto=0,
doc="""Push an integer or bool.
The argument is a newline-terminated decimal literal string.
The intent may have been that this always fit in a short Python int,
but INT can be generated in pickles written on a 64-bit box that
require a Python long on a 32-bit box. The difference between this
and LONG then is that INT skips a trailing 'L', and produces a short
int whenever possible.
      Another difference arises because, when bool was introduced as a
      distinct type in 2.3, the builtin names True and False were also added
      to 2.2.2, mapping to ints 1 and 0. For compatibility in both directions,
True gets pickled as INT + "I01\\n", and False as INT + "I00\\n".
Leading zeroes are never produced for a genuine integer. The 2.3
(and later) unpicklers special-case these and return bool instead;
earlier unpicklers ignore the leading "0" and return the int.
"""),
I(name='BININT',
code='J',
arg=int4,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a four-byte signed integer.
This handles the full range of Python (short) integers on a 32-bit
box, directly as binary bytes (1 for the opcode and 4 for the integer).
If the integer is non-negative and fits in 1 or 2 bytes, pickling via
BININT1 or BININT2 saves space.
"""),
I(name='BININT1',
code='K',
arg=uint1,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a one-byte unsigned integer.
This is a space optimization for pickling very small non-negative ints,
in range(256).
"""),
I(name='BININT2',
code='M',
arg=uint2,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a two-byte unsigned integer.
This is a space optimization for pickling small positive ints, in
range(256, 2**16). Integers in range(256) can also be pickled via
BININT2, but BININT1 instead saves a byte.
"""),
I(name='LONG',
code='L',
arg=decimalnl_long,
stack_before=[],
stack_after=[pyint],
proto=0,
doc="""Push a long integer.
The same as INT, except that the literal ends with 'L', and always
      unpickles to a Python long. There doesn't seem to be a real purpose to the
trailing 'L'.
Note that LONG takes time quadratic in the number of digits when
unpickling (this is simply due to the nature of decimal->binary
conversion). Proto 2 added linear-time (in C; still quadratic-time
in Python) LONG1 and LONG4 opcodes.
"""),
I(name="LONG1",
code='\x8a',
arg=long1,
stack_before=[],
stack_after=[pyint],
proto=2,
doc="""Long integer using one-byte length.
A more efficient encoding of a Python long; the long1 encoding
says it all."""),
I(name="LONG4",
code='\x8b',
arg=long4,
stack_before=[],
stack_after=[pyint],
proto=2,
doc="""Long integer using found-byte length.
A more efficient encoding of a Python long; the long4 encoding
says it all."""),
# Ways to spell strings (8-bit, not Unicode).
I(name='STRING',
code='S',
arg=stringnl,
stack_before=[],
stack_after=[pybytes_or_str],
proto=0,
doc="""Push a Python string object.
The argument is a repr-style string, with bracketing quote characters,
and perhaps embedded escapes. The argument extends until the next
newline character. These are usually decoded into a str instance
using the encoding given to the Unpickler constructor. or the default,
'ASCII'. If the encoding given was 'bytes' however, they will be
decoded as bytes object instead.
"""),
I(name='BINSTRING',
code='T',
arg=string4,
stack_before=[],
stack_after=[pybytes_or_str],
proto=1,
doc="""Push a Python string object.
There are two arguments: the first is a 4-byte little-endian
signed int giving the number of bytes in the string, and the
second is that many bytes, which are taken literally as the string
content. These are usually decoded into a str instance using the
      encoding given to the Unpickler constructor, or the default,
      'ASCII'. If the encoding given was 'bytes' however, they will be
      decoded as a bytes object instead.
"""),
I(name='SHORT_BINSTRING',
code='U',
arg=string1,
stack_before=[],
stack_after=[pybytes_or_str],
proto=1,
doc="""Push a Python string object.
There are two arguments: the first is a 1-byte unsigned int giving
the number of bytes in the string, and the second is that many
bytes, which are taken literally as the string content. These are
usually decoded into a str instance using the encoding given to
      the Unpickler constructor, or the default, 'ASCII'. If the
      encoding given was 'bytes' however, they will be decoded as a
      bytes object instead.
"""),
# Bytes (protocol 3 only; older protocols don't support bytes at all)
I(name='BINBYTES',
code='B',
arg=bytes4,
stack_before=[],
stack_after=[pybytes],
proto=3,
doc="""Push a Python bytes object.
There are two arguments: the first is a 4-byte little-endian unsigned int
giving the number of bytes, and the second is that many bytes, which are
taken literally as the bytes content.
"""),
I(name='SHORT_BINBYTES',
code='C',
arg=bytes1,
stack_before=[],
stack_after=[pybytes],
proto=3,
doc="""Push a Python bytes object.
There are two arguments: the first is a 1-byte unsigned int giving
the number of bytes, and the second is that many bytes, which are taken
literally as the string content.
"""),
I(name='BINBYTES8',
code='\x8e',
arg=bytes8,
stack_before=[],
stack_after=[pybytes],
proto=4,
doc="""Push a Python bytes object.
There are two arguments: the first is an 8-byte unsigned int giving
the number of bytes in the string, and the second is that many bytes,
which are taken literally as the string content.
"""),
# Ways to spell None.
I(name='NONE',
code='N',
arg=None,
stack_before=[],
stack_after=[pynone],
proto=0,
doc="Push None on the stack."),
# Ways to spell bools, starting with proto 2. See INT for how this was
# done before proto 2.
I(name='NEWTRUE',
code='\x88',
arg=None,
stack_before=[],
stack_after=[pybool],
proto=2,
doc="""True.
Push True onto the stack."""),
I(name='NEWFALSE',
code='\x89',
arg=None,
stack_before=[],
stack_after=[pybool],
proto=2,
doc="""True.
Push False onto the stack."""),
# Ways to spell Unicode strings.
I(name='UNICODE',
code='V',
arg=unicodestringnl,
stack_before=[],
stack_after=[pyunicode],
proto=0, # this may be pure-text, but it's a later addition
doc="""Push a Python Unicode string object.
The argument is a raw-unicode-escape encoding of a Unicode string,
and so may contain embedded escape sequences. The argument extends
until the next newline character.
"""),
I(name='SHORT_BINUNICODE',
code='\x8c',
arg=unicodestring1,
stack_before=[],
stack_after=[pyunicode],
proto=4,
doc="""Push a Python Unicode string object.
      There are two arguments: the first is a 1-byte unsigned int
giving the number of bytes in the string. The second is that many
bytes, and is the UTF-8 encoding of the Unicode string.
"""),
I(name='BINUNICODE',
code='X',
arg=unicodestring4,
stack_before=[],
stack_after=[pyunicode],
proto=1,
doc="""Push a Python Unicode string object.
There are two arguments: the first is a 4-byte little-endian unsigned int
giving the number of bytes in the string. The second is that many
bytes, and is the UTF-8 encoding of the Unicode string.
"""),
I(name='BINUNICODE8',
code='\x8d',
arg=unicodestring8,
stack_before=[],
stack_after=[pyunicode],
proto=4,
doc="""Push a Python Unicode string object.
      There are two arguments: the first is an 8-byte little-endian unsigned int
giving the number of bytes in the string. The second is that many
bytes, and is the UTF-8 encoding of the Unicode string.
"""),
# Ways to spell floats.
I(name='FLOAT',
code='F',
arg=floatnl,
stack_before=[],
stack_after=[pyfloat],
proto=0,
doc="""Newline-terminated decimal float literal.
The argument is repr(a_float), and in general requires 17 significant
digits for roundtrip conversion to be an identity (this is so for
IEEE-754 double precision values, which is what Python float maps to
on most boxes).
In general, FLOAT cannot be used to transport infinities, NaNs, or
minus zero across boxes (or even on a single box, if the platform C
library can't read the strings it produces for such things -- Windows
is like that), but may do less damage than BINFLOAT on boxes with
greater precision or dynamic range than IEEE-754 double.
"""),
I(name='BINFLOAT',
code='G',
arg=float8,
stack_before=[],
stack_after=[pyfloat],
proto=1,
doc="""Float stored in binary form, with 8 bytes of data.
This generally requires less than half the space of FLOAT encoding.
In general, BINFLOAT cannot be used to transport infinities, NaNs, or
minus zero, raises an exception if the exponent exceeds the range of
an IEEE-754 double, and retains no more than 53 bits of precision (if
there are more than that, "add a half and chop" rounding is used to
cut it back to 53 significant bits).
"""),
# Ways to build lists.
I(name='EMPTY_LIST',
code=']',
arg=None,
stack_before=[],
stack_after=[pylist],
proto=1,
doc="Push an empty list."),
I(name='APPEND',
code='a',
arg=None,
stack_before=[pylist, anyobject],
stack_after=[pylist],
proto=0,
doc="""Append an object to a list.
Stack before: ... pylist anyobject
Stack after: ... pylist+[anyobject]
although pylist is really extended in-place.
"""),
I(name='APPENDS',
code='e',
arg=None,
stack_before=[pylist, markobject, stackslice],
stack_after=[pylist],
proto=1,
doc="""Extend a list by a slice of stack objects.
Stack before: ... pylist markobject stackslice
Stack after: ... pylist+stackslice
although pylist is really extended in-place.
"""),
I(name='LIST',
code='l',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pylist],
proto=0,
doc="""Build a list out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python list, which single list object replaces all of the
stack from the topmost markobject onward. For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... [1, 2, 3, 'abc']
"""),
# Ways to build tuples.
I(name='EMPTY_TUPLE',
code=')',
arg=None,
stack_before=[],
stack_after=[pytuple],
proto=1,
doc="Push an empty tuple."),
I(name='TUPLE',
code='t',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pytuple],
proto=0,
doc="""Build a tuple out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python tuple, which single tuple object replaces all of the
stack from the topmost markobject onward. For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... (1, 2, 3, 'abc')
"""),
I(name='TUPLE1',
code='\x85',
arg=None,
stack_before=[anyobject],
stack_after=[pytuple],
proto=2,
doc="""Build a one-tuple out of the topmost item on the stack.
This code pops one value off the stack and pushes a tuple of
length 1 whose one item is that value back onto it. In other
words:
stack[-1] = tuple(stack[-1:])
"""),
I(name='TUPLE2',
code='\x86',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[pytuple],
proto=2,
doc="""Build a two-tuple out of the top two items on the stack.
This code pops two values off the stack and pushes a tuple of
length 2 whose items are those values back onto it. In other
words:
stack[-2:] = [tuple(stack[-2:])]
"""),
I(name='TUPLE3',
code='\x87',
arg=None,
stack_before=[anyobject, anyobject, anyobject],
stack_after=[pytuple],
proto=2,
doc="""Build a three-tuple out of the top three items on the stack.
This code pops three values off the stack and pushes a tuple of
length 3 whose items are those values back onto it. In other
words:
stack[-3:] = [tuple(stack[-3:])]
"""),
# Ways to build dicts.
I(name='EMPTY_DICT',
code='}',
arg=None,
stack_before=[],
stack_after=[pydict],
proto=1,
doc="Push an empty dict."),
I(name='DICT',
code='d',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pydict],
proto=0,
doc="""Build a dict out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python dict, which single dict object replaces all of the
stack from the topmost markobject onward. The stack slice alternates
key, value, key, value, .... For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... {1: 2, 3: 'abc'}
"""),
I(name='SETITEM',
code='s',
arg=None,
stack_before=[pydict, anyobject, anyobject],
stack_after=[pydict],
proto=0,
doc="""Add a key+value pair to an existing dict.
Stack before: ... pydict key value
Stack after: ... pydict
where pydict has been modified via pydict[key] = value.
"""),
I(name='SETITEMS',
code='u',
arg=None,
stack_before=[pydict, markobject, stackslice],
stack_after=[pydict],
proto=1,
doc="""Add an arbitrary number of key+value pairs to an existing dict.
The slice of the stack following the topmost markobject is taken as
an alternating sequence of keys and values, added to the dict
immediately under the topmost markobject. Everything at and after the
topmost markobject is popped, leaving the mutated dict at the top
of the stack.
Stack before: ... pydict markobject key_1 value_1 ... key_n value_n
Stack after: ... pydict
where pydict has been modified via pydict[key_i] = value_i for i in
1, 2, ..., n, and in that order.
"""),
# Ways to build sets
I(name='EMPTY_SET',
code='\x8f',
arg=None,
stack_before=[],
stack_after=[pyset],
proto=4,
doc="Push an empty set."),
I(name='ADDITEMS',
code='\x90',
arg=None,
stack_before=[pyset, markobject, stackslice],
stack_after=[pyset],
proto=4,
doc="""Add an arbitrary number of items to an existing set.
The slice of the stack following the topmost markobject is taken as
a sequence of items, added to the set immediately under the topmost
markobject. Everything at and after the topmost markobject is popped,
leaving the mutated set at the top of the stack.
Stack before: ... pyset markobject item_1 ... item_n
Stack after: ... pyset
      where pyset has been modified via pyset.add(item_i) for i in
1, 2, ..., n, and in that order.
"""),
# Way to build frozensets
I(name='FROZENSET',
code='\x91',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pyfrozenset],
proto=4,
doc="""Build a frozenset out of the topmost slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python frozenset, which single frozenset object replaces all
of the stack from the topmost markobject onward. For example,
Stack before: ... markobject 1 2 3
Stack after: ... frozenset({1, 2, 3})
"""),
# Stack manipulation.
I(name='POP',
code='0',
arg=None,
stack_before=[anyobject],
stack_after=[],
proto=0,
doc="Discard the top stack item, shrinking the stack by one item."),
I(name='DUP',
code='2',
arg=None,
stack_before=[anyobject],
stack_after=[anyobject, anyobject],
proto=0,
doc="Push the top stack item onto the stack again, duplicating it."),
I(name='MARK',
code='(',
arg=None,
stack_before=[],
stack_after=[markobject],
proto=0,
doc="""Push markobject onto the stack.
markobject is a unique object, used by other opcodes to identify a
region of the stack containing a variable number of objects for them
to work on. See markobject.doc for more detail.
"""),
I(name='POP_MARK',
code='1',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[],
proto=1,
doc="""Pop all the stack objects at and above the topmost markobject.
When an opcode using a variable number of stack objects is done,
POP_MARK is used to remove those objects, and to remove the markobject
that delimited their starting position on the stack.
"""),
# Memo manipulation. There are really only two operations (get and put),
# each in all-text, "short binary", and "long binary" flavors.
I(name='GET',
code='g',
arg=decimalnl_short,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Read an object from the memo and push it on the stack.
The index of the memo object to push is given by the newline-terminated
decimal string following. BINGET and LONG_BINGET are space-optimized
versions.
"""),
I(name='BINGET',
code='h',
arg=uint1,
stack_before=[],
stack_after=[anyobject],
proto=1,
doc="""Read an object from the memo and push it on the stack.
The index of the memo object to push is given by the 1-byte unsigned
integer following.
"""),
I(name='LONG_BINGET',
code='j',
arg=uint4,
stack_before=[],
stack_after=[anyobject],
proto=1,
doc="""Read an object from the memo and push it on the stack.
The index of the memo object to push is given by the 4-byte unsigned
little-endian integer following.
"""),
I(name='PUT',
code='p',
arg=decimalnl_short,
stack_before=[],
stack_after=[],
proto=0,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the newline-
terminated decimal string following. BINPUT and LONG_BINPUT are
space-optimized versions.
"""),
I(name='BINPUT',
code='q',
arg=uint1,
stack_before=[],
stack_after=[],
proto=1,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the 1-byte
unsigned integer following.
"""),
I(name='LONG_BINPUT',
code='r',
arg=uint4,
stack_before=[],
stack_after=[],
proto=1,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the 4-byte
unsigned little-endian integer following.
"""),
I(name='MEMOIZE',
code='\x94',
arg=None,
stack_before=[anyobject],
stack_after=[anyobject],
proto=4,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write is the number of
elements currently present in the memo.
"""),
# Access the extension registry (predefined objects). Akin to the GET
# family.
I(name='EXT1',
code='\x82',
arg=uint1,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
This code and the similar EXT2 and EXT4 allow using a registry
of popular objects that are pickled by name, typically classes.
It is envisioned that through a global negotiation and
registration process, third parties can set up a mapping between
ints and object names.
In order to guarantee pickle interchangeability, the extension
code registry ought to be global, although a range of codes may
be reserved for private use.
EXT1 has a 1-byte integer argument. This is used to index into the
extension registry, and the object at that index is pushed on the stack.
"""),
I(name='EXT2',
code='\x83',
arg=uint2,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
See EXT1. EXT2 has a two-byte integer argument.
"""),
I(name='EXT4',
code='\x84',
arg=int4,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
See EXT1. EXT4 has a four-byte integer argument.
"""),
# Push a class object, or module function, on the stack, via its module
# and name.
I(name='GLOBAL',
code='c',
arg=stringnl_noescape_pair,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Push a global object (module.attr) on the stack.
Two newline-terminated strings follow the GLOBAL opcode. The first is
taken as a module name, and the second as a class name. The class
object module.class is pushed on the stack. More accurately, the
object returned by self.find_class(module, class) is pushed on the
stack, so unpickling subclasses can override this form of lookup.
"""),
I(name='STACK_GLOBAL',
code='\x93',
arg=None,
stack_before=[pyunicode, pyunicode],
stack_after=[anyobject],
proto=4,
doc="""Push a global object (module.attr) on the stack.
"""),
# Ways to build objects of classes pickle doesn't know about directly
# (user-defined classes). I despair of documenting this accurately
# and comprehensibly -- you really have to read the pickle code to
# find all the special cases.
I(name='REDUCE',
code='R',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=0,
doc="""Push an object built from a callable and an argument tuple.
The opcode is named to remind of the __reduce__() method.
Stack before: ... callable pytuple
Stack after: ... callable(*pytuple)
The callable and the argument tuple are the first two items returned
by a __reduce__ method. Applying the callable to the argtuple is
supposed to reproduce the original object, or at least get it started.
If the __reduce__ method returns a 3-tuple, the last component is an
argument to be passed to the object's __setstate__, and then the REDUCE
opcode is followed by code to create setstate's argument, and then a
BUILD opcode to apply __setstate__ to that argument.
If not isinstance(callable, type), REDUCE complains unless the
callable has been registered with the copyreg module's
safe_constructors dict, or the callable has a magic
'__safe_for_unpickling__' attribute with a true value. I'm not sure
why it does this, but I've sure seen this complaint often enough when
I didn't want to <wink>.
"""),
I(name='BUILD',
code='b',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=0,
doc="""Finish building an object, via __setstate__ or dict update.
Stack before: ... anyobject argument
Stack after: ... anyobject
where anyobject may have been mutated, as follows:
If the object has a __setstate__ method,
anyobject.__setstate__(argument)
is called.
Else the argument must be a dict, the object must have a __dict__, and
the object is updated via
anyobject.__dict__.update(argument)
"""),
I(name='INST',
code='i',
arg=stringnl_noescape_pair,
stack_before=[markobject, stackslice],
stack_after=[anyobject],
proto=0,
doc="""Build a class instance.
This is the protocol 0 version of protocol 1's OBJ opcode.
INST is followed by two newline-terminated strings, giving a
module and class name, just as for the GLOBAL opcode (and see
GLOBAL for more details about that). self.find_class(module, name)
is used to get a class object.
In addition, all the objects on the stack following the topmost
markobject are gathered into a tuple and popped (along with the
topmost markobject), just as for the TUPLE opcode.
Now it gets complicated. If all of these are true:
+ The argtuple is empty (markobject was at the top of the stack
at the start).
+ The class object does not have a __getinitargs__ attribute.
then we want to create an old-style class instance without invoking
its __init__() method (pickle has waffled on this over the years; not
calling __init__() is current wisdom). In this case, an instance of
an old-style dummy class is created, and then we try to rebind its
__class__ attribute to the desired class object. If this succeeds,
the new instance object is pushed on the stack, and we're done.
Else (the argtuple is not empty, it's not an old-style class object,
or the class object does have a __getinitargs__ attribute), the code
first insists that the class object have a __safe_for_unpickling__
attribute. Unlike as for the __safe_for_unpickling__ check in REDUCE,
it doesn't matter whether this attribute has a true or false value, it
only matters whether it exists (XXX this is a bug). If
__safe_for_unpickling__ doesn't exist, UnpicklingError is raised.
Else (the class object does have a __safe_for_unpickling__ attr),
the class object obtained from INST's arguments is applied to the
argtuple obtained from the stack, and the resulting instance object
is pushed on the stack.
NOTE: checks for __safe_for_unpickling__ went away in Python 2.3.
NOTE: the distinction between old-style and new-style classes does
not make sense in Python 3.
"""),
I(name='OBJ',
code='o',
arg=None,
stack_before=[markobject, anyobject, stackslice],
stack_after=[anyobject],
proto=1,
doc="""Build a class instance.
This is the protocol 1 version of protocol 0's INST opcode, and is
very much like it. The major difference is that the class object
is taken off the stack, allowing it to be retrieved from the memo
repeatedly if several instances of the same class are created. This
can be much more efficient (in both time and space) than repeatedly
embedding the module and class names in INST opcodes.
Unlike INST, OBJ takes no arguments from the opcode stream. Instead
the class object is taken off the stack, immediately above the
topmost markobject:
Stack before: ... markobject classobject stackslice
Stack after: ... new_instance_object
As for INST, the remainder of the stack above the markobject is
gathered into an argument tuple, and then the logic seems identical,
except that no __safe_for_unpickling__ check is done (XXX this is
a bug). See INST for the gory details.
NOTE: In Python 2.3, INST and OBJ are identical except for how they
get the class object. That was always the intent; the implementations
had diverged for accidental reasons.
"""),
I(name='NEWOBJ',
code='\x81',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=2,
doc="""Build an object instance.
The stack before should be thought of as containing a class
object followed by an argument tuple (the tuple being the stack
top). Call these cls and args. They are popped off the stack,
and the value returned by cls.__new__(cls, *args) is pushed back
onto the stack.
"""),
I(name='NEWOBJ_EX',
code='\x92',
arg=None,
stack_before=[anyobject, anyobject, anyobject],
stack_after=[anyobject],
proto=4,
doc="""Build an object instance.
The stack before should be thought of as containing a class
object followed by an argument tuple and by a keyword argument dict
      (the dict being the stack top). Call these cls, args, and kwargs. They
      are popped off the stack, and the value returned by
      cls.__new__(cls, *args, **kwargs) is pushed back onto the stack.
"""),
# Machine control.
I(name='PROTO',
code='\x80',
arg=uint1,
stack_before=[],
stack_after=[],
proto=2,
doc="""Protocol version indicator.
For protocol 2 and above, a pickle must start with this opcode.
The argument is the protocol version, an int in range(2, 256).
"""),
I(name='STOP',
code='.',
arg=None,
stack_before=[anyobject],
stack_after=[],
proto=0,
doc="""Stop the unpickling machine.
Every pickle ends with this opcode. The object at the top of the stack
is popped, and that's the result of unpickling. The stack should be
empty then.
"""),
# Framing support.
I(name='FRAME',
code='\x95',
arg=uint8,
stack_before=[],
stack_after=[],
proto=4,
doc="""Indicate the beginning of a new frame.
The unpickler may use this opcode to safely prefetch data from its
underlying stream.
"""),
# Ways to deal with persistent IDs.
I(name='PERSID',
code='P',
arg=stringnl_noescape,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Push an object identified by a persistent ID.
The pickle module doesn't define what a persistent ID means. PERSID's
argument is a newline-terminated str-style (no embedded escapes, no
bracketing quote characters) string, which *is* "the persistent ID".
The unpickler passes this string to self.persistent_load(). Whatever
object that returns is pushed on the stack. There is no implementation
of persistent_load() in Python's unpickler: it must be supplied by an
unpickler subclass.
"""),
I(name='BINPERSID',
code='Q',
arg=None,
stack_before=[anyobject],
stack_after=[anyobject],
proto=1,
doc="""Push an object identified by a persistent ID.
Like PERSID, except the persistent ID is popped off the stack (instead
of being a string embedded in the opcode bytestream). The persistent
ID is passed to self.persistent_load(), and whatever object that
returns is pushed on the stack. See PERSID for more detail.
"""),
]
del I
# Verify uniqueness of .name and .code members.
name2i = {}
code2i = {}
for i, d in enumerate(opcodes):
if d.name in name2i:
raise ValueError("repeated name %r at indices %d and %d" %
(d.name, name2i[d.name], i))
if d.code in code2i:
raise ValueError("repeated code %r at indices %d and %d" %
(d.code, code2i[d.code], i))
name2i[d.name] = i
code2i[d.code] = i
del name2i, code2i, i, d
##############################################################################
# Build a code2op dict, mapping opcode characters to OpcodeInfo records.
# Also ensure we've got the same stuff as pickle.py, although the
# introspection here is dicey.
code2op = {}
for d in opcodes:
code2op[d.code] = d
del d
def assure_pickle_consistency(verbose=False):
copy = code2op.copy()
for name in pickle.__all__:
if not re.match("[A-Z][A-Z0-9_]+$", name):
if verbose:
print("skipping %r: it doesn't look like an opcode name" % name)
continue
picklecode = getattr(pickle, name)
if not isinstance(picklecode, bytes) or len(picklecode) != 1:
if verbose:
print(("skipping %r: value %r doesn't look like a pickle "
"code" % (name, picklecode)))
continue
picklecode = picklecode.decode("latin-1")
if picklecode in copy:
if verbose:
print("checking name %r w/ code %r for consistency" % (
name, picklecode))
d = copy[picklecode]
if d.name != name:
raise ValueError("for pickle code %r, pickle.py uses name %r "
"but we're using name %r" % (picklecode,
name,
d.name))
# Forget this one. Any left over in copy at the end are a problem
# of a different kind.
del copy[picklecode]
else:
raise ValueError("pickle.py appears to have a pickle opcode with "
"name %r and code %r, but we don't" %
(name, picklecode))
if copy:
msg = ["we appear to have pickle opcodes that pickle.py doesn't have:"]
for code, d in copy.items():
msg.append(" name %r with code %r" % (d.name, code))
raise ValueError("\n".join(msg))
assure_pickle_consistency()
del assure_pickle_consistency
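# Editorial illustration (not part of the original module): code2op maps each
# one-character opcode code to its OpcodeInfo record; this is how _genops
# below recognizes the bytes it reads from a pickle stream.
def _code2op_examples():
    """
    >>> code2op['.'].name
    'STOP'
    >>> code2op['('].name
    'MARK'
    """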
##############################################################################
# A pickle opcode generator.
def _genops(data, yield_end_pos=False):
if isinstance(data, bytes_types):
data = io.BytesIO(data)
if hasattr(data, "tell"):
getpos = data.tell
else:
getpos = lambda: None
while True:
pos = getpos()
code = data.read(1)
opcode = code2op.get(code.decode("latin-1"))
if opcode is None:
if code == b"":
raise ValueError("pickle exhausted before seeing STOP")
else:
raise ValueError("at position %s, opcode %r unknown" % (
"<unknown>" if pos is None else pos,
code))
if opcode.arg is None:
arg = None
else:
arg = opcode.arg.reader(data)
if yield_end_pos:
yield opcode, arg, pos, getpos()
else:
yield opcode, arg, pos
if code == b'.':
assert opcode.name == 'STOP'
break
def genops(pickle):
"""Generate all the opcodes in a pickle.
'pickle' is a file-like object, or string, containing the pickle.
Each opcode in the pickle is generated, from the current pickle position,
stopping after a STOP opcode is delivered. A triple is generated for
each opcode:
opcode, arg, pos
opcode is an OpcodeInfo record, describing the current opcode.
If the opcode has an argument embedded in the pickle, arg is its decoded
value, as a Python object. If the opcode doesn't have an argument, arg
is None.
If the pickle has a tell() method, pos was the value of pickle.tell()
before reading the current opcode. If the pickle is a bytes object,
it's wrapped in a BytesIO object, and the latter's tell() result is
used. Else (the pickle doesn't have a tell(), and it's not obvious how
to query its current position) pos is None.
"""
return _genops(pickle)
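# Editorial illustration (not part of the original module): b"N." is the
# protocol 0 pickle of None -- a NONE opcode followed by STOP -- so genops()
# yields exactly two (opcode, arg, pos) triples for it.
def _genops_examples():
    """
    >>> [(op.name, arg, pos) for op, arg, pos in genops(b"N.")]
    [('NONE', None, 0), ('STOP', None, 1)]
    """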
##############################################################################
# A pickle optimizer.
def optimize(p):
'Optimize a pickle string by removing unused PUT opcodes'
put = 'PUT'
get = 'GET'
oldids = set() # set of all PUT ids
newids = {} # set of ids used by a GET opcode
opcodes = [] # (op, idx) or (pos, end_pos)
proto = 0
protoheader = b''
for opcode, arg, pos, end_pos in _genops(p, yield_end_pos=True):
if 'PUT' in opcode.name:
oldids.add(arg)
opcodes.append((put, arg))
elif opcode.name == 'MEMOIZE':
idx = len(oldids)
oldids.add(idx)
opcodes.append((put, idx))
elif 'FRAME' in opcode.name:
pass
elif 'GET' in opcode.name:
if opcode.proto > proto:
proto = opcode.proto
newids[arg] = None
opcodes.append((get, arg))
elif opcode.name == 'PROTO':
if arg > proto:
proto = arg
if pos == 0:
protoheader = p[pos: end_pos]
else:
opcodes.append((pos, end_pos))
else:
opcodes.append((pos, end_pos))
del oldids
# Copy the opcodes except for PUTS without a corresponding GET
out = io.BytesIO()
# Write the PROTO header before any framing
out.write(protoheader)
pickler = pickle._Pickler(out, proto)
if proto >= 4:
pickler.framer.start_framing()
idx = 0
for op, arg in opcodes:
if op is put:
if arg not in newids:
continue
data = pickler.put(idx)
newids[arg] = idx
idx += 1
elif op is get:
data = pickler.get(newids[arg])
else:
data = p[op:arg]
pickler.framer.commit_frame()
pickler.write(data)
pickler.framer.end_framing()
return out.getvalue()
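# Editorial illustration (not part of the original module): for a pickle of a
# non-recursive object the memo is never read back, so optimize() can drop the
# PUT/BINPUT opcodes; the result is smaller and unpickles to the same value.
def _optimize_examples():
    """
    >>> p = pickle.dumps([1, 2, 3], 2)
    >>> pickle.loads(optimize(p))
    [1, 2, 3]
    >>> len(optimize(p)) < len(p)
    True
    """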
##############################################################################
# A symbolic pickle disassembler.
def dis(pickle, out=None, memo=None, indentlevel=4, annotate=0):
"""Produce a symbolic disassembly of a pickle.
'pickle' is a file-like object, or string, containing a (at least one)
pickle. The pickle is disassembled from the current position, through
the first STOP opcode encountered.
Optional arg 'out' is a file-like object to which the disassembly is
printed. It defaults to sys.stdout.
Optional arg 'memo' is a Python dict, used as the pickle's memo. It
may be mutated by dis(), if the pickle contains PUT or BINPUT opcodes.
Passing the same memo object to another dis() call then allows disassembly
to proceed across multiple pickles that were all created by the same
pickler with the same memo. Ordinarily you don't need to worry about this.
Optional arg 'indentlevel' is the number of blanks by which to indent
a new MARK level. It defaults to 4.
Optional arg 'annotate' if nonzero instructs dis() to add short
description of the opcode on each line of disassembled output.
The value given to 'annotate' must be an integer and is used as a
hint for the column where annotation should start. The default
value is 0, meaning no annotations.
In addition to printing the disassembly, some sanity checks are made:
+ All embedded opcode arguments "make sense".
+ Explicit and implicit pop operations have enough items on the stack.
+ When an opcode implicitly refers to a markobject, a markobject is
actually on the stack.
+ A memo entry isn't referenced before it's defined.
+ The markobject isn't stored in the memo.
+ A memo entry isn't redefined.
"""
# Most of the hair here is for sanity checks, but most of it is needed
# anyway to detect when a protocol 0 POP takes a MARK off the stack
# (which in turn is needed to indent MARK blocks correctly).
stack = [] # crude emulation of unpickler stack
if memo is None:
memo = {} # crude emulation of unpickler memo
maxproto = -1 # max protocol number seen
markstack = [] # bytecode positions of MARK opcodes
indentchunk = ' ' * indentlevel
errormsg = None
annocol = annotate # column hint for annotations
for opcode, arg, pos in genops(pickle):
if pos is not None:
print("%5d:" % pos, end=' ', file=out)
line = "%-4s %s%s" % (repr(opcode.code)[1:-1],
indentchunk * len(markstack),
opcode.name)
maxproto = max(maxproto, opcode.proto)
before = opcode.stack_before # don't mutate
after = opcode.stack_after # don't mutate
numtopop = len(before)
# See whether a MARK should be popped.
markmsg = None
if markobject in before or (opcode.name == "POP" and
stack and
stack[-1] is markobject):
assert markobject not in after
if __debug__:
if markobject in before:
assert before[-1] is stackslice
if markstack:
markpos = markstack.pop()
if markpos is None:
markmsg = "(MARK at unknown opcode offset)"
else:
markmsg = "(MARK at %d)" % markpos
# Pop everything at and after the topmost markobject.
while stack[-1] is not markobject:
stack.pop()
stack.pop()
# Stop later code from popping too much.
try:
numtopop = before.index(markobject)
except ValueError:
assert opcode.name == "POP"
numtopop = 0
else:
errormsg = markmsg = "no MARK exists on stack"
# Check for correct memo usage.
if opcode.name in ("PUT", "BINPUT", "LONG_BINPUT", "MEMOIZE"):
if opcode.name == "MEMOIZE":
memo_idx = len(memo)
markmsg = "(as %d)" % memo_idx
else:
assert arg is not None
memo_idx = arg
if memo_idx in memo:
errormsg = "memo key %r already defined" % arg
elif not stack:
errormsg = "stack is empty -- can't store into memo"
elif stack[-1] is markobject:
errormsg = "can't store markobject in the memo"
else:
memo[memo_idx] = stack[-1]
elif opcode.name in ("GET", "BINGET", "LONG_BINGET"):
if arg in memo:
assert len(after) == 1
after = [memo[arg]] # for better stack emulation
else:
errormsg = "memo key %r has never been stored into" % arg
if arg is not None or markmsg:
# make a mild effort to align arguments
line += ' ' * (10 - len(opcode.name))
if arg is not None:
line += ' ' + repr(arg)
if markmsg:
line += ' ' + markmsg
if annotate:
line += ' ' * (annocol - len(line))
# make a mild effort to align annotations
annocol = len(line)
if annocol > 50:
annocol = annotate
line += ' ' + opcode.doc.split('\n', 1)[0]
print(line, file=out)
if errormsg:
# Note that we delayed complaining until the offending opcode
# was printed.
raise ValueError(errormsg)
# Emulate the stack effects.
if len(stack) < numtopop:
raise ValueError("tries to pop %d items from stack with "
"only %d items" % (numtopop, len(stack)))
if numtopop:
del stack[-numtopop:]
if markobject in after:
assert markobject not in before
markstack.append(pos)
stack.extend(after)
print("highest protocol among opcodes =", maxproto, file=out)
if stack:
raise ValueError("stack not empty after STOP: %r" % stack)
# For use in the doctest, simply as an example of a class to pickle.
class _Example:
def __init__(self, value):
self.value = value
_dis_test = r"""
>>> import pickle
>>> x = [1, 2, (3, 4), {b'abc': "def"}]
>>> pkl0 = pickle.dumps(x, 0)
>>> dis(pkl0)
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: L LONG 1
9: a APPEND
10: L LONG 2
14: a APPEND
15: ( MARK
16: L LONG 3
20: L LONG 4
24: t TUPLE (MARK at 15)
25: p PUT 1
28: a APPEND
29: ( MARK
30: d DICT (MARK at 29)
31: p PUT 2
34: c GLOBAL '_codecs encode'
50: p PUT 3
53: ( MARK
54: V UNICODE 'abc'
59: p PUT 4
62: V UNICODE 'latin1'
70: p PUT 5
73: t TUPLE (MARK at 53)
74: p PUT 6
77: R REDUCE
78: p PUT 7
81: V UNICODE 'def'
86: p PUT 8
89: s SETITEM
90: a APPEND
91: . STOP
highest protocol among opcodes = 0
Try again with a "binary" pickle.
>>> pkl1 = pickle.dumps(x, 1)
>>> dis(pkl1)
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: K BININT1 1
6: K BININT1 2
8: ( MARK
9: K BININT1 3
11: K BININT1 4
13: t TUPLE (MARK at 8)
14: q BINPUT 1
16: } EMPTY_DICT
17: q BINPUT 2
19: c GLOBAL '_codecs encode'
35: q BINPUT 3
37: ( MARK
38: X BINUNICODE 'abc'
46: q BINPUT 4
48: X BINUNICODE 'latin1'
59: q BINPUT 5
61: t TUPLE (MARK at 37)
62: q BINPUT 6
64: R REDUCE
65: q BINPUT 7
67: X BINUNICODE 'def'
75: q BINPUT 8
77: s SETITEM
78: e APPENDS (MARK at 3)
79: . STOP
highest protocol among opcodes = 1
Exercise the INST/OBJ/BUILD family.
>>> import pickletools
>>> dis(pickle.dumps(pickletools.dis, 0))
0: c GLOBAL 'pickletools dis'
17: p PUT 0
20: . STOP
highest protocol among opcodes = 0
>>> from pickletools import _Example
>>> x = [_Example(42)] * 2
>>> dis(pickle.dumps(x, 0))
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: c GLOBAL 'copy_reg _reconstructor'
30: p PUT 1
33: ( MARK
34: c GLOBAL 'pickletools _Example'
56: p PUT 2
59: c GLOBAL '__builtin__ object'
79: p PUT 3
82: N NONE
83: t TUPLE (MARK at 33)
84: p PUT 4
87: R REDUCE
88: p PUT 5
91: ( MARK
92: d DICT (MARK at 91)
93: p PUT 6
96: V UNICODE 'value'
103: p PUT 7
106: L LONG 42
111: s SETITEM
112: b BUILD
113: a APPEND
114: g GET 5
117: a APPEND
118: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(x, 1))
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: c GLOBAL 'copy_reg _reconstructor'
29: q BINPUT 1
31: ( MARK
32: c GLOBAL 'pickletools _Example'
54: q BINPUT 2
56: c GLOBAL '__builtin__ object'
76: q BINPUT 3
78: N NONE
79: t TUPLE (MARK at 31)
80: q BINPUT 4
82: R REDUCE
83: q BINPUT 5
85: } EMPTY_DICT
86: q BINPUT 6
88: X BINUNICODE 'value'
98: q BINPUT 7
100: K BININT1 42
102: s SETITEM
103: b BUILD
104: h BINGET 5
106: e APPENDS (MARK at 3)
107: . STOP
highest protocol among opcodes = 1
Try "the canonical" recursive-object test.
>>> L = []
>>> T = L,
>>> L.append(T)
>>> L[0] is T
True
>>> T[0] is L
True
>>> L[0][0] is L
True
>>> T[0][0] is T
True
>>> dis(pickle.dumps(L, 0))
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: ( MARK
6: g GET 0
9: t TUPLE (MARK at 5)
10: p PUT 1
13: a APPEND
14: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(L, 1))
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: h BINGET 0
6: t TUPLE (MARK at 3)
7: q BINPUT 1
9: a APPEND
10: . STOP
highest protocol among opcodes = 1
Note that, in the protocol 0 pickle of the recursive tuple, the disassembler
has to emulate the stack in order to realize that the POP opcode at 16 gets
rid of the MARK at 0.
>>> dis(pickle.dumps(T, 0))
0: ( MARK
1: ( MARK
2: l LIST (MARK at 1)
3: p PUT 0
6: ( MARK
7: g GET 0
10: t TUPLE (MARK at 6)
11: p PUT 1
14: a APPEND
15: 0 POP
16: 0 POP (MARK at 0)
17: g GET 1
20: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(T, 1))
0: ( MARK
1: ] EMPTY_LIST
2: q BINPUT 0
4: ( MARK
5: h BINGET 0
7: t TUPLE (MARK at 4)
8: q BINPUT 1
10: a APPEND
11: 1 POP_MARK (MARK at 0)
12: h BINGET 1
14: . STOP
highest protocol among opcodes = 1
Try protocol 2.
>>> dis(pickle.dumps(L, 2))
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: h BINGET 0
7: \x85 TUPLE1
8: q BINPUT 1
10: a APPEND
11: . STOP
highest protocol among opcodes = 2
>>> dis(pickle.dumps(T, 2))
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: h BINGET 0
7: \x85 TUPLE1
8: q BINPUT 1
10: a APPEND
11: 0 POP
12: h BINGET 1
14: . STOP
highest protocol among opcodes = 2
Try protocol 3 with annotations:
>>> dis(pickle.dumps(T, 3), annotate=1)
0: \x80 PROTO 3 Protocol version indicator.
2: ] EMPTY_LIST Push an empty list.
3: q BINPUT 0 Store the stack top into the memo. The stack is not popped.
5: h BINGET 0 Read an object from the memo and push it on the stack.
7: \x85 TUPLE1 Build a one-tuple out of the topmost item on the stack.
8: q BINPUT 1 Store the stack top into the memo. The stack is not popped.
10: a APPEND Append an object to a list.
11: 0 POP Discard the top stack item, shrinking the stack by one item.
12: h BINGET 1 Read an object from the memo and push it on the stack.
14: . STOP Stop the unpickling machine.
highest protocol among opcodes = 2
"""
_memo_test = r"""
>>> import pickle
>>> import io
>>> f = io.BytesIO()
>>> p = pickle.Pickler(f, 2)
>>> x = [1, 2, 3]
>>> p.dump(x)
>>> p.dump(x)
>>> f.seek(0)
0
>>> memo = {}
>>> dis(f, memo=memo)
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 1
8: K BININT1 2
10: K BININT1 3
12: e APPENDS (MARK at 5)
13: . STOP
highest protocol among opcodes = 2
>>> dis(f, memo=memo)
14: \x80 PROTO 2
16: h BINGET 0
18: . STOP
highest protocol among opcodes = 2
"""
__test__ = {'disassembler_test': _dis_test,
'disassembler_memo_test': _memo_test,
}
def _test():
import doctest
return doctest.testmod()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description='disassemble one or more pickle files')
parser.add_argument(
'pickle_file', type=argparse.FileType('br'),
nargs='*', help='the pickle file')
parser.add_argument(
'-o', '--output', default=sys.stdout, type=argparse.FileType('w'),
help='the file where the output should be written')
parser.add_argument(
'-m', '--memo', action='store_true',
help='preserve memo between disassemblies')
parser.add_argument(
'-l', '--indentlevel', default=4, type=int,
help='the number of blanks by which to indent a new MARK level')
parser.add_argument(
'-a', '--annotate', action='store_true',
help='annotate each line with a short opcode description')
parser.add_argument(
'-p', '--preamble', default="==> {name} <==",
help='if more than one pickle file is specified, print this before'
' each disassembly')
parser.add_argument(
'-t', '--test', action='store_true',
help='run self-test suite')
parser.add_argument(
'-v', action='store_true',
help='run verbosely; only affects self-test run')
args = parser.parse_args()
if args.test:
_test()
else:
annotate = 30 if args.annotate else 0
if not args.pickle_file:
parser.print_help()
elif len(args.pickle_file) == 1:
dis(args.pickle_file[0], args.output, None,
args.indentlevel, annotate)
else:
memo = {} if args.memo else None
for f in args.pickle_file:
preamble = args.preamble.format(name=f.name)
args.output.write(preamble + '\n')
dis(f, args.output, memo, args.indentlevel, annotate)
| yotchang4s/cafebabepy | src/main/python/pickletools.py | Python | bsd-3-clause | 91,809 |
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
def test_basic():
s = pd.Series([[0, 1, 2], np.nan, [], (3, 4)], index=list("abcd"), name="foo")
result = s.explode()
expected = pd.Series(
[0, 1, 2, np.nan, np.nan, 3, 4], index=list("aaabcdd"), dtype=object, name="foo"
)
tm.assert_series_equal(result, expected)
def test_mixed_type():
s = pd.Series(
[[0, 1, 2], np.nan, None, np.array([]), pd.Series(["a", "b"])], name="foo"
)
result = s.explode()
expected = pd.Series(
[0, 1, 2, np.nan, None, np.nan, "a", "b"],
index=[0, 0, 0, 1, 2, 3, 4, 4],
dtype=object,
name="foo",
)
tm.assert_series_equal(result, expected)
def test_empty():
s = pd.Series(dtype=object)
result = s.explode()
expected = s.copy()
tm.assert_series_equal(result, expected)
def test_nested_lists():
s = pd.Series([[[1, 2, 3]], [1, 2], 1])
result = s.explode()
expected = pd.Series([[1, 2, 3], 1, 2, 1], index=[0, 1, 1, 2])
tm.assert_series_equal(result, expected)
def test_multi_index():
s = pd.Series(
[[0, 1, 2], np.nan, [], (3, 4)],
name="foo",
index=pd.MultiIndex.from_product([list("ab"), range(2)], names=["foo", "bar"]),
)
result = s.explode()
index = pd.MultiIndex.from_tuples(
[("a", 0), ("a", 0), ("a", 0), ("a", 1), ("b", 0), ("b", 1), ("b", 1)],
names=["foo", "bar"],
)
expected = pd.Series(
[0, 1, 2, np.nan, np.nan, 3, 4], index=index, dtype=object, name="foo"
)
tm.assert_series_equal(result, expected)
def test_large():
s = pd.Series([range(256)]).explode()
result = s.explode()
tm.assert_series_equal(result, s)
def test_invert_array():
df = pd.DataFrame({"a": pd.date_range("20190101", periods=3, tz="UTC")})
listify = df.apply(lambda x: x.array, axis=1)
result = listify.explode()
tm.assert_series_equal(result, df["a"].rename())
@pytest.mark.parametrize(
"s", [pd.Series([1, 2, 3]), pd.Series(pd.date_range("2019", periods=3, tz="UTC"))]
)
def test_non_object_dtype(s):
result = s.explode()
tm.assert_series_equal(result, s)
def test_typical_usecase():
df = pd.DataFrame(
[{"var1": "a,b,c", "var2": 1}, {"var1": "d,e,f", "var2": 2}],
columns=["var1", "var2"],
)
exploded = df.var1.str.split(",").explode()
result = df[["var2"]].join(exploded)
expected = pd.DataFrame(
{"var2": [1, 1, 1, 2, 2, 2], "var1": list("abcdef")},
columns=["var2", "var1"],
index=[0, 0, 0, 1, 1, 1],
)
tm.assert_frame_equal(result, expected)
def test_nested_EA():
# a nested EA array
s = pd.Series(
[
pd.date_range("20170101", periods=3, tz="UTC"),
pd.date_range("20170104", periods=3, tz="UTC"),
]
)
result = s.explode()
expected = pd.Series(
pd.date_range("20170101", periods=6, tz="UTC"), index=[0, 0, 0, 1, 1, 1]
)
tm.assert_series_equal(result, expected)
def test_duplicate_index():
# GH 28005
s = pd.Series([[1, 2], [3, 4]], index=[0, 0])
result = s.explode()
expected = pd.Series([1, 2, 3, 4], index=[0, 0, 0, 0], dtype=object)
tm.assert_series_equal(result, expected)
def test_ignore_index():
# GH 34932
s = pd.Series([[1, 2], [3, 4]])
result = s.explode(ignore_index=True)
expected = pd.Series([1, 2, 3, 4], index=[0, 1, 2, 3], dtype=object)
tm.assert_series_equal(result, expected)
def test_explode_sets():
# https://github.com/pandas-dev/pandas/issues/35614
s = pd.Series([{"a", "b", "c"}], index=[1])
result = s.explode().sort_values()
expected = pd.Series(["a", "b", "c"], index=[1, 1, 1])
tm.assert_series_equal(result, expected)
def test_explode_scalars_can_ignore_index():
# https://github.com/pandas-dev/pandas/issues/40487
s = pd.Series([1, 2, 3], index=["a", "b", "c"])
result = s.explode(ignore_index=True)
expected = pd.Series([1, 2, 3])
tm.assert_series_equal(result, expected)
| dsm054/pandas | pandas/tests/series/methods/test_explode.py | Python | bsd-3-clause | 4,090 |
# $Id$
# importing this module shouldn't directly cause other large imports
# do large imports in the init() hook so that you can call back to the
# ModuleManager progress handler methods.
"""vtk_kit package driver file.
This performs all initialisation necessary to use VTK from DeVIDE. Makes
sure that all VTK classes have ErrorEvent handlers that report back to
the ModuleManager.
Inserts the following modules in sys.modules: vtk, vtkdevide.
@author: Charl P. Botha <http://cpbotha.net/>
"""
import re
import sys
import traceback
import types
VERSION = ''
def preImportVTK(progressMethod):
vtkImportList = [('vtk.common', 'VTK Common.'),
('vtk.filtering', 'VTK Filtering.'),
('vtk.io', 'VTK IO.'),
('vtk.imaging', 'VTK Imaging.'),
('vtk.graphics', 'VTK Graphics.'),
('vtk.rendering', 'VTK Rendering.'),
('vtk.hybrid', 'VTK Hybrid.'),
#('vtk.patented', 'VTK Patented.'),
('vtk', 'Other VTK symbols')]
# set the dynamic loading flags. If we don't do this, we get strange
# errors on 64 bit machines. To see this happen, comment this statement
# and then run the VTK->ITK connection test case.
oldflags = setDLFlags()
percentStep = 100.0 / len(vtkImportList)
currentPercent = 0.0
# do the imports
for module, message in vtkImportList:
currentPercent += percentStep
progressMethod(currentPercent, 'Initialising vtk_kit: %s' % (message,),
noTime=True)
exec('import %s' % (module,))
# restore previous dynamic loading flags
resetDLFlags(oldflags)
def setDLFlags():
# brought over from ITK Wrapping/CSwig/Python
# Python "help(sys.setdlopenflags)" states:
#
# setdlopenflags(...)
# setdlopenflags(n) -> None
#
# Set the flags that will be used for dlopen() calls. Among other
# things, this will enable a lazy resolving of symbols when
# importing a module, if called as sys.setdlopenflags(0) To share
# symbols across extension modules, call as
#
# sys.setdlopenflags(dl.RTLD_NOW|dl.RTLD_GLOBAL)
#
# GCC 3.x depends on proper merging of symbols for RTTI:
# http://gcc.gnu.org/faq.html#dso
#
try:
import dl
newflags = dl.RTLD_NOW|dl.RTLD_GLOBAL
except:
newflags = 0x102 # No dl module, so guess (see above).
try:
oldflags = sys.getdlopenflags()
sys.setdlopenflags(newflags)
except:
oldflags = None
return oldflags
def resetDLFlags(data):
# brought over from ITK Wrapping/CSwig/Python
# Restore the original dlopen flags.
try:
sys.setdlopenflags(data)
except:
pass
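def _example_guarded_import():
    # Illustrative sketch only (not called anywhere in this kit): shows how
    # setDLFlags()/resetDLFlags() are meant to bracket the import of a
    # compiled extension, mirroring the pattern preImportVTK() uses above.
    oldflags = setDLFlags()
    try:
        import vtk  # any compiled extension module would do here
    finally:
        resetDLFlags(oldflags)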
def init(module_manager, pre_import=True):
# first do the VTK pre-imports: this is here ONLY to keep the user happy
# it's not necessary for normal functioning
if pre_import:
preImportVTK(module_manager.setProgress)
# import the main module itself
# the global is so that users can also do:
# from module_kits import vtk_kit
# vtk_kit.vtk.vtkSomeFilter()
global vtk
import vtk
# and do the same for vtkdevide
global vtkdevide
import vtkdevide
# load up some generic functions into this namespace
# user can, after import of module_kits.vtk_kit, address these as
# module_kits.vtk_kit.blaat. In this case we don't need "global",
# as these are modules directly in this package.
import module_kits.vtk_kit.misc as misc
import module_kits.vtk_kit.mixins as mixins
import module_kits.vtk_kit.utils as utils
import module_kits.vtk_kit.constants as constants
import module_kits.vtk_kit.color_scales as color_scales
# setup the kit version
global VERSION
VERSION = '%s' % (vtk.vtkVersion.GetVTKVersion(),)
| nagyistoce/devide | module_kits/vtk_kit/__init__.py | Python | bsd-3-clause | 3,965 |
#!/usr/bin/python
import datetime
import sys
import textwrap
import common
from xml.dom import pulldom
PARSER = """\
/**
* Copyright 2009 Joe LaPenna
*/
package com.joelapenna.foursquare.parsers;
import com.joelapenna.foursquare.Foursquare;
import com.joelapenna.foursquare.error.FoursquareError;
import com.joelapenna.foursquare.error.FoursquareParseException;
import com.joelapenna.foursquare.types.%(type_name)s;
import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserException;
import java.io.IOException;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Auto-generated: %(timestamp)s
*
* @author Joe LaPenna ([email protected])
* @param <T>
*/
public class %(type_name)sParser extends AbstractParser<%(type_name)s> {
private static final Logger LOG = Logger.getLogger(%(type_name)sParser.class.getCanonicalName());
private static final boolean DEBUG = Foursquare.PARSER_DEBUG;
@Override
public %(type_name)s parseInner(XmlPullParser parser) throws XmlPullParserException, IOException,
FoursquareError, FoursquareParseException {
parser.require(XmlPullParser.START_TAG, null, null);
%(type_name)s %(top_node_name)s = new %(type_name)s();
while (parser.nextTag() == XmlPullParser.START_TAG) {
String name = parser.getName();
%(stanzas)s
} else {
// Consume something we don't understand.
if (DEBUG) LOG.log(Level.FINE, "Found tag that we don't recognize: " + name);
skipSubTree(parser);
}
}
return %(top_node_name)s;
}
}"""
BOOLEAN_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(Boolean.valueOf(parser.nextText()));
"""
GROUP_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new GroupParser(new %(sub_parser_camel_case)s()).parse(parser));
"""
COMPLEX_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new %(parser_name)s().parse(parser));
"""
STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(parser.nextText());
"""
def main():
type_name, top_node_name, attributes = common.WalkNodesForAttributes(
sys.argv[1])
GenerateClass(type_name, top_node_name, attributes)
def GenerateClass(type_name, top_node_name, attributes):
"""generate it.
type_name: the type of object the parser returns
top_node_name: the name of the object the parser returns.
    per common.WalkNodesForAttributes
"""
stanzas = []
for name in sorted(attributes):
typ, children = attributes[name]
replacements = Replacements(top_node_name, name, typ, children)
if typ == common.BOOLEAN:
stanzas.append(BOOLEAN_STANZA % replacements)
elif typ == common.GROUP:
stanzas.append(GROUP_STANZA % replacements)
elif typ in common.COMPLEX:
stanzas.append(COMPLEX_STANZA % replacements)
else:
stanzas.append(STANZA % replacements)
if stanzas:
        # pop off the extraneous } else for the first conditional stanza.
stanzas[0] = stanzas[0].replace('} else ', '', 1)
replacements = Replacements(top_node_name, name, typ, [None])
replacements['stanzas'] = '\n'.join(stanzas).strip()
print PARSER % replacements
def Replacements(top_node_name, name, typ, children):
    # CamelCaseClassName
type_name = ''.join([word.capitalize() for word in top_node_name.split('_')])
# CamelCaseClassName
camel_name = ''.join([word.capitalize() for word in name.split('_')])
# camelCaseLocalName
attribute_name = camel_name.lower().capitalize()
# mFieldName
field_name = 'm' + camel_name
if children[0]:
sub_parser_camel_case = children[0] + 'Parser'
else:
sub_parser_camel_case = (camel_name[:-1] + 'Parser')
return {
'type_name': type_name,
'name': name,
'top_node_name': top_node_name,
'camel_name': camel_name,
'parser_name': typ + 'Parser',
'attribute_name': attribute_name,
'field_name': field_name,
'typ': typ,
'timestamp': datetime.datetime.now(),
'sub_parser_camel_case': sub_parser_camel_case,
'sub_type': children[0]
}
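def _example_replacements():
    """Illustrative sketch (not used by the generator): the naming fields
    produced for a hypothetical 'created_at' attribute of a 'user' node;
    the 'String' type value is an assumption for demonstration only."""
    r = Replacements('user', 'created_at', 'String', [None])
    assert r['type_name'] == 'User'
    assert r['camel_name'] == 'CreatedAt'
    assert r['field_name'] == 'mCreatedAt'
    assert r['parser_name'] == 'StringParser'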
if __name__ == '__main__':
main()
| davideuler/foursquared | util/gen_parser.py | Python | apache-2.0 | 4,392 |
"""
WSGI config for made_with_twd_project project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "made_with_twd_project.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "made_with_twd_project.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| leifos/tango_with_django | made_with_twd_project/made_with_twd_project/wsgi.py | Python | mit | 1,464 |
#!/usr/bin/python
# -*- encoding: utf-8 -*-
###########################################################################
#    Module Written for OpenERP, Open Source Management Solution
# Copyright (C) OpenERP Venezuela (<http://openerp.com.ve>).
# All Rights Reserved
###############Credits######################################################
# Coded by: María Gabriela Quilarque <[email protected]>
# Luis Escobar <[email protected]>
#    Planned by: Nhomar Hernandez
#    Financed by: Vauxoo, C.A. http://vauxoo.com
# Audited by: Humberto Arocha [email protected]
#############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import account
| 3dfxsoftware/cbss-addons | report_move_voucher/report/__init__.py | Python | gpl-2.0 | 1,449 |
# SPDX-License-Identifier: GPL-2.0+
# Copyright (c) 2018 Google, Inc
# Written by Simon Glass <[email protected]>
#
# Support for a Chromium OS Google Binary Block, used to record read-only
# information mostly used by firmware.
from collections import OrderedDict
from patman import command
from binman.entry import Entry, EntryArg
from dtoc import fdt_util
from patman import tools
# Build GBB flags.
# (src/platform/vboot_reference/firmware/include/gbb_header.h)
gbb_flag_properties = {
'dev-screen-short-delay': 0x1,
'load-option-roms': 0x2,
'enable-alternate-os': 0x4,
'force-dev-switch-on': 0x8,
'force-dev-boot-usb': 0x10,
'disable-fw-rollback-check': 0x20,
'enter-triggers-tonorm': 0x40,
'force-dev-boot-legacy': 0x80,
'faft-key-override': 0x100,
'disable-ec-software-sync': 0x200,
'default-dev-boot-legacy': 0x400,
'disable-pd-software-sync': 0x800,
'disable-lid-shutdown': 0x1000,
'force-dev-boot-fastboot-full-cap': 0x2000,
'enable-serial': 0x4000,
'disable-dwmp': 0x8000,
}
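# Example (illustrative): a binman node whose 'flags' subnode sets
# 'force-dev-switch-on' and 'enable-serial' would yield
# gbb_flags = 0x8 | 0x4000 = 0x4008 via the loop in Entry_gbb.__init__() below.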
class Entry_gbb(Entry):
"""An entry which contains a Chromium OS Google Binary Block
Properties / Entry arguments:
- hardware-id: Hardware ID to use for this build (a string)
- keydir: Directory containing the public keys to use
- bmpblk: Filename containing images used by recovery
Chromium OS uses a GBB to store various pieces of information, in particular
the root and recovery keys that are used to verify the boot process. Some
more details are here:
https://www.chromium.org/chromium-os/firmware-porting-guide/2-concepts
but note that the page dates from 2013 so is quite out of date. See
README.chromium for how to obtain the required keys and tools.
"""
def __init__(self, section, etype, node):
super().__init__(section, etype, node)
self.hardware_id, self.keydir, self.bmpblk = self.GetEntryArgsOrProps(
[EntryArg('hardware-id', str),
EntryArg('keydir', str),
EntryArg('bmpblk', str)])
# Read in the GBB flags from the config
self.gbb_flags = 0
flags_node = node.FindNode('flags')
if flags_node:
for flag, value in gbb_flag_properties.items():
if fdt_util.GetBool(flags_node, flag):
self.gbb_flags |= value
def ObtainContents(self):
gbb = 'gbb.bin'
fname = tools.GetOutputFilename(gbb)
if not self.size:
self.Raise('GBB must have a fixed size')
gbb_size = self.size
bmpfv_size = gbb_size - 0x2180
if bmpfv_size < 0:
self.Raise('GBB is too small (minimum 0x2180 bytes)')
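        # Region sizes handed to "gbb_utility -c": hardware ID, root key,
        # recovery bitmap block (bmpfv) and recovery key, in that order
        # (order assumed from the gbb_utility create invocation).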
sizes = [0x100, 0x1000, bmpfv_size, 0x1000]
sizes = ['%#x' % size for size in sizes]
keydir = tools.GetInputFilename(self.keydir)
gbb_set_command = [
'gbb_utility', '-s',
'--hwid=%s' % self.hardware_id,
'--rootkey=%s/root_key.vbpubk' % keydir,
'--recoverykey=%s/recovery_key.vbpubk' % keydir,
'--flags=%d' % self.gbb_flags,
'--bmpfv=%s' % tools.GetInputFilename(self.bmpblk),
fname]
tools.Run('futility', 'gbb_utility', '-c', ','.join(sizes), fname)
tools.Run('futility', *gbb_set_command)
self.SetContents(tools.ReadFile(fname))
return True
| Digilent/u-boot-digilent | tools/binman/etype/gbb.py | Python | gpl-2.0 | 3,381 |
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_vpn_ipsec_phase1
short_description: Configure VPN remote gateway in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify vpn_ipsec feature and phase1 category.
      Examples include all parameters, and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
vpn_ipsec_phase1:
description:
- Configure VPN remote gateway.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
acct_verify:
description:
- Enable/disable verification of RADIUS accounting record.
type: str
choices:
- enable
- disable
add_gw_route:
description:
- Enable/disable automatically add a route to the remote gateway.
type: str
choices:
- enable
- disable
add_route:
description:
- Enable/disable control addition of a route to peer destination selector.
type: str
choices:
- disable
- enable
assign_ip:
description:
- Enable/disable assignment of IP to IPsec interface via configuration method.
type: str
choices:
- disable
- enable
assign_ip_from:
description:
- Method by which the IP address will be assigned.
type: str
choices:
- range
- usrgrp
- dhcp
- name
authmethod:
description:
- Authentication method.
type: str
choices:
- psk
- signature
authmethod_remote:
description:
- Authentication method (remote side).
type: str
choices:
- psk
- signature
authpasswd:
description:
- XAuth password (max 35 characters).
type: str
authusr:
description:
- XAuth user name.
type: str
authusrgrp:
description:
- Authentication user group. Source user.group.name.
type: str
auto_negotiate:
description:
- Enable/disable automatic initiation of IKE SA negotiation.
type: str
choices:
- enable
- disable
backup_gateway:
description:
- Instruct unity clients about the backup gateway address(es).
type: list
suboptions:
address:
description:
- Address of backup gateway.
required: true
type: str
banner:
description:
- Message that unity client should display after connecting.
type: str
cert_id_validation:
description:
- Enable/disable cross validation of peer ID and the identity in the peer's certificate as specified in RFC 4945.
type: str
choices:
- enable
- disable
certificate:
description:
- Names of up to 4 signed personal certificates.
type: list
suboptions:
name:
description:
- Certificate name. Source vpn.certificate.local.name.
required: true
type: str
childless_ike:
description:
- Enable/disable childless IKEv2 initiation (RFC 6023).
type: str
choices:
- enable
- disable
client_auto_negotiate:
description:
- Enable/disable allowing the VPN client to bring up the tunnel when there is no traffic.
type: str
choices:
- disable
- enable
client_keep_alive:
description:
- Enable/disable allowing the VPN client to keep the tunnel up when there is no traffic.
type: str
choices:
- disable
- enable
comments:
description:
- Comment.
type: str
dhgrp:
description:
- DH group.
type: str
choices:
- 1
- 2
- 5
- 14
- 15
- 16
- 17
- 18
- 19
- 20
- 21
- 27
- 28
- 29
- 30
- 31
digital_signature_auth:
description:
- Enable/disable IKEv2 Digital Signature Authentication (RFC 7427).
type: str
choices:
- enable
- disable
distance:
description:
- Distance for routes added by IKE (1 - 255).
type: int
dns_mode:
description:
- DNS server mode.
type: str
choices:
- manual
- auto
domain:
description:
- Instruct unity clients about the default DNS domain.
type: str
dpd:
description:
- Dead Peer Detection mode.
type: str
choices:
- disable
- on-idle
- on-demand
dpd_retrycount:
description:
- Number of DPD retry attempts.
type: int
dpd_retryinterval:
description:
- DPD retry interval.
type: str
eap:
description:
- Enable/disable IKEv2 EAP authentication.
type: str
choices:
- enable
- disable
eap_identity:
description:
- IKEv2 EAP peer identity type.
type: str
choices:
- use-id-payload
- send-request
enforce_unique_id:
description:
- Enable/disable peer ID uniqueness check.
type: str
choices:
- disable
- keep-new
- keep-old
forticlient_enforcement:
description:
- Enable/disable FortiClient enforcement.
type: str
choices:
- enable
- disable
fragmentation:
description:
- Enable/disable fragment IKE message on re-transmission.
type: str
choices:
- enable
- disable
fragmentation_mtu:
description:
- IKE fragmentation MTU (500 - 16000).
type: int
group_authentication:
description:
- Enable/disable IKEv2 IDi group authentication.
type: str
choices:
- enable
- disable
group_authentication_secret:
description:
- Password for IKEv2 IDi group authentication. (ASCII string or hexadecimal indicated by a leading 0x.)
type: str
ha_sync_esp_seqno:
description:
- Enable/disable sequence number jump ahead for IPsec HA.
type: str
choices:
- enable
- disable
idle_timeout:
description:
- Enable/disable IPsec tunnel idle timeout.
type: str
choices:
- enable
- disable
idle_timeoutinterval:
description:
- IPsec tunnel idle timeout in minutes (5 - 43200).
type: int
ike_version:
description:
- IKE protocol version.
type: str
choices:
- 1
- 2
include_local_lan:
description:
- Enable/disable allow local LAN access on unity clients.
type: str
choices:
- disable
- enable
interface:
description:
- Local physical, aggregate, or VLAN outgoing interface. Source system.interface.name.
type: str
ipv4_dns_server1:
description:
- IPv4 DNS server 1.
type: str
ipv4_dns_server2:
description:
- IPv4 DNS server 2.
type: str
ipv4_dns_server3:
description:
- IPv4 DNS server 3.
type: str
ipv4_end_ip:
description:
- End of IPv4 range.
type: str
ipv4_exclude_range:
description:
- Configuration Method IPv4 exclude ranges.
type: list
suboptions:
end_ip:
description:
- End of IPv4 exclusive range.
type: str
id:
description:
- ID.
required: true
type: int
start_ip:
description:
- Start of IPv4 exclusive range.
type: str
ipv4_name:
description:
- IPv4 address name. Source firewall.address.name firewall.addrgrp.name.
type: str
ipv4_netmask:
description:
- IPv4 Netmask.
type: str
ipv4_split_exclude:
description:
- IPv4 subnets that should not be sent over the IPsec tunnel. Source firewall.address.name firewall.addrgrp.name.
type: str
ipv4_split_include:
description:
- IPv4 split-include subnets. Source firewall.address.name firewall.addrgrp.name.
type: str
ipv4_start_ip:
description:
- Start of IPv4 range.
type: str
ipv4_wins_server1:
description:
- WINS server 1.
type: str
ipv4_wins_server2:
description:
- WINS server 2.
type: str
ipv6_dns_server1:
description:
- IPv6 DNS server 1.
type: str
ipv6_dns_server2:
description:
- IPv6 DNS server 2.
type: str
ipv6_dns_server3:
description:
- IPv6 DNS server 3.
type: str
ipv6_end_ip:
description:
- End of IPv6 range.
type: str
ipv6_exclude_range:
description:
- Configuration method IPv6 exclude ranges.
type: list
suboptions:
end_ip:
description:
- End of IPv6 exclusive range.
type: str
id:
description:
- ID.
required: true
type: int
start_ip:
description:
- Start of IPv6 exclusive range.
type: str
ipv6_name:
description:
- IPv6 address name. Source firewall.address6.name firewall.addrgrp6.name.
type: str
ipv6_prefix:
description:
- IPv6 prefix.
type: int
ipv6_split_exclude:
description:
- IPv6 subnets that should not be sent over the IPsec tunnel. Source firewall.address6.name firewall.addrgrp6.name.
type: str
ipv6_split_include:
description:
- IPv6 split-include subnets. Source firewall.address6.name firewall.addrgrp6.name.
type: str
ipv6_start_ip:
description:
- Start of IPv6 range.
type: str
keepalive:
description:
- NAT-T keep alive interval.
type: int
keylife:
description:
- Time to wait in seconds before phase 1 encryption key expires.
type: int
local_gw:
description:
- Local VPN gateway.
type: str
localid:
description:
- Local ID.
type: str
localid_type:
description:
- Local ID type.
type: str
choices:
- auto
- fqdn
- user-fqdn
- keyid
- address
- asn1dn
mesh_selector_type:
description:
- Add selectors containing subsets of the configuration depending on traffic.
type: str
choices:
- disable
- subnet
- host
mode:
description:
- ID protection mode used to establish a secure channel.
type: str
choices:
- aggressive
- main
mode_cfg:
description:
- Enable/disable configuration method.
type: str
choices:
- disable
- enable
name:
description:
- IPsec remote gateway name.
required: true
type: str
nattraversal:
description:
- Enable/disable NAT traversal.
type: str
choices:
- enable
- disable
- forced
negotiate_timeout:
description:
- IKE SA negotiation timeout in seconds (1 - 300).
type: int
peer:
description:
- Accept this peer certificate. Source user.peer.name.
type: str
peergrp:
description:
- Accept this peer certificate group. Source user.peergrp.name.
type: str
peerid:
description:
- Accept this peer identity.
type: str
peertype:
description:
- Accept this peer type.
type: str
choices:
- any
- one
- dialup
- peer
- peergrp
ppk:
description:
- Enable/disable IKEv2 Postquantum Preshared Key (PPK).
type: str
choices:
- disable
- allow
- require
ppk_identity:
description:
- IKEv2 Postquantum Preshared Key Identity.
type: str
ppk_secret:
description:
- IKEv2 Postquantum Preshared Key (ASCII string or hexadecimal encoded with a leading 0x).
type: str
priority:
description:
- Priority for routes added by IKE (0 - 4294967295).
type: int
proposal:
description:
- Phase1 proposal.
type: str
choices:
- des-md5
- des-sha1
- des-sha256
- des-sha384
- des-sha512
psksecret:
description:
- Pre-shared secret for PSK authentication (ASCII string or hexadecimal encoded with a leading 0x).
type: str
psksecret_remote:
description:
- Pre-shared secret for remote side PSK authentication (ASCII string or hexadecimal encoded with a leading 0x).
type: str
reauth:
description:
- Enable/disable re-authentication upon IKE SA lifetime expiration.
type: str
choices:
- disable
- enable
rekey:
description:
- Enable/disable phase1 rekey.
type: str
choices:
- enable
- disable
remote_gw:
description:
- Remote VPN gateway.
type: str
remotegw_ddns:
description:
- Domain name of remote gateway (eg. name.DDNS.com).
type: str
rsa_signature_format:
description:
- Digital Signature Authentication RSA signature format.
type: str
choices:
- pkcs1
- pss
save_password:
description:
- Enable/disable saving XAuth username and password on VPN clients.
type: str
choices:
- disable
- enable
send_cert_chain:
description:
- Enable/disable sending certificate chain.
type: str
choices:
- enable
- disable
signature_hash_alg:
description:
- Digital Signature Authentication hash algorithms.
type: str
choices:
- sha1
- sha2-256
- sha2-384
- sha2-512
split_include_service:
description:
- Split-include services. Source firewall.service.group.name firewall.service.custom.name.
type: str
suite_b:
description:
- Use Suite-B.
type: str
choices:
- disable
- suite-b-gcm-128
- suite-b-gcm-256
type:
description:
- Remote gateway type.
type: str
choices:
- static
- dynamic
- ddns
unity_support:
description:
- Enable/disable support for Cisco UNITY Configuration Method extensions.
type: str
choices:
- disable
- enable
usrgrp:
description:
- User group name for dialup peers. Source user.group.name.
type: str
wizard_type:
description:
- GUI VPN Wizard Type.
type: str
choices:
- custom
- dialup-forticlient
- dialup-ios
- dialup-android
- dialup-windows
- dialup-cisco
- static-fortigate
- dialup-fortigate
- static-cisco
- dialup-cisco-fw
xauthtype:
description:
- XAuth type.
type: str
choices:
- disable
- client
- pap
- chap
- auto
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure VPN remote gateway.
fortios_vpn_ipsec_phase1:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
vpn_ipsec_phase1:
acct_verify: "enable"
add_gw_route: "enable"
add_route: "disable"
assign_ip: "disable"
assign_ip_from: "range"
authmethod: "psk"
authmethod_remote: "psk"
authpasswd: "<your_own_value>"
authusr: "<your_own_value>"
authusrgrp: "<your_own_value> (source user.group.name)"
auto_negotiate: "enable"
backup_gateway:
-
address: "<your_own_value>"
banner: "<your_own_value>"
cert_id_validation: "enable"
certificate:
-
name: "default_name_19 (source vpn.certificate.local.name)"
childless_ike: "enable"
client_auto_negotiate: "disable"
client_keep_alive: "disable"
comments: "<your_own_value>"
dhgrp: "1"
digital_signature_auth: "enable"
distance: "26"
dns_mode: "manual"
domain: "<your_own_value>"
dpd: "disable"
dpd_retrycount: "30"
dpd_retryinterval: "<your_own_value>"
eap: "enable"
eap_identity: "use-id-payload"
enforce_unique_id: "disable"
forticlient_enforcement: "enable"
fragmentation: "enable"
fragmentation_mtu: "37"
group_authentication: "enable"
group_authentication_secret: "<your_own_value>"
ha_sync_esp_seqno: "enable"
idle_timeout: "enable"
idle_timeoutinterval: "42"
ike_version: "1"
include_local_lan: "disable"
interface: "<your_own_value> (source system.interface.name)"
ipv4_dns_server1: "<your_own_value>"
ipv4_dns_server2: "<your_own_value>"
ipv4_dns_server3: "<your_own_value>"
ipv4_end_ip: "<your_own_value>"
ipv4_exclude_range:
-
end_ip: "<your_own_value>"
id: "52"
start_ip: "<your_own_value>"
ipv4_name: "<your_own_value> (source firewall.address.name firewall.addrgrp.name)"
ipv4_netmask: "<your_own_value>"
ipv4_split_exclude: "<your_own_value> (source firewall.address.name firewall.addrgrp.name)"
ipv4_split_include: "<your_own_value> (source firewall.address.name firewall.addrgrp.name)"
ipv4_start_ip: "<your_own_value>"
ipv4_wins_server1: "<your_own_value>"
ipv4_wins_server2: "<your_own_value>"
ipv6_dns_server1: "<your_own_value>"
ipv6_dns_server2: "<your_own_value>"
ipv6_dns_server3: "<your_own_value>"
ipv6_end_ip: "<your_own_value>"
ipv6_exclude_range:
-
end_ip: "<your_own_value>"
id: "67"
start_ip: "<your_own_value>"
ipv6_name: "<your_own_value> (source firewall.address6.name firewall.addrgrp6.name)"
ipv6_prefix: "70"
ipv6_split_exclude: "<your_own_value> (source firewall.address6.name firewall.addrgrp6.name)"
ipv6_split_include: "<your_own_value> (source firewall.address6.name firewall.addrgrp6.name)"
ipv6_start_ip: "<your_own_value>"
keepalive: "74"
keylife: "75"
local_gw: "<your_own_value>"
localid: "<your_own_value>"
localid_type: "auto"
mesh_selector_type: "disable"
mode: "aggressive"
mode_cfg: "disable"
name: "default_name_82"
nattraversal: "enable"
negotiate_timeout: "84"
peer: "<your_own_value> (source user.peer.name)"
peergrp: "<your_own_value> (source user.peergrp.name)"
peerid: "<your_own_value>"
peertype: "any"
ppk: "disable"
ppk_identity: "<your_own_value>"
ppk_secret: "<your_own_value>"
priority: "92"
proposal: "des-md5"
psksecret: "<your_own_value>"
psksecret_remote: "<your_own_value>"
reauth: "disable"
rekey: "enable"
remote_gw: "<your_own_value>"
remotegw_ddns: "<your_own_value>"
rsa_signature_format: "pkcs1"
save_password: "disable"
send_cert_chain: "enable"
signature_hash_alg: "sha1"
split_include_service: "<your_own_value> (source firewall.service.group.name firewall.service.custom.name)"
suite_b: "disable"
type: "static"
unity_support: "disable"
usrgrp: "<your_own_value> (source user.group.name)"
wizard_type: "custom"
xauthtype: "disable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_vpn_ipsec_phase1_data(json):
option_list = ['acct_verify', 'add_gw_route', 'add_route',
'assign_ip', 'assign_ip_from', 'authmethod',
'authmethod_remote', 'authpasswd', 'authusr',
'authusrgrp', 'auto_negotiate', 'backup_gateway',
'banner', 'cert_id_validation', 'certificate',
'childless_ike', 'client_auto_negotiate', 'client_keep_alive',
'comments', 'dhgrp', 'digital_signature_auth',
'distance', 'dns_mode', 'domain',
'dpd', 'dpd_retrycount', 'dpd_retryinterval',
'eap', 'eap_identity', 'enforce_unique_id',
'forticlient_enforcement', 'fragmentation', 'fragmentation_mtu',
'group_authentication', 'group_authentication_secret', 'ha_sync_esp_seqno',
'idle_timeout', 'idle_timeoutinterval', 'ike_version',
'include_local_lan', 'interface', 'ipv4_dns_server1',
'ipv4_dns_server2', 'ipv4_dns_server3', 'ipv4_end_ip',
'ipv4_exclude_range', 'ipv4_name', 'ipv4_netmask',
'ipv4_split_exclude', 'ipv4_split_include', 'ipv4_start_ip',
'ipv4_wins_server1', 'ipv4_wins_server2', 'ipv6_dns_server1',
'ipv6_dns_server2', 'ipv6_dns_server3', 'ipv6_end_ip',
'ipv6_exclude_range', 'ipv6_name', 'ipv6_prefix',
'ipv6_split_exclude', 'ipv6_split_include', 'ipv6_start_ip',
'keepalive', 'keylife', 'local_gw',
'localid', 'localid_type', 'mesh_selector_type',
'mode', 'mode_cfg', 'name',
'nattraversal', 'negotiate_timeout', 'peer',
'peergrp', 'peerid', 'peertype',
'ppk', 'ppk_identity', 'ppk_secret',
'priority', 'proposal', 'psksecret',
'psksecret_remote', 'reauth', 'rekey',
'remote_gw', 'remotegw_ddns', 'rsa_signature_format',
'save_password', 'send_cert_chain', 'signature_hash_alg',
'split_include_service', 'suite_b', 'type',
'unity_support', 'usrgrp', 'wizard_type',
'xauthtype']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
def vpn_ipsec_phase1(data, fos):
vdom = data['vdom']
if 'state' in data and data['state']:
state = data['state']
elif 'state' in data['vpn_ipsec_phase1'] and data['vpn_ipsec_phase1']:
state = data['vpn_ipsec_phase1']['state']
else:
state = True
vpn_ipsec_phase1_data = data['vpn_ipsec_phase1']
filtered_data = underscore_to_hyphen(filter_vpn_ipsec_phase1_data(vpn_ipsec_phase1_data))
if state == "present":
return fos.set('vpn.ipsec',
'phase1',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('vpn.ipsec',
'phase1',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_vpn_ipsec(data, fos):
if data['vpn_ipsec_phase1']:
resp = vpn_ipsec_phase1(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"vpn_ipsec_phase1": {
"required": False, "type": "dict", "default": None,
"options": {
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"acct_verify": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"add_gw_route": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"add_route": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"assign_ip": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"assign_ip_from": {"required": False, "type": "str",
"choices": ["range", "usrgrp", "dhcp",
"name"]},
"authmethod": {"required": False, "type": "str",
"choices": ["psk", "signature"]},
"authmethod_remote": {"required": False, "type": "str",
"choices": ["psk", "signature"]},
"authpasswd": {"required": False, "type": "str"},
"authusr": {"required": False, "type": "str"},
"authusrgrp": {"required": False, "type": "str"},
"auto_negotiate": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"backup_gateway": {"required": False, "type": "list",
"options": {
"address": {"required": True, "type": "str"}
}},
"banner": {"required": False, "type": "str"},
"cert_id_validation": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"certificate": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"childless_ike": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"client_auto_negotiate": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"client_keep_alive": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"comments": {"required": False, "type": "str"},
"dhgrp": {"required": False, "type": "str",
"choices": ["1", "2", "5",
"14", "15", "16",
"17", "18", "19",
"20", "21", "27",
"28", "29", "30",
"31"]},
"digital_signature_auth": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"distance": {"required": False, "type": "int"},
"dns_mode": {"required": False, "type": "str",
"choices": ["manual", "auto"]},
"domain": {"required": False, "type": "str"},
"dpd": {"required": False, "type": "str",
"choices": ["disable", "on-idle", "on-demand"]},
"dpd_retrycount": {"required": False, "type": "int"},
"dpd_retryinterval": {"required": False, "type": "str"},
"eap": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"eap_identity": {"required": False, "type": "str",
"choices": ["use-id-payload", "send-request"]},
"enforce_unique_id": {"required": False, "type": "str",
"choices": ["disable", "keep-new", "keep-old"]},
"forticlient_enforcement": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"fragmentation": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"fragmentation_mtu": {"required": False, "type": "int"},
"group_authentication": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"group_authentication_secret": {"required": False, "type": "str"},
"ha_sync_esp_seqno": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"idle_timeout": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"idle_timeoutinterval": {"required": False, "type": "int"},
"ike_version": {"required": False, "type": "str",
"choices": ["1", "2"]},
"include_local_lan": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"interface": {"required": False, "type": "str"},
"ipv4_dns_server1": {"required": False, "type": "str"},
"ipv4_dns_server2": {"required": False, "type": "str"},
"ipv4_dns_server3": {"required": False, "type": "str"},
"ipv4_end_ip": {"required": False, "type": "str"},
"ipv4_exclude_range": {"required": False, "type": "list",
"options": {
"end_ip": {"required": False, "type": "str"},
"id": {"required": True, "type": "int"},
"start_ip": {"required": False, "type": "str"}
}},
"ipv4_name": {"required": False, "type": "str"},
"ipv4_netmask": {"required": False, "type": "str"},
"ipv4_split_exclude": {"required": False, "type": "str"},
"ipv4_split_include": {"required": False, "type": "str"},
"ipv4_start_ip": {"required": False, "type": "str"},
"ipv4_wins_server1": {"required": False, "type": "str"},
"ipv4_wins_server2": {"required": False, "type": "str"},
"ipv6_dns_server1": {"required": False, "type": "str"},
"ipv6_dns_server2": {"required": False, "type": "str"},
"ipv6_dns_server3": {"required": False, "type": "str"},
"ipv6_end_ip": {"required": False, "type": "str"},
"ipv6_exclude_range": {"required": False, "type": "list",
"options": {
"end_ip": {"required": False, "type": "str"},
"id": {"required": True, "type": "int"},
"start_ip": {"required": False, "type": "str"}
}},
"ipv6_name": {"required": False, "type": "str"},
"ipv6_prefix": {"required": False, "type": "int"},
"ipv6_split_exclude": {"required": False, "type": "str"},
"ipv6_split_include": {"required": False, "type": "str"},
"ipv6_start_ip": {"required": False, "type": "str"},
"keepalive": {"required": False, "type": "int"},
"keylife": {"required": False, "type": "int"},
"local_gw": {"required": False, "type": "str"},
"localid": {"required": False, "type": "str"},
"localid_type": {"required": False, "type": "str",
"choices": ["auto", "fqdn", "user-fqdn",
"keyid", "address", "asn1dn"]},
"mesh_selector_type": {"required": False, "type": "str",
"choices": ["disable", "subnet", "host"]},
"mode": {"required": False, "type": "str",
"choices": ["aggressive", "main"]},
"mode_cfg": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"name": {"required": True, "type": "str"},
"nattraversal": {"required": False, "type": "str",
"choices": ["enable", "disable", "forced"]},
"negotiate_timeout": {"required": False, "type": "int"},
"peer": {"required": False, "type": "str"},
"peergrp": {"required": False, "type": "str"},
"peerid": {"required": False, "type": "str"},
"peertype": {"required": False, "type": "str",
"choices": ["any", "one", "dialup",
"peer", "peergrp"]},
"ppk": {"required": False, "type": "str",
"choices": ["disable", "allow", "require"]},
"ppk_identity": {"required": False, "type": "str"},
"ppk_secret": {"required": False, "type": "str"},
"priority": {"required": False, "type": "int"},
"proposal": {"required": False, "type": "str",
"choices": ["des-md5", "des-sha1", "des-sha256",
"des-sha384", "des-sha512"]},
"psksecret": {"required": False, "type": "str"},
"psksecret_remote": {"required": False, "type": "str"},
"reauth": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"rekey": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"remote_gw": {"required": False, "type": "str"},
"remotegw_ddns": {"required": False, "type": "str"},
"rsa_signature_format": {"required": False, "type": "str",
"choices": ["pkcs1", "pss"]},
"save_password": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"send_cert_chain": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"signature_hash_alg": {"required": False, "type": "str",
"choices": ["sha1", "sha2-256", "sha2-384",
"sha2-512"]},
"split_include_service": {"required": False, "type": "str"},
"suite_b": {"required": False, "type": "str",
"choices": ["disable", "suite-b-gcm-128", "suite-b-gcm-256"]},
"type": {"required": False, "type": "str",
"choices": ["static", "dynamic", "ddns"]},
"unity_support": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"usrgrp": {"required": False, "type": "str"},
"wizard_type": {"required": False, "type": "str",
"choices": ["custom", "dialup-forticlient", "dialup-ios",
"dialup-android", "dialup-windows", "dialup-cisco",
"static-fortigate", "dialup-fortigate", "static-cisco",
"dialup-cisco-fw"]},
"xauthtype": {"required": False, "type": "str",
"choices": ["disable", "client", "pap",
"chap", "auto"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_vpn_ipsec(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_vpn_ipsec(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| kustodian/ansible | lib/ansible/modules/network/fortios/fortios_vpn_ipsec_phase1.py | Python | gpl-3.0 | 49,802 |
# (C) British Crown Copyright 2015 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Unit tests for the function
:func:`iris.analysis.cartography.rotate_winds`.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
import numpy.ma as ma
import cartopy.crs as ccrs
from iris.analysis.cartography import rotate_winds, unrotate_pole
from iris.cube import Cube
from iris.coords import DimCoord, AuxCoord
import iris.coord_systems
def uv_cubes(x=None, y=None):
"""Return u, v cubes with a grid in a rotated pole CRS."""
cs = iris.coord_systems.RotatedGeogCS(grid_north_pole_latitude=37.5,
grid_north_pole_longitude=177.5)
if x is None:
x = np.linspace(311.9, 391.1, 6)
if y is None:
y = np.linspace(-23.6, 24.8, 5)
x2d, y2d = np.meshgrid(x, y)
u = 10 * (2 * np.cos(2 * np.deg2rad(x2d) + 3 * np.deg2rad(y2d + 30)) ** 2)
v = 20 * np.cos(6 * np.deg2rad(x2d))
lon = DimCoord(x, standard_name='grid_longitude', units='degrees',
coord_system=cs)
lat = DimCoord(y, standard_name='grid_latitude', units='degrees',
coord_system=cs)
u_cube = Cube(u, standard_name='x_wind', units='m/s')
v_cube = Cube(v, standard_name='y_wind', units='m/s')
for cube in (u_cube, v_cube):
cube.add_dim_coord(lat.copy(), 0)
cube.add_dim_coord(lon.copy(), 1)
return u_cube, v_cube
def uv_cubes_3d(ref_cube, n_realization=3):
"""
Return 3d u, v cubes with a grid in a rotated pole CRS taken from
the provided 2d cube, by adding a realization dimension
    coordinate bound to the zeroth dimension.
"""
lat = ref_cube.coord('grid_latitude')
lon = ref_cube.coord('grid_longitude')
x2d, y2d = np.meshgrid(lon.points, lat.points)
u = 10 * (2 * np.cos(2 * np.deg2rad(x2d) + 3 * np.deg2rad(y2d + 30)) ** 2)
v = 20 * np.cos(6 * np.deg2rad(x2d))
# Multiply slices by factor to give variation over 0th dim.
factor = np.arange(1, n_realization + 1).reshape(n_realization, 1, 1)
u = factor * u
v = factor * v
realization = DimCoord(np.arange(n_realization), 'realization')
u_cube = Cube(u, standard_name='x_wind', units='m/s')
v_cube = Cube(v, standard_name='y_wind', units='m/s')
for cube in (u_cube, v_cube):
cube.add_dim_coord(realization.copy(), 0)
cube.add_dim_coord(lat.copy(), 1)
cube.add_dim_coord(lon.copy(), 2)
return u_cube, v_cube
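# Usage sketch (illustrative, mirroring TestAnalyticComparison below): rotate
# the fixture winds from the rotated-pole grid onto a true geographic CRS.
#
#     u, v = uv_cubes()
#     ut, vt = rotate_winds(u, v, iris.coord_systems.GeogCS(6371229))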
class TestPrerequisites(tests.IrisTest):
def test_different_coord_systems(self):
u, v = uv_cubes()
v.coord('grid_latitude').coord_system = iris.coord_systems.GeogCS(1)
with self.assertRaisesRegexp(
ValueError, 'Coordinates differ between u and v cubes'):
rotate_winds(u, v, iris.coord_systems.OSGB())
def test_different_xy_coord_systems(self):
u, v = uv_cubes()
u.coord('grid_latitude').coord_system = iris.coord_systems.GeogCS(1)
v.coord('grid_latitude').coord_system = iris.coord_systems.GeogCS(1)
with self.assertRaisesRegexp(
ValueError,
'Coordinate systems of x and y coordinates differ'):
rotate_winds(u, v, iris.coord_systems.OSGB())
def test_different_shape(self):
x = np.linspace(311.9, 391.1, 6)
y = np.linspace(-23.6, 24.8, 5)
u, _ = uv_cubes(x, y)
_, v = uv_cubes(x[:-1], y)
with self.assertRaisesRegexp(ValueError, 'same shape'):
rotate_winds(u, v, iris.coord_systems.OSGB())
def test_xy_dimensionality(self):
u, v = uv_cubes()
# Replace 1d lat with 2d lat.
x = u.coord('grid_longitude').points
y = u.coord('grid_latitude').points
x2d, y2d = np.meshgrid(x, y)
lat_2d = AuxCoord(y2d, 'grid_latitude', units='degrees',
coord_system=u.coord('grid_latitude').coord_system)
for cube in (u, v):
cube.remove_coord('grid_latitude')
cube.add_aux_coord(lat_2d.copy(), (0, 1))
with self.assertRaisesRegexp(
ValueError,
'x and y coordinates must have the same number of dimensions'):
rotate_winds(u, v, iris.coord_systems.OSGB())
def test_dim_mapping(self):
x = np.linspace(311.9, 391.1, 3)
y = np.linspace(-23.6, 24.8, 3)
u, v = uv_cubes(x, y)
v.transpose()
with self.assertRaisesRegexp(ValueError, 'Dimension mapping'):
rotate_winds(u, v, iris.coord_systems.OSGB())
class TestAnalyticComparison(tests.IrisTest):
@staticmethod
def _unrotate_equation(rotated_lons, rotated_lats,
rotated_us, rotated_vs, pole_lon, pole_lat):
# Perform a rotated-pole 'unrotate winds' transformation on arrays of
# rotated-lat, rotated-lon, u and v.
# This can be defined as an analytic function : cf. UMDP015
# Work out the rotation angles.
lambda_angle = np.radians(pole_lon - 180.0)
phi_angle = np.radians(90.0 - pole_lat)
# Get the locations in true lats+lons.
trueLongitude, trueLatitude = unrotate_pole(rotated_lons,
rotated_lats,
pole_lon,
pole_lat)
# Calculate inter-coordinate rotation coefficients.
cos_rot = (np.cos(np.radians(rotated_lons)) *
np.cos(np.radians(trueLongitude) - lambda_angle) +
np.sin(np.radians(rotated_lons)) *
np.sin(np.radians(trueLongitude) - lambda_angle) *
np.cos(phi_angle))
sin_rot = -((np.sin(np.radians(trueLongitude) - lambda_angle) *
np.sin(phi_angle)) /
np.cos(np.radians(rotated_lats)))
# Matrix-multiply to rotate the vectors.
u_true = rotated_us * cos_rot - rotated_vs * sin_rot
v_true = rotated_vs * cos_rot + rotated_us * sin_rot
return u_true, v_true
def _check_rotated_to_true(self, u_rot, v_rot, target_cs, **kwds):
# Run test calculation (numeric).
u_true, v_true = rotate_winds(u_rot, v_rot, target_cs)
# Perform same calculation via the reference method (equations).
cs_rot = u_rot.coord('grid_longitude').coord_system
pole_lat = cs_rot.grid_north_pole_latitude
pole_lon = cs_rot.grid_north_pole_longitude
rotated_lons = u_rot.coord('grid_longitude').points
rotated_lats = u_rot.coord('grid_latitude').points
rotated_lons_2d, rotated_lats_2d = np.meshgrid(
rotated_lons, rotated_lats)
rotated_u, rotated_v = u_rot.data, v_rot.data
u_ref, v_ref = self._unrotate_equation(rotated_lons_2d,
rotated_lats_2d,
rotated_u, rotated_v,
pole_lon, pole_lat)
# Check that all the numerical results are within given tolerances.
self.assertArrayAllClose(u_true.data, u_ref, **kwds)
self.assertArrayAllClose(v_true.data, v_ref, **kwds)
def test_rotated_to_true__small(self):
# Check for a small field with varying data.
target_cs = iris.coord_systems.GeogCS(6371229)
u_rot, v_rot = uv_cubes()
self._check_rotated_to_true(u_rot, v_rot, target_cs,
rtol=1e-5, atol=0.0005)
def test_rotated_to_true_global(self):
# Check for global fields with various constant wind values
# - constant in the rotated pole system, that is.
# We expect less accuracy where this gets close to the true poles.
target_cs = iris.coord_systems.GeogCS(6371229)
u_rot, v_rot = uv_cubes(x=np.arange(0, 360.0, 15),
y=np.arange(-89, 89, 10))
for vector in ((1, 0), (0, 1), (1, 1), (-3, -1.5)):
u_rot.data[...] = vector[0]
v_rot.data[...] = vector[1]
self._check_rotated_to_true(u_rot, v_rot, target_cs,
rtol=5e-4, atol=5e-4,
err_msg='vector={}'.format(vector))
class TestRotatedToOSGB(tests.IrisTest):
# Define some coordinate ranges for the uv_cubes 'standard' RotatedPole
# system, that exceed the OSGB margins, but not by "too much".
_rp_x_min, _rp_x_max = -5.0, 5.0
_rp_y_min, _rp_y_max = -5.0, 15.0
def _uv_cubes_limited_extent(self):
# Make test cubes suitable for transforming to OSGB, as the standard
# 'uv_cubes' result goes too far outside, leading to errors.
x = np.linspace(self._rp_x_min, self._rp_x_max, 6)
y = np.linspace(self._rp_y_min, self._rp_y_max, 5)
return uv_cubes(x=x, y=y)
def test_name(self):
u, v = self._uv_cubes_limited_extent()
u.rename('bob')
v.rename('alice')
ut, vt = rotate_winds(u, v, iris.coord_systems.OSGB())
self.assertEqual(ut.name(), 'transformed_' + u.name())
self.assertEqual(vt.name(), 'transformed_' + v.name())
def test_new_coords(self):
u, v = self._uv_cubes_limited_extent()
x = u.coord('grid_longitude').points
y = u.coord('grid_latitude').points
x2d, y2d = np.meshgrid(x, y)
src_crs = ccrs.RotatedPole(pole_longitude=177.5, pole_latitude=37.5)
tgt_crs = ccrs.OSGB()
xyz_tran = tgt_crs.transform_points(src_crs, x2d, y2d)
ut, vt = rotate_winds(u, v, iris.coord_systems.OSGB())
points = xyz_tran[..., 0].reshape(x2d.shape)
expected_x = AuxCoord(points,
standard_name='projection_x_coordinate',
units='m',
coord_system=iris.coord_systems.OSGB())
self.assertEqual(ut.coord('projection_x_coordinate'), expected_x)
self.assertEqual(vt.coord('projection_x_coordinate'), expected_x)
points = xyz_tran[..., 1].reshape(y2d.shape)
expected_y = AuxCoord(points,
standard_name='projection_y_coordinate',
units='m',
coord_system=iris.coord_systems.OSGB())
self.assertEqual(ut.coord('projection_y_coordinate'), expected_y)
self.assertEqual(vt.coord('projection_y_coordinate'), expected_y)
def test_new_coords_transposed(self):
u, v = self._uv_cubes_limited_extent()
        # Transpose the cubes so they are in xy order rather than the
        # typical yx order of meshgrid.
u.transpose()
v.transpose()
x = u.coord('grid_longitude').points
y = u.coord('grid_latitude').points
x2d, y2d = np.meshgrid(x, y)
src_crs = ccrs.RotatedPole(pole_longitude=177.5, pole_latitude=37.5)
tgt_crs = ccrs.OSGB()
xyz_tran = tgt_crs.transform_points(src_crs, x2d, y2d)
ut, vt = rotate_winds(u, v, iris.coord_systems.OSGB())
points = xyz_tran[..., 0].reshape(x2d.shape)
expected_x = AuxCoord(points,
standard_name='projection_x_coordinate',
units='m',
coord_system=iris.coord_systems.OSGB())
self.assertEqual(ut.coord('projection_x_coordinate'), expected_x)
self.assertEqual(vt.coord('projection_x_coordinate'), expected_x)
points = xyz_tran[..., 1].reshape(y2d.shape)
expected_y = AuxCoord(points,
standard_name='projection_y_coordinate',
units='m',
coord_system=iris.coord_systems.OSGB())
self.assertEqual(ut.coord('projection_y_coordinate'), expected_y)
self.assertEqual(vt.coord('projection_y_coordinate'), expected_y)
# Check dim mapping for 2d coords is yx.
expected_dims = (u.coord_dims('grid_latitude') +
u.coord_dims('grid_longitude'))
self.assertEqual(ut.coord_dims('projection_x_coordinate'),
expected_dims)
self.assertEqual(ut.coord_dims('projection_y_coordinate'),
expected_dims)
self.assertEqual(vt.coord_dims('projection_x_coordinate'),
expected_dims)
self.assertEqual(vt.coord_dims('projection_y_coordinate'),
expected_dims)
def test_orig_coords(self):
u, v = self._uv_cubes_limited_extent()
ut, vt = rotate_winds(u, v, iris.coord_systems.OSGB())
self.assertEqual(u.coord('grid_latitude'), ut.coord('grid_latitude'))
self.assertEqual(v.coord('grid_latitude'), vt.coord('grid_latitude'))
self.assertEqual(u.coord('grid_longitude'), ut.coord('grid_longitude'))
self.assertEqual(v.coord('grid_longitude'), vt.coord('grid_longitude'))
def test_magnitude_preservation(self):
u, v = self._uv_cubes_limited_extent()
ut, vt = rotate_winds(u, v, iris.coord_systems.OSGB())
orig_sq_mag = u.data**2 + v.data**2
res_sq_mag = ut.data**2 + vt.data**2
self.assertArrayAllClose(orig_sq_mag, res_sq_mag, rtol=5e-4)
def test_data_values(self):
u, v = self._uv_cubes_limited_extent()
# Slice out 4 points that lie in and outside OSGB extent.
u = u[1:3, 3:5]
v = v[1:3, 3:5]
ut, vt = rotate_winds(u, v, iris.coord_systems.OSGB())
# Values precalculated and checked.
expected_ut_data = np.array([[0.16285514, 0.35323639],
[1.82650698, 2.62455840]])
expected_vt_data = np.array([[19.88979966, 19.01921346],
[19.88018847, 19.01424281]])
# Compare u and v data values against previously calculated values.
self.assertArrayAllClose(ut.data, expected_ut_data, rtol=1e-5)
self.assertArrayAllClose(vt.data, expected_vt_data, rtol=1e-5)
def test_nd_data(self):
        u2d, _ = self._uv_cubes_limited_extent()
u, v = uv_cubes_3d(u2d)
u = u[:, 1:3, 3:5]
v = v[:, 1:3, 3:5]
ut, vt = rotate_winds(u, v, iris.coord_systems.OSGB())
# Values precalculated and checked (as test_data_values above),
# then scaled by factor [1, 2, 3] along 0th dim (see uv_cubes_3d()).
expected_ut_data = np.array([[0.16285514, 0.35323639],
[1.82650698, 2.62455840]])
expected_vt_data = np.array([[19.88979966, 19.01921346],
[19.88018847, 19.01424281]])
factor = np.array([1, 2, 3]).reshape(3, 1, 1)
expected_ut_data = factor * expected_ut_data
expected_vt_data = factor * expected_vt_data
# Compare u and v data values against previously calculated values.
self.assertArrayAlmostEqual(ut.data, expected_ut_data)
self.assertArrayAlmostEqual(vt.data, expected_vt_data)
def test_transposed(self):
# Test case where the coordinates are not ordered yx in the cube.
u, v = self._uv_cubes_limited_extent()
# Slice out 4 points that lie in and outside OSGB extent.
u = u[1:3, 3:5]
v = v[1:3, 3:5]
# Transpose cubes (in-place)
u.transpose()
v.transpose()
ut, vt = rotate_winds(u, v, iris.coord_systems.OSGB())
# Values precalculated and checked.
expected_ut_data = np.array([[0.16285514, 0.35323639],
[1.82650698, 2.62455840]]).T
expected_vt_data = np.array([[19.88979966, 19.01921346],
[19.88018847, 19.01424281]]).T
# Compare u and v data values against previously calculated values.
self.assertArrayAllClose(ut.data, expected_ut_data, rtol=1e-5)
self.assertArrayAllClose(vt.data, expected_vt_data, rtol=1e-5)
class TestMasking(tests.IrisTest):
def test_rotated_to_osgb(self):
# Rotated Pole data with large extent.
x = np.linspace(311.9, 391.1, 10)
y = np.linspace(-23.6, 24.8, 8)
u, v = uv_cubes(x, y)
ut, vt = rotate_winds(u, v, iris.coord_systems.OSGB())
# Ensure cells with discrepancies in magnitude are masked.
self.assertTrue(ma.isMaskedArray(ut.data))
self.assertTrue(ma.isMaskedArray(vt.data))
# Snapshot of mask with fixed tolerance of atol=2e-3
expected_mask = np.array([[1, 1, 1, 0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 1, 0, 0, 1, 1, 1],
[1, 1, 1, 1, 1, 0, 0, 1, 1, 1],
[1, 1, 1, 1, 1, 0, 0, 1, 1, 1]], np.bool)
self.assertArrayEqual(expected_mask, ut.data.mask)
self.assertArrayEqual(expected_mask, vt.data.mask)
# Check unmasked values have sufficiently small error in mag.
expected_mag = np.sqrt(u.data**2 + v.data**2)
# Use underlying data to ignore mask in calculation.
res_mag = np.sqrt(ut.data.data**2 + vt.data.data**2)
# Calculate percentage error (note there are no zero magnitudes
# so we can divide safely).
anom = 100.0 * np.abs(res_mag - expected_mag) / expected_mag
self.assertTrue(anom[~ut.data.mask].max() < 0.1)
def test_rotated_to_unrotated(self):
        # Sufficiently accurate so that no mask is introduced.
u, v = uv_cubes()
ut, vt = rotate_winds(u, v, iris.coord_systems.GeogCS(6371229))
self.assertFalse(ma.isMaskedArray(ut.data))
self.assertFalse(ma.isMaskedArray(vt.data))
class TestRoundTrip(tests.IrisTest):
def test_rotated_to_unrotated(self):
# Check ability to use 2d coords as input.
u, v = uv_cubes()
ut, vt = rotate_winds(u, v, iris.coord_systems.GeogCS(6371229))
# Remove grid lat and lon, leaving 2d projection coords.
ut.remove_coord('grid_latitude')
vt.remove_coord('grid_latitude')
ut.remove_coord('grid_longitude')
vt.remove_coord('grid_longitude')
# Change back.
orig_cs = u.coord('grid_latitude').coord_system
res_u, res_v = rotate_winds(ut, vt, orig_cs)
# Check data values - limited accuracy due to numerical approx.
self.assertArrayAlmostEqual(res_u.data, u.data, decimal=3)
self.assertArrayAlmostEqual(res_v.data, v.data, decimal=3)
# Check coords locations.
x2d, y2d = np.meshgrid(u.coord('grid_longitude').points,
u.coord('grid_latitude').points)
# Shift longitude from 0 to 360 -> -180 to 180.
x2d = np.where(x2d > 180, x2d - 360, x2d)
res_x = res_u.coord('projection_x_coordinate',
coord_system=orig_cs).points
res_y = res_u.coord('projection_y_coordinate',
coord_system=orig_cs).points
self.assertArrayAlmostEqual(res_x, x2d)
self.assertArrayAlmostEqual(res_y, y2d)
res_x = res_v.coord('projection_x_coordinate',
coord_system=orig_cs).points
res_y = res_v.coord('projection_y_coordinate',
coord_system=orig_cs).points
self.assertArrayAlmostEqual(res_x, x2d)
self.assertArrayAlmostEqual(res_y, y2d)
if __name__ == "__main__":
tests.main()
| QuLogic/iris | lib/iris/tests/unit/analysis/cartography/test_rotate_winds.py | Python | gpl-3.0 | 20,624 |
# -*- coding: utf-8 -*-
import math
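# Spherical-earth constants: EARTH_RADIUS is a mean earth radius in metres
# (used by the haversine calculation below); METERS_PER_DEGREE and
# FEET_PER_METER are conversion factors presumably used by callers.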
EARTH_RADIUS = 6367009
METERS_PER_DEGREE = 111319.0
FEET_PER_METER = 3.2808399
def geographic_distance(loc1, loc2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lat1, lon1, lat2, lon2 = map(math.radians,
[loc1.latitude, loc1.longitude,
loc2.latitude, loc2.longitude])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * \
math.cos(lat2) * math.sin(dlon / 2) ** 2
c = 2 * math.asin(math.sqrt(a))
return EARTH_RADIUS * c
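# Rough usage sketch: loc1/loc2 can be any objects exposing .latitude and
# .longitude in decimal degrees, e.g.
#   geographic_distance(paris, london)  ->  roughly 344000.0 (metres)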
| kerel-fs/skylines | skylines/lib/geo.py | Python | agpl-3.0 | 737 |
"""Support for Roku API emulation."""
import voluptuous as vol
from homeassistant import config_entries, util
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
from .binding import EmulatedRoku
from .config_flow import configured_servers
from .const import (
CONF_ADVERTISE_IP, CONF_ADVERTISE_PORT, CONF_HOST_IP, CONF_LISTEN_PORT,
CONF_SERVERS, CONF_UPNP_BIND_MULTICAST, DOMAIN)
SERVER_CONFIG_SCHEMA = vol.Schema({
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_LISTEN_PORT): cv.port,
vol.Optional(CONF_HOST_IP): cv.string,
vol.Optional(CONF_ADVERTISE_IP): cv.string,
vol.Optional(CONF_ADVERTISE_PORT): cv.port,
vol.Optional(CONF_UPNP_BIND_MULTICAST): cv.boolean
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_SERVERS):
vol.All(cv.ensure_list, [SERVER_CONFIG_SCHEMA]),
}),
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
"""Set up the emulated roku component."""
conf = config.get(DOMAIN)
if conf is None:
return True
existing_servers = configured_servers(hass)
for entry in conf[CONF_SERVERS]:
if entry[CONF_NAME] not in existing_servers:
hass.async_create_task(hass.config_entries.flow.async_init(
DOMAIN,
context={'source': config_entries.SOURCE_IMPORT},
data=entry
))
return True
async def async_setup_entry(hass, config_entry):
"""Set up an emulated roku server from a config entry."""
config = config_entry.data
if DOMAIN not in hass.data:
hass.data[DOMAIN] = {}
name = config[CONF_NAME]
listen_port = config[CONF_LISTEN_PORT]
host_ip = config.get(CONF_HOST_IP) or util.get_local_ip()
advertise_ip = config.get(CONF_ADVERTISE_IP)
advertise_port = config.get(CONF_ADVERTISE_PORT)
upnp_bind_multicast = config.get(CONF_UPNP_BIND_MULTICAST)
server = EmulatedRoku(hass, name, host_ip, listen_port,
advertise_ip, advertise_port, upnp_bind_multicast)
hass.data[DOMAIN][name] = server
return await server.setup()
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
name = entry.data[CONF_NAME]
server = hass.data[DOMAIN].pop(name)
return await server.unload()
| jabesq/home-assistant | homeassistant/components/emulated_roku/__init__.py | Python | apache-2.0 | 2,359 |
import time
import pytest
import logging
from repair_tests.repair_test import BaseRepairTest
since = pytest.mark.since
logger = logging.getLogger(__name__)
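# JVM flags that shrink the stream-deserializer buffers to a few bytes,
# presumably to force the spill-to-disk path when streaming legacy sstables;
# they are not referenced within this file.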
LEGACY_SSTABLES_JVM_ARGS = ["-Dcassandra.streamdes.initial_mem_buffer_size=1",
"-Dcassandra.streamdes.max_mem_buffer_size=5",
"-Dcassandra.streamdes.max_spill_file_size=16"]
# We don't support directly upgrading from 2.2 to 4.0 so disabling this on 4.0.
# TODO: we should probably not hardcode versions?
@pytest.mark.upgrade_test
@since('3.0', max_version='3.99')
class TestUpgradeRepair(BaseRepairTest):
@since('3.0', max_version='3.99')
def test_repair_after_upgrade(self):
"""
@jira_ticket CASSANDRA-10990
"""
default_install_dir = self.cluster.get_install_dir()
cluster = self.cluster
logger.debug("Setting version to 2.2.5")
cluster.set_install_dir(version="2.2.5")
self._populate_cluster()
self._do_upgrade(default_install_dir)
self._repair_and_verify(True)
def _do_upgrade(self, default_install_dir):
cluster = self.cluster
for node in cluster.nodelist():
logger.debug("Upgrading %s to current version" % node.name)
if node.is_running():
node.flush()
time.sleep(1)
node.stop(wait_other_notice=True)
node.set_install_dir(install_dir=default_install_dir)
node.start(wait_other_notice=True, wait_for_binary_proto=True)
cursor = self.patient_cql_connection(node)
cluster.set_install_dir(default_install_dir)
| aweisberg/cassandra-dtest | upgrade_tests/repair_test.py | Python | apache-2.0 | 1,656 |
# Copyright (c) 2011 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import exception
from nova import flags
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import volumeops
import nova.volume.driver
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
class XenSMDriver(nova.volume.driver.VolumeDriver):
def _convert_config_params(self, conf_str):
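        # Parse a whitespace-separated "key=value" string into a dict, e.g.
        # "server=10.0.0.1 serverpath=/vol" -> {'server': '10.0.0.1',
        # 'serverpath': '/vol'} (example keys are illustrative only).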
params = dict([item.split("=") for item in conf_str.split()])
return params
def _get_introduce_sr_keys(self, params):
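        # Build the list of config keys needed to re-introduce an existing SR:
        # every config parameter except 'name_label', plus 'sr_type'.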
if 'name_label' in params:
del params['name_label']
keys = params.keys()
keys.append('sr_type')
return keys
def _create_storage_repo(self, context, backend_ref):
"""Either creates or introduces SR on host
depending on whether it exists in xapi db."""
params = self._convert_config_params(backend_ref['config_params'])
if 'name_label' in params:
label = params['name_label']
del params['name_label']
else:
label = 'SR-' + str(backend_ref['id'])
params['sr_type'] = backend_ref['sr_type']
if backend_ref['sr_uuid'] is None:
# run the sr create command
try:
LOG.debug(_('SR name = %s') % label)
LOG.debug(_('Params: %s') % str(params))
sr_uuid = self._volumeops.create_sr(label, params)
# update sr_uuid and created in db
except Exception as ex:
LOG.debug(_("Failed to create sr %s...continuing") %
str(backend_ref['id']))
msg = _('Create failed')
raise exception.VolumeBackendAPIException(data=msg)
LOG.debug(_('SR UUID of new SR is: %s') % sr_uuid)
try:
self.db.sm_backend_conf_update(context,
backend_ref['id'],
dict(sr_uuid=sr_uuid))
except Exception as ex:
LOG.exception(ex)
msg = _("Failed to update db")
raise exception.VolumeBackendAPIException(data=msg)
else:
# sr introduce, if not already done
try:
self._volumeops.introduce_sr(backend_ref['sr_uuid'], label,
params)
except Exception as ex:
LOG.exception(ex)
LOG.debug(_("Failed to introduce sr %s...continuing")
% str(backend_ref['id']))
def _create_storage_repos(self, context):
"""Create/Introduce storage repositories at start."""
backends = self.db.sm_backend_conf_get_all(context)
for backend in backends:
try:
self._create_storage_repo(context, backend)
except Exception as ex:
LOG.exception(ex)
msg = _('Failed to reach backend %d') % backend['id']
raise exception.VolumeBackendAPIException(data=msg)
def __init__(self, *args, **kwargs):
"""Connect to the hypervisor."""
# This driver leverages Xen storage manager, and hence requires
# hypervisor to be Xen
if FLAGS.connection_type != 'xenapi':
msg = _('XenSMDriver requires xenapi connection')
raise exception.VolumeBackendAPIException(data=msg)
url = FLAGS.xenapi_connection_url
username = FLAGS.xenapi_connection_username
password = FLAGS.xenapi_connection_password
try:
session = xenapi_conn.XenAPISession(url, username, password)
self._volumeops = volumeops.VolumeOps(session)
except Exception as ex:
LOG.exception(ex)
msg = _("Failed to initiate session")
raise exception.VolumeBackendAPIException(data=msg)
super(XenSMDriver, self).__init__(execute=utils.execute,
sync_exec=utils.execute,
*args, **kwargs)
def do_setup(self, ctxt):
"""Setup includes creating or introducing storage repos
existing in the database and destroying deleted ones."""
# TODO(renukaapte) purge storage repos
self.ctxt = ctxt
self._create_storage_repos(ctxt)
def create_volume(self, volume):
"""Creates a logical volume. Can optionally return a Dictionary of
changes to the volume object to be persisted."""
# For now the scheduling logic will be to try to fit the volume in
# the first available backend.
# TODO(renukaapte) better scheduling once APIs are in place
sm_vol_rec = None
backends = self.db.sm_backend_conf_get_all(self.ctxt)
for backend in backends:
# Ensure that storage repo exists, if not create.
# This needs to be done because if nova compute and
# volume are both running on this host, then, as a
# part of detach_volume, compute could potentially forget SR
self._create_storage_repo(self.ctxt, backend)
sm_vol_rec = self._volumeops.create_volume_for_sm(volume,
backend['sr_uuid'])
if sm_vol_rec:
LOG.debug(_('Volume will be created in backend - %d')
% backend['id'])
break
if sm_vol_rec:
# Update db
sm_vol_rec['id'] = volume['id']
sm_vol_rec['backend_id'] = backend['id']
try:
self.db.sm_volume_create(self.ctxt, sm_vol_rec)
except Exception as ex:
LOG.exception(ex)
msg = _("Failed to update volume in db")
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = _('Unable to create volume')
raise exception.VolumeBackendAPIException(data=msg)
def delete_volume(self, volume):
vol_rec = self.db.sm_volume_get(self.ctxt, volume['id'])
if not vol_rec:
            raise exception.NotFound(_("Volume %s does not exist") %
                                     volume['id'])
try:
# If compute runs on this node, detach could have disconnected SR
backend_ref = self.db.sm_backend_conf_get(self.ctxt,
vol_rec['backend_id'])
self._create_storage_repo(self.ctxt, backend_ref)
self._volumeops.delete_volume_for_sm(vol_rec['vdi_uuid'])
except Exception as ex:
LOG.exception(ex)
msg = _("Failed to delete vdi")
raise exception.VolumeBackendAPIException(data=msg)
try:
self.db.sm_volume_delete(self.ctxt, volume['id'])
except Exception as ex:
LOG.exception(ex)
msg = _("Failed to delete volume in db")
raise exception.VolumeBackendAPIException(data=msg)
def local_path(self, volume):
return str(volume['id'])
def undiscover_volume(self, volume):
"""Undiscover volume on a remote host."""
pass
def discover_volume(self, context, volume):
return str(volume['id'])
def check_for_setup_error(self):
pass
def create_export(self, context, volume):
"""Exports the volume."""
pass
def remove_export(self, context, volume):
"""Removes an export for a logical volume."""
pass
def ensure_export(self, context, volume):
"""Safely, synchronously recreates an export for a logical volume."""
pass
def initialize_connection(self, volume, connector):
try:
xensm_properties = dict(self.db.sm_volume_get(self.ctxt,
volume['id']))
except Exception as ex:
LOG.exception(ex)
msg = _("Failed to find volume in db")
raise exception.VolumeBackendAPIException(data=msg)
# Keep the volume id key consistent with what ISCSI driver calls it
xensm_properties['volume_id'] = xensm_properties['id']
del xensm_properties['id']
try:
backend_conf = self.db.sm_backend_conf_get(self.ctxt,
xensm_properties['backend_id'])
except Exception as ex:
LOG.exception(ex)
msg = _("Failed to find backend in db")
raise exception.VolumeBackendAPIException(data=msg)
params = self._convert_config_params(backend_conf['config_params'])
xensm_properties['flavor_id'] = backend_conf['flavor_id']
xensm_properties['sr_uuid'] = backend_conf['sr_uuid']
xensm_properties['sr_type'] = backend_conf['sr_type']
xensm_properties.update(params)
_introduce_sr_keys = self._get_introduce_sr_keys(params)
xensm_properties['introduce_sr_keys'] = _introduce_sr_keys
return {
'driver_volume_type': 'xensm',
'data': xensm_properties
}
def terminate_connection(self, volume, connector):
pass
| tylertian/Openstack | openstack F/nova/nova/volume/xensm.py | Python | apache-2.0 | 9,841 |
"""Support for Hydrawise cloud switches."""
import logging
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchDevice
from homeassistant.const import CONF_MONITORED_CONDITIONS
import homeassistant.helpers.config_validation as cv
from . import (
ALLOWED_WATERING_TIME, CONF_WATERING_TIME, DATA_HYDRAWISE,
DEFAULT_WATERING_TIME, DEVICE_MAP, DEVICE_MAP_INDEX, SWITCHES,
HydrawiseEntity)
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_MONITORED_CONDITIONS, default=SWITCHES):
vol.All(cv.ensure_list, [vol.In(SWITCHES)]),
vol.Optional(CONF_WATERING_TIME, default=DEFAULT_WATERING_TIME):
vol.All(vol.In(ALLOWED_WATERING_TIME)),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up a sensor for a Hydrawise device."""
hydrawise = hass.data[DATA_HYDRAWISE].data
default_watering_timer = config.get(CONF_WATERING_TIME)
sensors = []
for sensor_type in config.get(CONF_MONITORED_CONDITIONS):
# Create a switch for each zone
for zone in hydrawise.relays:
sensors.append(
HydrawiseSwitch(default_watering_timer, zone, sensor_type))
add_entities(sensors, True)
class HydrawiseSwitch(HydrawiseEntity, SwitchDevice):
"""A switch implementation for Hydrawise device."""
def __init__(self, default_watering_timer, *args):
"""Initialize a switch for Hydrawise device."""
super().__init__(*args)
self._default_watering_timer = default_watering_timer
@property
def is_on(self):
"""Return true if device is on."""
return self._state
def turn_on(self, **kwargs):
"""Turn the device on."""
if self._sensor_type == 'manual_watering':
self.hass.data[DATA_HYDRAWISE].data.run_zone(
self._default_watering_timer, (self.data['relay']-1))
elif self._sensor_type == 'auto_watering':
self.hass.data[DATA_HYDRAWISE].data.suspend_zone(
0, (self.data['relay']-1))
def turn_off(self, **kwargs):
"""Turn the device off."""
if self._sensor_type == 'manual_watering':
self.hass.data[DATA_HYDRAWISE].data.run_zone(
0, (self.data['relay']-1))
elif self._sensor_type == 'auto_watering':
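            # Suspending for 365 days effectively disables automatic watering;
            # turn_on() suspends for 0 days to re-enable it.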
self.hass.data[DATA_HYDRAWISE].data.suspend_zone(
365, (self.data['relay']-1))
def update(self):
"""Update device state."""
mydata = self.hass.data[DATA_HYDRAWISE].data
_LOGGER.debug("Updating Hydrawise switch: %s", self._name)
if self._sensor_type == 'manual_watering':
if not mydata.running:
self._state = False
else:
self._state = int(
mydata.running[0]['relay']) == self.data['relay']
elif self._sensor_type == 'auto_watering':
for relay in mydata.relays:
if relay['relay'] == self.data['relay']:
if relay.get('suspended') is not None:
self._state = False
else:
self._state = True
break
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return DEVICE_MAP[self._sensor_type][
DEVICE_MAP_INDEX.index('ICON_INDEX')]
| MartinHjelmare/home-assistant | homeassistant/components/hydrawise/switch.py | Python | apache-2.0 | 3,451 |
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Index for object_folders
Revision ID: 1efacad0fff5
Revises: 4d7ce1eaddf2
Create Date: 2014-09-12 21:11:35.908034
"""
# revision identifiers, used by Alembic.
revision = '1efacad0fff5'
down_revision = '4d7ce1eaddf2'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_index('ix_folderable_id_type', 'object_folders', ['folderable_type','folderable_id'])
pass
def downgrade():
op.drop_index('ix_folderable_id_type', table_name='object_folders')
pass
| NejcZupec/ggrc-core | src/ggrc_gdrive_integration/migrations/versions/20140912211135_1efacad0fff5_index_for_object_folders.py | Python | apache-2.0 | 609 |
"""Base test class for nbconvert"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import io
import os
import glob
import shlex
import shutil
import sys
import unittest
import nbconvert
from subprocess import Popen, PIPE
import nose.tools as nt
from nbformat import v4, write
from testpath.tempdir import TemporaryWorkingDirectory
from ipython_genutils.py3compat import string_types, bytes_to_str
class TestsBase(unittest.TestCase):
"""Base tests class. Contains useful fuzzy comparison and nbconvert
functions."""
def fuzzy_compare(self, a, b, newlines_are_spaces=True, tabs_are_spaces=True,
fuzzy_spacing=True, ignore_spaces=False,
ignore_newlines=False, case_sensitive=False, leave_padding=False):
"""
Performs a fuzzy comparison of two strings. A fuzzy comparison is a
comparison that ignores insignificant differences in the two comparands.
The significance of certain differences can be specified via the keyword
parameters of this method.
"""
if not leave_padding:
a = a.strip()
b = b.strip()
if ignore_newlines:
a = a.replace('\n', '')
b = b.replace('\n', '')
if newlines_are_spaces:
a = a.replace('\n', ' ')
b = b.replace('\n', ' ')
if tabs_are_spaces:
a = a.replace('\t', ' ')
b = b.replace('\t', ' ')
if ignore_spaces:
a = a.replace(' ', '')
b = b.replace(' ', '')
if fuzzy_spacing:
a = self.recursive_replace(a, ' ', ' ')
b = self.recursive_replace(b, ' ', ' ')
if not case_sensitive:
a = a.lower()
b = b.lower()
self.assertEqual(a, b)
def recursive_replace(self, text, search, replacement):
"""
Performs a recursive replacement operation. Replaces all instances
of a search string in a text string with a replacement string until
the search string no longer exists. Recursion is needed because the
replacement string may generate additional search strings.
For example:
            Replacing "ii" with "i" in the string "Hiiii" yields "Hii";
            a second replacement yields "Hi" (the desired output).
Parameters
----------
text : string
Text to replace in.
search : string
String to search for within "text"
replacement : string
String to replace "search" with
"""
while search in text:
text = text.replace(search, replacement)
return text
def create_temp_cwd(self, copy_filenames=None):
temp_dir = TemporaryWorkingDirectory()
#Copy the files if requested.
if copy_filenames is not None:
self.copy_files_to(copy_filenames, dest=temp_dir.name)
#Return directory handler
return temp_dir
def create_empty_notebook(self, path):
nb = v4.new_notebook()
with io.open(path, 'w', encoding='utf-8') as f:
write(nb, f, 4)
def copy_files_to(self, copy_filenames, dest='.'):
"Copy test files into the destination directory"
if not os.path.isdir(dest):
os.makedirs(dest)
files_path = self._get_files_path()
for pattern in copy_filenames:
files = glob.glob(os.path.join(files_path, pattern))
assert files
for match in files:
shutil.copyfile(match, os.path.join(dest, os.path.basename(match)))
def _get_files_path(self):
#Get the relative path to this module in the IPython directory.
names = self.__module__.split('.')[1:-1]
names.append('files')
#Build a path using the nbconvert directory and the relative path we just
#found.
path = os.path.dirname(nbconvert.__file__)
return os.path.join(path, *names)
def nbconvert(self, parameters, ignore_return_code=False, stdin=None):
"""
Run nbconvert as a shell command, listening for both Errors and
non-zero return codes. Returns the tuple (stdout, stderr) of
output produced during the nbconvert run.
Parameters
----------
parameters : str, list(str)
List of parameters to pass to IPython.
ignore_return_code : optional bool (default False)
            Throw an OSError if the return code is non-zero; set to True to
            suppress that check.
"""
if isinstance(parameters, string_types):
parameters = shlex.split(parameters)
cmd = [sys.executable, '-m', 'nbconvert'] + parameters
p = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE)
stdout, stderr = p.communicate(input=stdin)
if not (p.returncode == 0 or ignore_return_code):
raise OSError(bytes_to_str(stderr))
return stdout.decode('utf8', 'replace'), stderr.decode('utf8', 'replace')
def assert_big_text_equal(a, b, chunk_size=80):
"""assert that large strings are equal
Zooms in on first chunk that differs,
to give better info than vanilla assertEqual for large text blobs.
"""
for i in range(0, len(a), chunk_size):
chunk_a = a[i:i + chunk_size]
chunk_b = b[i:i + chunk_size]
nt.assert_equal(chunk_a, chunk_b, "[offset: %i]\n%r != \n%r" % (
i, chunk_a, chunk_b))
if len(a) > len(b):
nt.fail("Length doesn't match (%i > %i). Extra text:\n%r" % (
len(a), len(b), a[len(b):]
))
elif len(a) < len(b):
nt.fail("Length doesn't match (%i < %i). Extra text:\n%r" % (
len(a), len(b), b[len(a):]
))
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/nbconvert/tests/base.py | Python | bsd-2-clause | 5,773 |
from sqlalchemy import *
from sqlalchemy.test import *
class FoundRowsTest(TestBase, AssertsExecutionResults):
"""tests rowcount functionality"""
__requires__ = ('sane_rowcount', )
@classmethod
def setup_class(cls):
global employees_table, metadata
metadata = MetaData(testing.db)
employees_table = Table('employees', metadata,
Column('employee_id', Integer,
Sequence('employee_id_seq', optional=True),
primary_key=True),
Column('name', String(50)),
Column('department', String(1)),
)
metadata.create_all()
def setup(self):
global data
data = [ ('Angela', 'A'),
('Andrew', 'A'),
('Anand', 'A'),
('Bob', 'B'),
('Bobette', 'B'),
('Buffy', 'B'),
('Charlie', 'C'),
('Cynthia', 'C'),
('Chris', 'C') ]
i = employees_table.insert()
i.execute(*[{'name':n, 'department':d} for n, d in data])
def teardown(self):
employees_table.delete().execute()
@classmethod
def teardown_class(cls):
metadata.drop_all()
def testbasic(self):
s = employees_table.select()
r = s.execute().fetchall()
assert len(r) == len(data)
def test_update_rowcount1(self):
# WHERE matches 3, 3 rows changed
department = employees_table.c.department
r = employees_table.update(department=='C').execute(department='Z')
print "expecting 3, dialect reports %s" % r.rowcount
assert r.rowcount == 3
def test_update_rowcount2(self):
# WHERE matches 3, 0 rows changed
department = employees_table.c.department
r = employees_table.update(department=='C').execute(department='C')
print "expecting 3, dialect reports %s" % r.rowcount
assert r.rowcount == 3
def test_delete_rowcount(self):
# WHERE matches 3, 3 rows deleted
department = employees_table.c.department
r = employees_table.delete(department=='C').execute()
print "expecting 3, dialect reports %s" % r.rowcount
assert r.rowcount == 3
| dbbhattacharya/kitsune | vendor/packages/sqlalchemy/test/sql/test_rowcount.py | Python | bsd-3-clause | 2,268 |
from datetime import datetime
from decimal import Decimal
from django.contrib.sites.models import Site
from django.db import models
from django.utils.translation import ugettext_lazy as _
from l10n.utils import moneyfmt
from payment.modules.giftcertificate.utils import generate_certificate_code
from payment.utils import get_processor_by_key
from product.models import Product
from satchmo_store.contact.models import Contact
from satchmo_store.shop.models import Order
import logging
GIFTCODE_KEY = 'GIFTCODE'
log = logging.getLogger('giftcertificate.models')
SATCHMO_PRODUCT = True
def get_product_types():
return ("GiftcertificateProduct",)
class GiftCertificateManager(models.Manager):
def from_order(self, order):
code = order.get_variable(GIFTCODE_KEY, "")
log.debug("GiftCert.from_order code=%s", code)
if code:
site = order.site
return GiftCertificate.objects.get(code__exact=code.value, valid__exact=True, site=site)
raise GiftCertificate.DoesNotExist()
class GiftCertificate(models.Model):
"""A Gift Cert which holds value."""
site = models.ForeignKey(Site, null=True, blank=True, verbose_name=_('Site'))
order = models.ForeignKey(Order, null=True, blank=True, related_name="giftcertificates", verbose_name=_('Order'))
code = models.CharField(_('Certificate Code'), max_length=100,
blank=True, null=True)
purchased_by = models.ForeignKey(Contact, verbose_name=_('Purchased by'),
blank=True, null=True, related_name='giftcertificates_purchased')
date_added = models.DateField(_("Date added"), null=True, blank=True)
valid = models.BooleanField(_('Valid'), default=True)
message = models.CharField(_('Message'), blank=True, null=True, max_length=255)
recipient_email = models.EmailField(_("Email"), blank=True, max_length=75)
start_balance = models.DecimalField(_("Starting Balance"), decimal_places=2,
max_digits=8)
objects = GiftCertificateManager()
def balance(self):
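        # Remaining value: the starting balance minus every recorded usage.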
b = Decimal(self.start_balance)
for usage in self.usages.all():
log.info('usage: %s' % usage)
b = b - Decimal(usage.balance_used)
return b
balance = property(balance)
def apply_to_order(self, order):
"""Apply up to the full amount of the balance of this cert to the order.
Returns new balance.
"""
amount = min(order.balance, self.balance)
log.info('applying %s from giftcert #%i [%s] to order #%i [%s]',
moneyfmt(amount),
self.id,
moneyfmt(self.balance),
order.id,
moneyfmt(order.balance))
processor = get_processor_by_key('PAYMENT_GIFTCERTIFICATE')
orderpayment = processor.record_payment(order=order, amount=amount)
self.orderpayment = orderpayment
return self.use(amount, orderpayment=orderpayment)
def use(self, amount, notes="", orderpayment=None):
"""Use some amount of the gift cert, returning the current balance."""
u = GiftCertificateUsage(notes=notes, balance_used = amount,
orderpayment=orderpayment, giftcertificate=self)
u.save()
return self.balance
def save(self, **kwargs):
if not self.pk:
self.date_added = datetime.now()
if not self.code:
self.code = generate_certificate_code()
if not self.site:
self.site = Site.objects.get_current()
super(GiftCertificate, self).save(**kwargs)
def __unicode__(self):
sb = moneyfmt(self.start_balance)
b = moneyfmt(self.balance)
return u"Gift Cert: %s/%s" % (sb, b)
class Meta:
verbose_name = _("Gift Certificate")
verbose_name_plural = _("Gift Certificates")
class GiftCertificateUsage(models.Model):
"""Any usage of a Gift Cert is logged with one of these objects."""
usage_date = models.DateField(_("Date of usage"), null=True, blank=True)
notes = models.TextField(_('Notes'), blank=True, null=True)
balance_used = models.DecimalField(_("Amount Used"), decimal_places=2,
max_digits=8, )
orderpayment = models.ForeignKey('shop.OrderPayment', null=True, verbose_name=_('Order Payment'))
used_by = models.ForeignKey(Contact, verbose_name=_('Used by'),
blank=True, null=True, related_name='giftcertificates_used')
giftcertificate = models.ForeignKey(GiftCertificate, related_name='usages')
def __unicode__(self):
return u"GiftCertificateUsage: %s" % self.balance_used
def save(self, **kwargs):
if not self.pk:
self.usage_date = datetime.now()
super(GiftCertificateUsage, self).save(**kwargs)
class GiftCertificateProduct(models.Model):
"""
The product model for a Gift Certificate
"""
product = models.OneToOneField(Product, verbose_name=_('Product'), primary_key=True)
is_shippable = False
discountable = False
def __unicode__(self):
return u"GiftCertificateProduct: %s" % self.product.name
def _get_subtype(self):
return 'GiftCertificateProduct'
def order_success(self, order, order_item):
log.debug("Order success called, creating gift certs on order: %s", order)
message = ""
email = ""
for detl in order_item.orderitemdetail_set.all():
if detl.name == "email":
email = detl.value
elif detl.name == "message":
message = detl.value
price=order_item.line_item_price
log.debug("Creating gc for %s", price)
gc = GiftCertificate(
order = order,
start_balance= price,
purchased_by = order.contact,
valid=True,
message=message,
recipient_email=email
)
gc.save()
def save(self, **kwargs):
if hasattr(self.product,'_sub_types'):
del self.product._sub_types
super(GiftCertificateProduct, self).save(**kwargs)
class Meta:
verbose_name = _("Gift certificate product")
verbose_name_plural = _("Gift certificate products")
import config
PAYMENT_PROCESSOR=True
| russellmayhew/satchmo | satchmo/apps/payment/modules/giftcertificate/models.py | Python | bsd-3-clause | 6,200 |
##########################################################################
#
# Copyright (c) 2009-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import os
import IECore
import IECoreRI
class TeapotProcedural( IECore.ParameterisedProcedural ) :
def __init__( self ) :
IECore.ParameterisedProcedural.__init__( self )
self.boundCalled = False
self.renderCalled = False
self.renderStateCalled = False
def doBound( self, args ) :
self.boundCalled = True
return IECore.Box3f( IECore.V3f( -1 ), IECore.V3f( 1 ) )
def doRenderState( self, renderer, args ) :
self.renderStateCalled = True
renderer.setAttribute( "ri:visibility:diffuse", IECore.BoolData( 1 ) )
def doRender( self, renderer, args ) :
self.renderCalled = True
renderer.geometry( "teapot", {}, {} )
class ParameterisedProceduralTest( IECoreRI.TestCase ) :
def checkContents( self, fileName, expectedElements, unexpectedElements ) :
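        # Assert that each expected element appears in the RIB file in order,
        # and that none of the unexpected elements appear at all.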
l = file( fileName ).readlines()
lineIndex = 0
for expected in expectedElements :
found = False
for i in range( lineIndex, len( l ) ) :
if expected in l[i] :
lineIndex = i
found = True
break
self.assert_( found )
for e in unexpectedElements :
for ll in l :
self.assert_( e not in ll )
def testNormalCall( self ) :
r = IECoreRI.Renderer( "test/IECoreRI/output/testParameterisedProcedural.rib" )
r.worldBegin()
t = TeapotProcedural()
t.render( r )
r.worldEnd()
self.checkContents(
"test/IECoreRI/output/testParameterisedProcedural.rib",
[
"AttributeBegin",
"Attribute \"visibility\" \"int diffuse\" [ 1 ]",
"Geometry \"teapot\"",
"AttributeEnd",
],
[]
)
self.assertEqual( t.renderStateCalled, True )
self.assertEqual( t.boundCalled, True )
self.assertEqual( t.renderCalled, True )
def testStateOnly( self ) :
r = IECoreRI.Renderer( "test/IECoreRI/output/testParameterisedProcedural.rib" )
r.worldBegin()
t = TeapotProcedural()
t.render( r, inAttributeBlock=False, withState=True, withGeometry=False )
r.worldEnd()
self.checkContents(
"test/IECoreRI/output/testParameterisedProcedural.rib",
[
"Attribute \"visibility\" \"int diffuse\" [ 1 ]",
],
[
"AttributeBegin",
"Geometry \"teapot\"",
"AttributeEnd",
],
)
self.assertEqual( t.renderStateCalled, True )
self.assertEqual( t.boundCalled, False )
self.assertEqual( t.renderCalled, False )
def testImmediateGeometryOnly( self ) :
r = IECoreRI.Renderer( "test/IECoreRI/output/testParameterisedProcedural.rib" )
r.worldBegin()
t = TeapotProcedural()
t.render( r, inAttributeBlock=False, withState=False, withGeometry=True, immediateGeometry=True )
r.worldEnd()
self.checkContents(
"test/IECoreRI/output/testParameterisedProcedural.rib",
[
"Geometry \"teapot\"",
],
[
"AttributeBegin",
"Attribute \"visibility\" \"int diffuse\" [ 1 ]",
"AttributeEnd",
],
)
self.assertEqual( t.renderStateCalled, False )
self.assertEqual( t.boundCalled, False )
self.assertEqual( t.renderCalled, True )
if __name__ == "__main__":
unittest.main()
| lento/cortex | test/IECoreRI/ParameterisedProcedural.py | Python | bsd-3-clause | 4,780 |
import dask.dataframe as dd
import pandas.util.testing as tm
import pandas as pd
from dask.dataframe.shuffle import shuffle
import partd
from dask.async import get_sync
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [1, 4, 7]},
index=[0, 1, 3]),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [2, 5, 8]},
index=[5, 6, 8]),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [3, 6, 9]},
index=[9, 9, 9])}
d = dd.DataFrame(dsk, 'x', ['a', 'b'], [0, 4, 9, 9])
full = d.compute()
def test_shuffle():
s = shuffle(d, d.b, npartitions=2)
assert isinstance(s, dd.DataFrame)
assert s.npartitions == 2
x = get_sync(s.dask, (s._name, 0))
y = get_sync(s.dask, (s._name, 1))
assert not (set(x.b) & set(y.b)) # disjoint
assert shuffle(d, d.b, npartitions=2)._name == shuffle(d, d.b, npartitions=2)._name
def test_default_partitions():
assert shuffle(d, d.b).npartitions == d.npartitions
def test_index_with_non_series():
tm.assert_frame_equal(shuffle(d, d.b).compute(),
shuffle(d, 'b').compute())
def test_index_with_dataframe():
assert sorted(shuffle(d, d[['b']]).compute().values.tolist()) ==\
sorted(shuffle(d, ['b']).compute().values.tolist()) ==\
sorted(shuffle(d, 'b').compute().values.tolist())
def test_shuffle_from_one_partition_to_one_other():
df = pd.DataFrame({'x': [1, 2, 3]})
a = dd.from_pandas(df, 1)
for i in [1, 2]:
b = shuffle(a, 'x', i)
assert len(a.compute(get=get_sync)) == len(b.compute(get=get_sync))
| vikhyat/dask | dask/dataframe/tests/test_shuffle.py | Python | bsd-3-clause | 1,642 |
import argparse
import os
import subprocess
import mongoengine as me
import rmc.html_snapshots.utils as utils
import rmc.shared.constants as c
def crawl_page(url):
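    # Render the page with PhantomJS so client-side JavaScript runs before the
    # HTML is captured (the disk cache speeds up repeated crawls).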
args = [
'phantomjs',
'--disk-cache=true',
os.path.join(utils.FILE_DIR, 'phantom-server.js'),
url,
]
rendered_html = subprocess.check_output(args)
return rendered_html
def generate_snapshots(base_url, overwrite=False):
urls = utils.generate_urls()
for url in urls:
# For urls that end with a trailing slash, create them
# as the index page of a subdirectory
if url and url[0] == '/':
url = url[1:]
if not url:
file_path = 'index'
file_url = ''
elif url[-1] == '/':
file_path = url + 'index'
file_url = url
else:
file_path = url
file_url = url
file_path = os.path.join(utils.HTML_DIR, file_path)
if os.path.isdir(file_path):
print 'Cannot have file_path that is directory: %s' % file_path
if not overwrite and os.path.isfile(file_path):
continue
full_url = os.path.join(base_url, file_url)
rendered_html = crawl_page(full_url)
print 'Writing: %s' % url
utils.write(file_path, rendered_html)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Process snapshotting arguments')
parser.add_argument('base_url', type=str)
parser.add_argument('--overwrite', dest='overwrite', action='store_true')
args = parser.parse_args()
me.connect(c.MONGO_DB_RMC, host=c.MONGO_HOST, port=c.MONGO_PORT)
generate_snapshots(args.base_url, args.overwrite)
| JGulbronson/rmc | html_snapshots/snapshot.py | Python | mit | 1,734 |
#!/usr/bin/env python3.4
# -*- coding: utf-8 -*-
| welblade/pyrom | test/__init__.py | Python | mit | 49 |
from sympy import symbols, Symbol, exp, log, pi, Rational, S
from sympy.codegen.cfunctions import (
expm1, log1p, exp2, log2, fma, log10, Sqrt, Cbrt, hypot
)
from sympy.core.function import expand_log
def test_expm1():
# Eval
assert expm1(0) == 0
x = Symbol('x', real=True, finite=True)
# Expand and rewrite
assert expm1(x).expand(func=True) - exp(x) == -1
assert expm1(x).rewrite('tractable') - exp(x) == -1
assert expm1(x).rewrite('exp') - exp(x) == -1
# Precision
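    # expm1(x) avoids the catastrophic cancellation that exp(x) - 1 suffers for
    # tiny x, so the second-order term (x**2/2 = 5e-21 here) survives.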
assert not ((exp(1e-10).evalf() - 1) - 1e-10 - 5e-21) < 1e-22 # for comparison
assert abs(expm1(1e-10).evalf() - 1e-10 - 5e-21) < 1e-22
# Properties
assert expm1(x).is_real
assert expm1(x).is_finite
# Diff
assert expm1(42*x).diff(x) - 42*exp(42*x) == 0
assert expm1(42*x).diff(x) - expm1(42*x).expand(func=True).diff(x) == 0
def test_log1p():
# Eval
assert log1p(0) == 0
d = S(10)
assert expand_log(log1p(d**-1000) - log(d**1000 + 1) + log(d**1000)) == 0
x = Symbol('x', real=True, finite=True)
# Expand and rewrite
assert log1p(x).expand(func=True) - log(x + 1) == 0
assert log1p(x).rewrite('tractable') - log(x + 1) == 0
assert log1p(x).rewrite('log') - log(x + 1) == 0
# Precision
assert not abs(log(1e-99 + 1).evalf() - 1e-99) < 1e-100 # for comparison
assert abs(expand_log(log1p(1e-99)).evalf() - 1e-99) < 1e-100
# Properties
assert log1p(-2**(-S(1)/2)).is_real
assert not log1p(-1).is_finite
assert log1p(pi).is_finite
assert not log1p(x).is_positive
assert log1p(Symbol('y', positive=True)).is_positive
assert not log1p(x).is_zero
assert log1p(Symbol('z', zero=True)).is_zero
assert not log1p(x).is_nonnegative
assert log1p(Symbol('o', nonnegative=True)).is_nonnegative
# Diff
assert log1p(42*x).diff(x) - 42/(42*x + 1) == 0
assert log1p(42*x).diff(x) - log1p(42*x).expand(func=True).diff(x) == 0
def test_exp2():
# Eval
assert exp2(2) == 4
x = Symbol('x', real=True, finite=True)
# Expand
assert exp2(x).expand(func=True) - 2**x == 0
# Diff
assert exp2(42*x).diff(x) - 42*exp2(42*x)*log(2) == 0
    assert exp2(42*x).diff(x) - exp2(42*x).expand(func=True).diff(x) == 0
def test_log2():
# Eval
assert log2(8) == 3
assert log2(pi) != log(pi)/log(2) # log2 should *save* (CPU) instructions
x = Symbol('x', real=True, finite=True)
assert log2(x) != log(x)/log(2)
assert log2(2**x) == x
# Expand
assert log2(x).expand(func=True) - log(x)/log(2) == 0
# Diff
assert log2(42*x).diff() - 1/(log(2)*x) == 0
assert log2(42*x).diff() - log2(42*x).expand(func=True).diff(x) == 0
def test_fma():
x, y, z = symbols('x y z')
# Expand
assert fma(x, y, z).expand(func=True) - x*y - z == 0
expr = fma(17*x, 42*y, 101*z)
# Diff
assert expr.diff(x) - expr.expand(func=True).diff(x) == 0
assert expr.diff(y) - expr.expand(func=True).diff(y) == 0
assert expr.diff(z) - expr.expand(func=True).diff(z) == 0
assert expr.diff(x) - 17*42*y == 0
assert expr.diff(y) - 17*42*x == 0
assert expr.diff(z) - 101 == 0
def test_log10():
x = Symbol('x')
# Expand
assert log10(x).expand(func=True) - log(x)/log(10) == 0
# Diff
assert log10(42*x).diff(x) - 1/(log(10)*x) == 0
assert log10(42*x).diff(x) - log10(42*x).expand(func=True).diff(x) == 0
def test_Cbrt():
x = Symbol('x')
# Expand
assert Cbrt(x).expand(func=True) - x**Rational(1, 3) == 0
# Diff
assert Cbrt(42*x).diff(x) - 42*(42*x)**(Rational(1, 3) - 1)/3 == 0
assert Cbrt(42*x).diff(x) - Cbrt(42*x).expand(func=True).diff(x) == 0
def test_Sqrt():
x = Symbol('x')
# Expand
assert Sqrt(x).expand(func=True) - x**Rational(1, 2) == 0
# Diff
assert Sqrt(42*x).diff(x) - 42*(42*x)**(Rational(1, 2) - 1)/2 == 0
assert Sqrt(42*x).diff(x) - Sqrt(42*x).expand(func=True).diff(x) == 0
def test_hypot():
x, y = symbols('x y')
# Expand
assert hypot(x, y).expand(func=True) - (x**2 + y**2)**Rational(1, 2) == 0
# Diff
assert hypot(17*x, 42*y).diff(x).expand(func=True) - hypot(17*x, 42*y).expand(func=True).diff(x) == 0
assert hypot(17*x, 42*y).diff(y).expand(func=True) - hypot(17*x, 42*y).expand(func=True).diff(y) == 0
assert hypot(17*x, 42*y).diff(x).expand(func=True) - 2*17*17*x*((17*x)**2 + (42*y)**2)**Rational(-1, 2)/2 == 0
assert hypot(17*x, 42*y).diff(y).expand(func=True) - 2*42*42*y*((17*x)**2 + (42*y)**2)**Rational(-1, 2)/2 == 0
| wxgeo/geophar | wxgeometrie/sympy/codegen/tests/test_cfunctions.py | Python | gpl-2.0 | 4,553 |
# https://djangosnippets.org/snippets/2566/
from django import template
from django.template import resolve_variable, NodeList
from django.contrib.auth.models import Group
register = template.Library()
@register.tag()
def ifusergroup(parser, token):
""" Check to see if the currently logged in user belongs to a specific
group. Requires the Django authentication contrib app and middleware.
Usage: {% ifusergroup Admins %} ... {% endifusergroup %}, or
{% ifusergroup Admins|Group1|"Group 2" %} ... {% endifusergroup %}, or
{% ifusergroup Admins %} ... {% else %} ... {% endifusergroup %}
"""
try:
_, group = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError("Tag 'ifusergroup' requires 1 argument.")
nodelist_true = parser.parse(('else', 'endifusergroup'))
token = parser.next_token()
if token.contents == 'else':
nodelist_false = parser.parse(('endifusergroup',))
parser.delete_first_token()
else:
nodelist_false = NodeList()
return GroupCheckNode(group, nodelist_true, nodelist_false)
class GroupCheckNode(template.Node):
def __init__(self, group, nodelist_true, nodelist_false):
self.group = group
self.nodelist_true = nodelist_true
self.nodelist_false = nodelist_false
def render(self, context):
user = resolve_variable('user', context)
if not user.is_authenticated():
return self.nodelist_false.render(context)
for group in self.group.split("|"):
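            # Strip surrounding double quotes so group names containing spaces
            # (e.g. "Group 2") can be used.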
group = group[1:-1] if group.startswith('"') and group.endswith('"') else group
try:
if Group.objects.get(name=group) in user.groups.all():
return self.nodelist_true.render(context)
except Group.DoesNotExist:
pass
return self.nodelist_false.render(context)
| mupi/tecsaladeaula | core/templatetags/usergroup.py | Python | agpl-3.0 | 1,920 |
from machinekit import hal
from machinekit import rtapi as rt
from machinekit import config as c
from fdm.config import base
def hardware_read():
hal.addf('hpg.capture-position', 'servo-thread')
hal.addf('bb_gpio.read', 'servo-thread')
def hardware_write():
hal.addf('hpg.update', 'servo-thread')
hal.addf('bb_gpio.write', 'servo-thread')
def init_hardware():
watchList = []
# load low-level drivers
rt.loadrt('hal_bb_gpio', output_pins='816,822,823,824,825,826,914,923,925', input_pins='807,808,809,810,817,911,913')
prubin = '%s/%s' % (c.Config().EMC2_RTLIB_DIR, c.find('PRUCONF', 'PRUBIN'))
rt.loadrt(c.find('PRUCONF', 'DRIVER'),
pru=0, num_stepgens=6, num_pwmgens=6,
prucode=prubin, halname='hpg')
# Python user-mode HAL module to read ADC value and generate a thermostat output for PWM
defaultThermistor = 'semitec_103GT_2'
hal.loadusr('hal_temp_bbb',
name='temp',
interval=0.05,
filter_size=1,
cape_board='CRAMPS',
channels='04:%s,05:%s,02:%s,03:%s'
% (c.find('HBP', 'THERMISTOR', defaultThermistor),
c.find('EXTRUDER_0', 'THERMISTOR', defaultThermistor),
c.find('EXTRUDER_1', 'THERMISTOR', defaultThermistor),
c.find('EXTRUDER_2', 'THERMISTOR', defaultThermistor)),
wait_name='temp')
watchList.append(['temp', 0.1])
base.usrcomp_status('temp', 'temp-hw', thread='servo-thread')
base.usrcomp_watchdog(watchList, 'estop-reset', thread='servo-thread',
errorSignal='watchdog-error')
def setup_hardware(thread):
# PWM
hal.Pin('hpg.pwmgen.00.pwm_period').set(10000000) # 100Hz
hal.Pin('hpg.pwmgen.00.out.00.pin').set(811)
hal.Pin('hpg.pwmgen.00.out.01.pin').set(915)
hal.Pin('hpg.pwmgen.00.out.02.pin').set(927)
hal.Pin('hpg.pwmgen.00.out.03.pin').set(921)
hal.Pin('hpg.pwmgen.00.out.04.pin').set(941)
hal.Pin('hpg.pwmgen.00.out.05.pin').set(922)
# HBP
hal.Pin('hpg.pwmgen.00.out.00.enable').set(True)
hal.Pin('hpg.pwmgen.00.out.00.value').link('hbp-temp-pwm')
# configure extruders
for n in range(0, 3):
hal.Pin('hpg.pwmgen.00.out.%02i.enable' % (n + 1)).set(True)
hal.Pin('hpg.pwmgen.00.out.%02i.value' % (n + 1)).link('e%i-temp-pwm' % n)
# configure fans
for n in range(0, 2):
hal.Pin('hpg.pwmgen.00.out.%02i.enable' % (n + 4)).link('f%i-pwm-enable' % n)
hal.Pin('hpg.pwmgen.00.out.%02i.value' % (n + 4)).link('f%i-pwm' % n)
hal.Signal('f%i-pwm-enable' % n).set(True)
# configure leds
# none
# GPIO
hal.Pin('bb_gpio.p8.in-08').link('limit-0-home') # X
hal.Pin('bb_gpio.p8.in-07').link('limit-0-max') # X
hal.Pin('bb_gpio.p8.in-10').link('limit-1-home') # Y
hal.Pin('bb_gpio.p8.in-09').link('limit-1-max') # Y
hal.Pin('bb_gpio.p9.in-13').link('limit-2-home') # Z
hal.Pin('bb_gpio.p9.in-11').link('limit-2-max') # Z
# probe ...
# Adjust as needed for your switch polarity
hal.Pin('bb_gpio.p8.in-08.invert').set(True)
hal.Pin('bb_gpio.p8.in-07.invert').set(True)
hal.Pin('bb_gpio.p8.in-10.invert').set(True)
hal.Pin('bb_gpio.p8.in-09.invert').set(True)
hal.Pin('bb_gpio.p9.in-13.invert').set(True)
hal.Pin('bb_gpio.p9.in-11.invert').set(True)
# ADC
hal.Pin('temp.ch-04.value').link('hbp-temp-meas')
hal.Pin('temp.ch-05.value').link('e0-temp-meas')
hal.Pin('temp.ch-02.value').link('e1-temp-meas')
hal.Pin('temp.ch-03.value').link('e2-temp-meas')
# Stepper
hal.Pin('hpg.stepgen.00.steppin').set(813)
hal.Pin('hpg.stepgen.00.dirpin').set(812)
hal.Pin('hpg.stepgen.01.steppin').set(815)
hal.Pin('hpg.stepgen.01.dirpin').set(814)
hal.Pin('hpg.stepgen.02.steppin').set(819)
hal.Pin('hpg.stepgen.02.dirpin').set(818)
hal.Pin('hpg.stepgen.03.steppin').set(916)
hal.Pin('hpg.stepgen.03.dirpin').set(912)
hal.Pin('hpg.stepgen.04.steppin').set(917)
hal.Pin('hpg.stepgen.04.dirpin').set(918)
hal.Pin('hpg.stepgen.05.steppin').set(924)
hal.Pin('hpg.stepgen.05.dirpin').set(926)
# machine power
hal.Pin('bb_gpio.p9.out-23').link('emcmot-0-enable')
#hal.Pin('bb_gpio.p9.out-23.invert').set(True)
# Monitor estop input from hardware
hal.Pin('bb_gpio.p8.in-17').link('estop-in')
hal.Pin('bb_gpio.p8.in-17.invert').set(True)
# drive estop-sw
hal.Pin('bb_gpio.p8.out-26').link('estop-out')
hal.Pin('bb_gpio.p8.out-26.invert').set(True)
# Tie machine power signal to the Parport Cape LED
# Feel free to tie any other signal you like to the LED
hal.Pin('bb_gpio.p9.out-25').link('emcmot-0-enable')
# hal.Pin('bb_gpio.p9.out-25.invert').set(True)
# link emcmot.xx.enable to stepper driver enable signals
hal.Pin('bb_gpio.p9.out-14').link('emcmot-0-enable')
hal.Pin('bb_gpio.p9.out-14.invert').set(True)
| strahlex/machinekit | configs/ARM/BeagleBone/MendelMax-CRAMPS/cramps.py | Python | lgpl-2.1 | 5,005 |
"""
Copyright 2009 55 Minutes (http://www.55minutes.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
test_timestamp = time.strftime('%a %Y-%m-%d %H:%M %Z')
TOP = """\
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-type" content="text/html;charset=UTF-8" />
<title>Test coverage report: %(title)s</title>
<style type="text/css" media="screen">
body
{
font-family: "Lucida Sans Unicode", "Lucida Grande", sans-serif;
font-size: 13px;
}
#content-header
{
margin-left: 50px;
}
#content-header h1
{
font-size: 18px;
margin-bottom: 0;
}
#content-header p
{
font-size: 13px;
margin: 0;
color: #909090;
}
#result-list
{
margin: 0 50px;
}
#result-list ul
{
padding-left: 13px;
list-style-position: inside;
}
</style>
</head>
<body>
"""
CONTENT_HEADER = """\
<div id="content-header">
<h1>Test Coverage Report: %(title)s</h1>"""
CONTENT_HEADER += "<p>Generated: %(test_timestamp)s</p>" %vars()
CONTENT_HEADER += "</div>"
CONTENT_BODY = """\
<div id="result-list">
<p>%(long_desc)s</p>
<ul>
%(exception_list)s
</ul>
Back to <a href="index.html">index</a>.
</div>
"""
EXCEPTION_LINE = "<li>%(module_name)s</li>"
BOTTOM = """\
</body>
</html>
"""
| UT-Austin-FIS/django-coverage | django_coverage/utils/coverage_report/templates/default_module_exceptions.py | Python | apache-2.0 | 2,058 |
import sublime
import collections
VAR_MAP_LEADER = 'mapleader'
VAR_MAP_LOCAL_LEADER = 'maplocalleader'
# well-known variables
_SPECIAL_STRINGS = {
'<leader>': VAR_MAP_LEADER,
'<localleader>': VAR_MAP_LOCAL_LEADER,
}
_DEFAULTS = {
VAR_MAP_LEADER: '\\',
VAR_MAP_LOCAL_LEADER: '\\'
}
_VARIABLES = {
}
def expand_keys(seq):
'''Replaces well-known variables in key names with their corresponding
values.
'''
leader = var_name = None
# TODO(guillermooo): Can these variables appear in the middle of a
# sequence instead of at the beginning only?
if seq.lower().startswith('<leader>'):
var_name = '<leader>'
leader = _VARIABLES.get('mapleader', _DEFAULTS.get('mapleader'))
    if seq.lower().startswith('<localleader>'):
        var_name = '<localleader>'
        leader = _VARIABLES.get('maplocalleader',
                                _DEFAULTS.get('maplocalleader'))
try:
return leader + seq[len(var_name):]
except TypeError:
return seq
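# Example (editorial note): with the default mapleader of '\\', a call such as
# expand_keys('<leader>w') yields '\\w', while a sequence that does not start
# with a known variable (e.g. 'gg') is returned unchanged.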
def is_key_name(name):
return name.lower() in _SPECIAL_STRINGS
def get(name):
name = name.lower()
name = _SPECIAL_STRINGS.get(name, name)
return _VARIABLES.get(name, _DEFAULTS.get(name))
def set_(name, value):
# TODO(guillermooo): Set vars in settings.
_VARIABLES[name] = value
class Variables(object):
'''Stores variables during the current Sublime Text session.
Meant to be used as a descriptor with `State`.
'''
def __get__(self, instance, owner):
self.view = instance.view
self.settings = instance.settings
return self
def get(self, name):
return get(name)
def set(self, name, value):
return set_(name, value)
| zhangtuoparis13/Vintageous | vi/variables.py | Python | mit | 1,741 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from osv import fields, osv
from tools.translate import _
class account_move_line_reconcile(osv.osv_memory):
"""
    Account move line reconcile wizard: it checks for a write-off on the reconcile entry or reconciles directly.
"""
_name = 'account.move.line.reconcile'
_description = 'Account move line reconcile'
_columns = {
'trans_nbr': fields.integer('# of Transaction', readonly=True),
'credit': fields.float('Credit amount', readonly=True),
'debit': fields.float('Debit amount', readonly=True),
'writeoff': fields.float('Write-Off amount', readonly=True),
}
def default_get(self, cr, uid, fields, context=None):
res = super(account_move_line_reconcile, self).default_get(cr, uid, fields, context=context)
data = self.trans_rec_get(cr, uid, context['active_ids'], context)
if 'trans_nbr' in fields:
res.update({'trans_nbr':data['trans_nbr']})
if 'credit' in fields:
res.update({'credit':data['credit']})
if 'debit' in fields:
res.update({'debit':data['debit']})
if 'writeoff' in fields:
res.update({'writeoff':data['writeoff']})
return res
def trans_rec_get(self, cr, uid, ids, context=None):
account_move_line_obj = self.pool.get('account.move.line')
if context is None:
context = {}
credit = debit = 0
account_id = False
count = 0
for line in account_move_line_obj.browse(cr, uid, context['active_ids'], context=context):
if not line.reconcile_id and not line.reconcile_id.id:
count += 1
credit += line.credit
debit += line.debit
account_id = line.account_id.id
return {'trans_nbr': count, 'account_id': account_id, 'credit': credit, 'debit': debit, 'writeoff': debit - credit}
def trans_rec_addendum_writeoff(self, cr, uid, ids, context=None):
return self.pool.get('account.move.line.reconcile.writeoff').trans_rec_addendum(cr, uid, ids, context)
def trans_rec_reconcile_partial_reconcile(self, cr, uid, ids, context=None):
return self.pool.get('account.move.line.reconcile.writeoff').trans_rec_reconcile_partial(cr, uid, ids, context)
def trans_rec_reconcile_full(self, cr, uid, ids, context=None):
account_move_line_obj = self.pool.get('account.move.line')
period_obj = self.pool.get('account.period')
date = False
period_id = False
        journal_id = False
account_id = False
if context is None:
context = {}
date = time.strftime('%Y-%m-%d')
ids = period_obj.find(cr, uid, dt=date, context=context)
if ids:
period_id = ids[0]
#stop the reconciliation process by partner (manual reconciliation) only if there is nothing more to reconcile for this partner
if 'active_ids' in context and context['active_ids']:
tmp_ml_id = account_move_line_obj.browse(cr, uid, context['active_ids'], context)[0]
partner_id = tmp_ml_id.partner_id and tmp_ml_id.partner_id.id or False
debit_ml_ids = account_move_line_obj.search(cr, uid, [('partner_id', '=', partner_id), ('account_id.reconcile', '=', True), ('reconcile_id', '=', False), ('debit', '>', 0)], context=context)
credit_ml_ids = account_move_line_obj.search(cr, uid, [('partner_id', '=', partner_id), ('account_id.reconcile', '=', True), ('reconcile_id', '=', False), ('credit', '>', 0)], context=context)
for ml_id in context['active_ids']:
if ml_id in debit_ml_ids:
debit_ml_ids.remove(ml_id)
if ml_id in credit_ml_ids:
credit_ml_ids.remove(ml_id)
if not debit_ml_ids and credit_ml_ids:
context.update({'stop_reconcile': True})
account_move_line_obj.reconcile(cr, uid, context['active_ids'], 'manual', account_id,
period_id, journal_id, context=context)
return {'type': 'ir.actions.act_window_close'}
account_move_line_reconcile()
class account_move_line_reconcile_writeoff(osv.osv_memory):
"""
    It opens the write-off wizard form, in which the user can define the journal, account and analytic account for the reconciliation.
"""
_name = 'account.move.line.reconcile.writeoff'
_description = 'Account move line reconcile (writeoff)'
_columns = {
'journal_id': fields.many2one('account.journal','Write-Off Journal', required=True),
'writeoff_acc_id': fields.many2one('account.account','Write-Off account', required=True),
'date_p': fields.date('Date'),
'comment': fields.char('Comment', size= 64, required=True),
'analytic_id': fields.many2one('account.analytic.account', 'Analytic Account', domain=[('parent_id', '!=', False)]),
}
_defaults = {
'date_p': lambda *a: time.strftime('%Y-%m-%d'),
'comment': 'Write-off',
}
def trans_rec_addendum(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
if context is None:
context = {}
model_data_ids = mod_obj.search(cr, uid,[('model','=','ir.ui.view'),('name','=','account_move_line_reconcile_writeoff')], context=context)
resource_id = mod_obj.read(cr, uid, model_data_ids, fields=['res_id'], context=context)[0]['res_id']
return {
'name': _('Reconcile Writeoff'),
'context': context,
'view_type': 'form',
'view_mode': 'form',
'res_model': 'account.move.line.reconcile.writeoff',
'views': [(resource_id,'form')],
'type': 'ir.actions.act_window',
'target': 'new',
}
def trans_rec_reconcile_partial(self, cr, uid, ids, context=None):
account_move_line_obj = self.pool.get('account.move.line')
if context is None:
context = {}
account_move_line_obj.reconcile_partial(cr, uid, context['active_ids'], 'manual', context=context)
return {'type': 'ir.actions.act_window_close'}
def trans_rec_reconcile(self, cr, uid, ids, context=None):
account_move_line_obj = self.pool.get('account.move.line')
period_obj = self.pool.get('account.period')
if context is None:
context = {}
data = self.read(cr, uid, ids,context=context)[0]
account_id = data['writeoff_acc_id'][0]
context['date_p'] = data['date_p']
journal_id = data['journal_id'][0]
context['comment'] = data['comment']
if data['analytic_id']:
context['analytic_id'] = data['analytic_id'][0]
if context['date_p']:
date = context['date_p']
ids = period_obj.find(cr, uid, dt=date, context=context)
if ids:
period_id = ids[0]
context.update({'stop_reconcile': True})
account_move_line_obj.reconcile(cr, uid, context['active_ids'], 'manual', account_id,
period_id, journal_id, context=context)
return {'type': 'ir.actions.act_window_close'}
account_move_line_reconcile_writeoff()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | Johnzero/erp | openerp/addons/account/wizard/account_reconcile.py | Python | agpl-3.0 | 8,252 |
"""Provides the constants needed for component."""
from typing import Final
SUPPORT_ALARM_ARM_HOME: Final = 1
SUPPORT_ALARM_ARM_AWAY: Final = 2
SUPPORT_ALARM_ARM_NIGHT: Final = 4
SUPPORT_ALARM_TRIGGER: Final = 8
SUPPORT_ALARM_ARM_CUSTOM_BYPASS: Final = 16
SUPPORT_ALARM_ARM_VACATION: Final = 32
CONDITION_TRIGGERED: Final = "is_triggered"
CONDITION_DISARMED: Final = "is_disarmed"
CONDITION_ARMED_HOME: Final = "is_armed_home"
CONDITION_ARMED_AWAY: Final = "is_armed_away"
CONDITION_ARMED_NIGHT: Final = "is_armed_night"
CONDITION_ARMED_VACATION: Final = "is_armed_vacation"
CONDITION_ARMED_CUSTOM_BYPASS: Final = "is_armed_custom_bypass"
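# Illustrative usage (editorial note, not part of Home Assistant): the
# SUPPORT_* constants are bit flags, so an entity's supported_features is the
# bitwise OR of what it implements, and a single capability is tested with a
# bitwise AND, for example:
#
#     supported = SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY
#     can_arm_night = bool(supported & SUPPORT_ALARM_ARM_NIGHT)  # False here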
| jawilson/home-assistant | homeassistant/components/alarm_control_panel/const.py | Python | apache-2.0 | 642 |
#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from os import listdir, unlink
from os.path import join as path_join
from unittest import main
from uuid import uuid4
from swiftclient import client
from swift.common import direct_client
from swift.common.exceptions import ClientException
from swift.common.utils import hash_path, readconf
from swift.obj.diskfile import write_metadata, read_metadata, get_data_dir
from test.probe.common import ReplProbeTest
RETRIES = 5
def get_data_file_path(obj_dir):
files = []
# We might need to try a few times if a request hasn't yet settled. For
    # instance, a PUT can return success when just 2 of 3 nodes have completed.
for attempt in xrange(RETRIES + 1):
try:
files = sorted(listdir(obj_dir), reverse=True)
break
except Exception:
if attempt < RETRIES:
time.sleep(1)
else:
raise
for filename in files:
return path_join(obj_dir, filename)
class TestObjectFailures(ReplProbeTest):
def _setup_data_file(self, container, obj, data):
client.put_container(self.url, self.token, container,
headers={'X-Storage-Policy':
self.policy.name})
client.put_object(self.url, self.token, container, obj, data)
odata = client.get_object(self.url, self.token, container, obj)[-1]
self.assertEquals(odata, data)
opart, onodes = self.object_ring.get_nodes(
self.account, container, obj)
onode = onodes[0]
node_id = (onode['port'] - 6000) / 10
device = onode['device']
hash_str = hash_path(self.account, container, obj)
obj_server_conf = readconf(self.configs['object-server'][node_id])
devices = obj_server_conf['app:object-server']['devices']
obj_dir = '%s/%s/%s/%s/%s/%s/' % (devices, device,
get_data_dir(self.policy),
opart, hash_str[-3:], hash_str)
data_file = get_data_file_path(obj_dir)
return onode, opart, data_file
def run_quarantine(self):
container = 'container-%s' % uuid4()
obj = 'object-%s' % uuid4()
onode, opart, data_file = self._setup_data_file(container, obj,
'VERIFY')
metadata = read_metadata(data_file)
metadata['ETag'] = 'badetag'
write_metadata(data_file, metadata)
odata = direct_client.direct_get_object(
onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
self.assertEquals(odata, 'VERIFY')
try:
direct_client.direct_get_object(
onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})
raise Exception("Did not quarantine object")
except ClientException as err:
self.assertEquals(err.http_status, 404)
def run_quarantine_range_etag(self):
container = 'container-range-%s' % uuid4()
obj = 'object-range-%s' % uuid4()
onode, opart, data_file = self._setup_data_file(container, obj,
'RANGE')
metadata = read_metadata(data_file)
metadata['ETag'] = 'badetag'
write_metadata(data_file, metadata)
base_headers = {'X-Backend-Storage-Policy-Index': self.policy.idx}
for header, result in [({'Range': 'bytes=0-2'}, 'RAN'),
({'Range': 'bytes=1-11'}, 'ANGE'),
({'Range': 'bytes=0-11'}, 'RANGE')]:
req_headers = base_headers.copy()
req_headers.update(header)
odata = direct_client.direct_get_object(
onode, opart, self.account, container, obj,
headers=req_headers)[-1]
self.assertEquals(odata, result)
try:
direct_client.direct_get_object(
onode, opart, self.account, container, obj, headers={
'X-Backend-Storage-Policy-Index': self.policy.idx})
raise Exception("Did not quarantine object")
except ClientException as err:
self.assertEquals(err.http_status, 404)
def run_quarantine_zero_byte_get(self):
container = 'container-zbyte-%s' % uuid4()
obj = 'object-zbyte-%s' % uuid4()
onode, opart, data_file = self._setup_data_file(container, obj, 'DATA')
metadata = read_metadata(data_file)
unlink(data_file)
with open(data_file, 'w') as fpointer:
write_metadata(fpointer, metadata)
try:
direct_client.direct_get_object(
onode, opart, self.account, container, obj, conn_timeout=1,
response_timeout=1, headers={'X-Backend-Storage-Policy-Index':
self.policy.idx})
raise Exception("Did not quarantine object")
except ClientException as err:
self.assertEquals(err.http_status, 404)
def run_quarantine_zero_byte_head(self):
container = 'container-zbyte-%s' % uuid4()
obj = 'object-zbyte-%s' % uuid4()
onode, opart, data_file = self._setup_data_file(container, obj, 'DATA')
metadata = read_metadata(data_file)
unlink(data_file)
with open(data_file, 'w') as fpointer:
write_metadata(fpointer, metadata)
try:
direct_client.direct_head_object(
onode, opart, self.account, container, obj, conn_timeout=1,
response_timeout=1, headers={'X-Backend-Storage-Policy-Index':
self.policy.idx})
raise Exception("Did not quarantine object")
except ClientException as err:
self.assertEquals(err.http_status, 404)
def run_quarantine_zero_byte_post(self):
container = 'container-zbyte-%s' % uuid4()
obj = 'object-zbyte-%s' % uuid4()
onode, opart, data_file = self._setup_data_file(container, obj, 'DATA')
metadata = read_metadata(data_file)
unlink(data_file)
with open(data_file, 'w') as fpointer:
write_metadata(fpointer, metadata)
try:
headers = {'X-Object-Meta-1': 'One', 'X-Object-Meta-Two': 'Two',
'X-Backend-Storage-Policy-Index': self.policy.idx}
direct_client.direct_post_object(
onode, opart, self.account,
container, obj,
headers=headers,
conn_timeout=1,
response_timeout=1)
raise Exception("Did not quarantine object")
except ClientException as err:
self.assertEquals(err.http_status, 404)
def test_runner(self):
self.run_quarantine()
self.run_quarantine_range_etag()
self.run_quarantine_zero_byte_get()
self.run_quarantine_zero_byte_head()
self.run_quarantine_zero_byte_post()
if __name__ == '__main__':
main()
| kun--hust/sccloud | test/probe/test_object_failures.py | Python | apache-2.0 | 7,804 |
# -*- coding: utf-8 -*-
# Author: Vincent Dubourg <[email protected]>
# (mostly translation, see implementation details)
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
from ..utils import deprecated
MACHINE_EPSILON = np.finfo(np.double).eps
@deprecated("l1_cross_distances was deprecated in version 0.18 "
"and will be removed in 0.20.")
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
        distances in D: D[k] = np.abs(X[ij[k, 0]] - X[ij[k, 1]]).
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
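# Illustrative example (editorial note): for X = [[0.], [1.], [3.]] the
# function returns
#     D  = [[1.], [3.], [2.]]        # |0-1|, |0-3|, |1-3|
#     ij = [[0, 1], [0, 2], [1, 2]]  # index pairs matching each row of D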
@deprecated("GaussianProcess was deprecated in version 0.18 and will be "
"removed in 0.20. Use the GaussianProcessRegressor instead.")
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The legacy Gaussian Process model class.
Note that this class was deprecated in version 0.18 and will be
removed in 0.20. Use the GaussianProcessRegressor instead.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
        MSE and only plans to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
        'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
    random_state : integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
    The present implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://imedea.uib-csic.es/master/cambioglobal/Modulo_V_cod101615/Lab/lab_maps/krigging/DACE-krigingsoft/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/stable/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes evalMSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
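    # Illustrative call (editorial note, mirroring the class docstring
    # example): assuming `gp` has been fitted as shown there,
    #     y_pred, mse = gp.predict(np.array([[2.5], [4.0]]), eval_MSE=True)
    # returns the BLUP prediction and its mean squared error per point.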
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = (np.log10(self.thetaL)
+ self.random_state.rand(*self.theta0.shape)
* np.log10(self.thetaU / self.thetaL))
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0).ravel(), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
# Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
# Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = np.atleast_2d(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = np.atleast_2d(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = np.atleast_2d(self.thetaL)
self.thetaU = np.atleast_2d(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
if np.any(self.nugget) < 0.:
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
| potash/scikit-learn | sklearn/gaussian_process/gaussian_process.py | Python | bsd-3-clause | 35,041 |
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import importlib
import os
from os import path
import pkgutil
import shutil
import sys
import tempfile
import threading
import unittest
from six import moves
from grpc.beta import implementations
from grpc.beta import interfaces
from grpc.framework.foundation import future
from grpc.framework.interfaces.face import face
from grpc_tools import protoc
from tests.unit.framework.common import test_constants
_RELATIVE_PROTO_PATH = 'relative_proto_path'
_RELATIVE_PYTHON_OUT = 'relative_python_out'
_PROTO_FILES_PATH_COMPONENTS = (
(
'beta_grpc_plugin_test',
'payload',
'test_payload.proto',
),
(
'beta_grpc_plugin_test',
'requests',
'r',
'test_requests.proto',
),
(
'beta_grpc_plugin_test',
'responses',
'test_responses.proto',
),
(
'beta_grpc_plugin_test',
'service',
'test_service.proto',
),
)
_PAYLOAD_PB2 = 'beta_grpc_plugin_test.payload.test_payload_pb2'
_REQUESTS_PB2 = 'beta_grpc_plugin_test.requests.r.test_requests_pb2'
_RESPONSES_PB2 = 'beta_grpc_plugin_test.responses.test_responses_pb2'
_SERVICE_PB2 = 'beta_grpc_plugin_test.service.test_service_pb2'
# Identifiers of entities we expect to find in the generated module.
SERVICER_IDENTIFIER = 'BetaTestServiceServicer'
STUB_IDENTIFIER = 'BetaTestServiceStub'
SERVER_FACTORY_IDENTIFIER = 'beta_create_TestService_server'
STUB_FACTORY_IDENTIFIER = 'beta_create_TestService_stub'
@contextlib.contextmanager
def _system_path(path_insertion):
old_system_path = sys.path[:]
sys.path = sys.path[0:1] + path_insertion + sys.path[1:]
yield
sys.path = old_system_path
def _create_directory_tree(root, path_components_sequence):
created = set()
for path_components in path_components_sequence:
thus_far = ''
for path_component in path_components:
relative_path = path.join(thus_far, path_component)
if relative_path not in created:
os.makedirs(path.join(root, relative_path))
created.add(relative_path)
thus_far = path.join(thus_far, path_component)
def _massage_proto_content(raw_proto_content):
imports_substituted = raw_proto_content.replace(
b'import "tests/protoc_plugin/protos/',
b'import "beta_grpc_plugin_test/')
package_statement_substituted = imports_substituted.replace(
b'package grpc_protoc_plugin;', b'package beta_grpc_protoc_plugin;')
return package_statement_substituted
def _packagify(directory):
for subdirectory, _, _ in os.walk(directory):
init_file_name = path.join(subdirectory, '__init__.py')
with open(init_file_name, 'wb') as init_file:
init_file.write(b'')
class _ServicerMethods(object):
def __init__(self, payload_pb2, responses_pb2):
self._condition = threading.Condition()
self._paused = False
self._fail = False
self._payload_pb2 = payload_pb2
self._responses_pb2 = responses_pb2
@contextlib.contextmanager
def pause(self): # pylint: disable=invalid-name
with self._condition:
self._paused = True
yield
with self._condition:
self._paused = False
self._condition.notify_all()
@contextlib.contextmanager
def fail(self): # pylint: disable=invalid-name
with self._condition:
self._fail = True
yield
with self._condition:
self._fail = False
def _control(self): # pylint: disable=invalid-name
with self._condition:
if self._fail:
raise ValueError()
while self._paused:
self._condition.wait()
def UnaryCall(self, request, unused_rpc_context):
response = self._responses_pb2.SimpleResponse()
response.payload.payload_type = self._payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * request.response_size
self._control()
return response
def StreamingOutputCall(self, request, unused_rpc_context):
for parameter in request.response_parameters:
response = self._responses_pb2.StreamingOutputCallResponse()
response.payload.payload_type = self._payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
yield response
def StreamingInputCall(self, request_iter, unused_rpc_context):
response = self._responses_pb2.StreamingInputCallResponse()
aggregated_payload_size = 0
for request in request_iter:
aggregated_payload_size += len(request.payload.payload_compressable)
response.aggregated_payload_size = aggregated_payload_size
self._control()
return response
def FullDuplexCall(self, request_iter, unused_rpc_context):
for request in request_iter:
for parameter in request.response_parameters:
response = self._responses_pb2.StreamingOutputCallResponse()
response.payload.payload_type = self._payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
yield response
def HalfDuplexCall(self, request_iter, unused_rpc_context):
responses = []
for request in request_iter:
for parameter in request.response_parameters:
response = self._responses_pb2.StreamingOutputCallResponse()
response.payload.payload_type = self._payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
responses.append(response)
for response in responses:
yield response
@contextlib.contextmanager
def _CreateService(payload_pb2, responses_pb2, service_pb2):
"""Provides a servicer backend and a stub.
The servicer is just the implementation of the actual servicer passed to the
    face layer of the Python RPC implementation; the two are detached.
Yields:
A (servicer_methods, stub) pair where servicer_methods is the back-end of
the service bound to the stub and stub is the stub on which to invoke
RPCs.
"""
servicer_methods = _ServicerMethods(payload_pb2, responses_pb2)
class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)):
def UnaryCall(self, request, context):
return servicer_methods.UnaryCall(request, context)
def StreamingOutputCall(self, request, context):
return servicer_methods.StreamingOutputCall(request, context)
def StreamingInputCall(self, request_iter, context):
return servicer_methods.StreamingInputCall(request_iter, context)
def FullDuplexCall(self, request_iter, context):
return servicer_methods.FullDuplexCall(request_iter, context)
def HalfDuplexCall(self, request_iter, context):
return servicer_methods.HalfDuplexCall(request_iter, context)
servicer = Servicer()
server = getattr(service_pb2, SERVER_FACTORY_IDENTIFIER)(servicer)
port = server.add_insecure_port('[::]:0')
server.start()
channel = implementations.insecure_channel('localhost', port)
stub = getattr(service_pb2, STUB_FACTORY_IDENTIFIER)(channel)
yield servicer_methods, stub
server.stop(0)
@contextlib.contextmanager
def _CreateIncompleteService(service_pb2):
"""Provides a servicer backend that fails to implement methods and its stub.
The servicer is just the implementation of the actual servicer passed to the
    face layer of the Python RPC implementation; the two are detached.
Args:
service_pb2: The service_pb2 module generated by this test.
Yields:
A (servicer_methods, stub) pair where servicer_methods is the back-end of
the service bound to the stub and stub is the stub on which to invoke
RPCs.
"""
class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)):
pass
servicer = Servicer()
server = getattr(service_pb2, SERVER_FACTORY_IDENTIFIER)(servicer)
port = server.add_insecure_port('[::]:0')
server.start()
channel = implementations.insecure_channel('localhost', port)
stub = getattr(service_pb2, STUB_FACTORY_IDENTIFIER)(channel)
yield None, stub
server.stop(0)
def _streaming_input_request_iterator(payload_pb2, requests_pb2):
for _ in range(3):
request = requests_pb2.StreamingInputCallRequest()
request.payload.payload_type = payload_pb2.COMPRESSABLE
request.payload.payload_compressable = 'a'
yield request
def _streaming_output_request(requests_pb2):
request = requests_pb2.StreamingOutputCallRequest()
sizes = [1, 2, 3]
request.response_parameters.add(size=sizes[0], interval_us=0)
request.response_parameters.add(size=sizes[1], interval_us=0)
request.response_parameters.add(size=sizes[2], interval_us=0)
return request
def _full_duplex_request_iterator(requests_pb2):
request = requests_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
request = requests_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=2, interval_us=0)
request.response_parameters.add(size=3, interval_us=0)
yield request
class PythonPluginTest(unittest.TestCase):
"""Test case for the gRPC Python protoc-plugin.
While reading these tests, remember that the futures API
(`stub.method.future()`) only gives futures for the *response-unary*
methods and does not exist for response-streaming methods.
"""
def setUp(self):
self._directory = tempfile.mkdtemp(dir='.')
self._proto_path = path.join(self._directory, _RELATIVE_PROTO_PATH)
self._python_out = path.join(self._directory, _RELATIVE_PYTHON_OUT)
os.makedirs(self._proto_path)
os.makedirs(self._python_out)
directories_path_components = {
proto_file_path_components[:-1]
for proto_file_path_components in _PROTO_FILES_PATH_COMPONENTS
}
_create_directory_tree(self._proto_path, directories_path_components)
self._proto_file_names = set()
for proto_file_path_components in _PROTO_FILES_PATH_COMPONENTS:
raw_proto_content = pkgutil.get_data(
'tests.protoc_plugin.protos',
path.join(*proto_file_path_components[1:]))
massaged_proto_content = _massage_proto_content(raw_proto_content)
proto_file_name = path.join(self._proto_path,
*proto_file_path_components)
with open(proto_file_name, 'wb') as proto_file:
proto_file.write(massaged_proto_content)
self._proto_file_names.add(proto_file_name)
def tearDown(self):
shutil.rmtree(self._directory)
def _protoc(self):
args = [
'',
'--proto_path={}'.format(self._proto_path),
'--python_out={}'.format(self._python_out),
'--grpc_python_out=grpc_1_0:{}'.format(self._python_out),
] + list(self._proto_file_names)
protoc_exit_code = protoc.main(args)
self.assertEqual(0, protoc_exit_code)
_packagify(self._python_out)
with _system_path([self._python_out]):
self._payload_pb2 = importlib.import_module(_PAYLOAD_PB2)
self._requests_pb2 = importlib.import_module(_REQUESTS_PB2)
self._responses_pb2 = importlib.import_module(_RESPONSES_PB2)
self._service_pb2 = importlib.import_module(_SERVICE_PB2)
def testImportAttributes(self):
self._protoc()
# check that we can access the generated module and its members.
self.assertIsNotNone(
getattr(self._service_pb2, SERVICER_IDENTIFIER, None))
self.assertIsNotNone(getattr(self._service_pb2, STUB_IDENTIFIER, None))
self.assertIsNotNone(
getattr(self._service_pb2, SERVER_FACTORY_IDENTIFIER, None))
self.assertIsNotNone(
getattr(self._service_pb2, STUB_FACTORY_IDENTIFIER, None))
def testUpDown(self):
self._protoc()
with _CreateService(self._payload_pb2, self._responses_pb2,
self._service_pb2):
self._requests_pb2.SimpleRequest(response_size=13)
def testIncompleteServicer(self):
self._protoc()
with _CreateIncompleteService(self._service_pb2) as (_, stub):
request = self._requests_pb2.SimpleRequest(response_size=13)
try:
stub.UnaryCall(request, test_constants.LONG_TIMEOUT)
except face.AbortionError as error:
self.assertEqual(interfaces.StatusCode.UNIMPLEMENTED,
error.code)
def testUnaryCall(self):
self._protoc()
with _CreateService(self._payload_pb2, self._responses_pb2,
self._service_pb2) as (methods, stub):
request = self._requests_pb2.SimpleRequest(response_size=13)
response = stub.UnaryCall(request, test_constants.LONG_TIMEOUT)
expected_response = methods.UnaryCall(request, 'not a real context!')
self.assertEqual(expected_response, response)
def testUnaryCallFuture(self):
self._protoc()
with _CreateService(self._payload_pb2, self._responses_pb2,
self._service_pb2) as (methods, stub):
request = self._requests_pb2.SimpleRequest(response_size=13)
# Check that the call does not block waiting for the server to respond.
with methods.pause():
response_future = stub.UnaryCall.future(
request, test_constants.LONG_TIMEOUT)
response = response_future.result()
expected_response = methods.UnaryCall(request, 'not a real RpcContext!')
self.assertEqual(expected_response, response)
def testUnaryCallFutureExpired(self):
self._protoc()
with _CreateService(self._payload_pb2, self._responses_pb2,
self._service_pb2) as (methods, stub):
request = self._requests_pb2.SimpleRequest(response_size=13)
with methods.pause():
response_future = stub.UnaryCall.future(
request, test_constants.SHORT_TIMEOUT)
with self.assertRaises(face.ExpirationError):
response_future.result()
def testUnaryCallFutureCancelled(self):
self._protoc()
with _CreateService(self._payload_pb2, self._responses_pb2,
self._service_pb2) as (methods, stub):
request = self._requests_pb2.SimpleRequest(response_size=13)
with methods.pause():
response_future = stub.UnaryCall.future(request, 1)
response_future.cancel()
self.assertTrue(response_future.cancelled())
def testUnaryCallFutureFailed(self):
self._protoc()
with _CreateService(self._payload_pb2, self._responses_pb2,
self._service_pb2) as (methods, stub):
request = self._requests_pb2.SimpleRequest(response_size=13)
with methods.fail():
response_future = stub.UnaryCall.future(
request, test_constants.LONG_TIMEOUT)
self.assertIsNotNone(response_future.exception())
def testStreamingOutputCall(self):
self._protoc()
with _CreateService(self._payload_pb2, self._responses_pb2,
self._service_pb2) as (methods, stub):
request = _streaming_output_request(self._requests_pb2)
responses = stub.StreamingOutputCall(request,
test_constants.LONG_TIMEOUT)
expected_responses = methods.StreamingOutputCall(
request, 'not a real RpcContext!')
for expected_response, response in moves.zip_longest(
expected_responses, responses):
self.assertEqual(expected_response, response)
def testStreamingOutputCallExpired(self):
self._protoc()
with _CreateService(self._payload_pb2, self._responses_pb2,
self._service_pb2) as (methods, stub):
request = _streaming_output_request(self._requests_pb2)
with methods.pause():
responses = stub.StreamingOutputCall(
request, test_constants.SHORT_TIMEOUT)
with self.assertRaises(face.ExpirationError):
list(responses)
def testStreamingOutputCallCancelled(self):
self._protoc()
with _CreateService(self._payload_pb2, self._responses_pb2,
self._service_pb2) as (methods, stub):
request = _streaming_output_request(self._requests_pb2)
responses = stub.StreamingOutputCall(request,
test_constants.LONG_TIMEOUT)
next(responses)
responses.cancel()
with self.assertRaises(face.CancellationError):
next(responses)
def testStreamingOutputCallFailed(self):
self._protoc()
with _CreateService(self._payload_pb2, self._responses_pb2,
self._service_pb2) as (methods, stub):
request = _streaming_output_request(self._requests_pb2)
with methods.fail():
responses = stub.StreamingOutputCall(request, 1)
self.assertIsNotNone(responses)
with self.assertRaises(face.RemoteError):
next(responses)
def testStreamingInputCall(self):
self._protoc()
with _CreateService(self._payload_pb2, self._responses_pb2,
self._service_pb2) as (methods, stub):
response = stub.StreamingInputCall(
_streaming_input_request_iterator(self._payload_pb2,
self._requests_pb2),
test_constants.LONG_TIMEOUT)
expected_response = methods.StreamingInputCall(
_streaming_input_request_iterator(self._payload_pb2,
self._requests_pb2),
'not a real RpcContext!')
self.assertEqual(expected_response, response)
def testStreamingInputCallFuture(self):
self._protoc()
with _CreateService(self._payload_pb2, self._responses_pb2,
self._service_pb2) as (methods, stub):
with methods.pause():
response_future = stub.StreamingInputCall.future(
_streaming_input_request_iterator(self._payload_pb2,
self._requests_pb2),
test_constants.LONG_TIMEOUT)
response = response_future.result()
expected_response = methods.StreamingInputCall(
_streaming_input_request_iterator(self._payload_pb2,
self._requests_pb2),
'not a real RpcContext!')
self.assertEqual(expected_response, response)
def testStreamingInputCallFutureExpired(self):
self._protoc()
with _CreateService(self._payload_pb2, self._responses_pb2,
self._service_pb2) as (methods, stub):
with methods.pause():
response_future = stub.StreamingInputCall.future(
_streaming_input_request_iterator(self._payload_pb2,
self._requests_pb2),
test_constants.SHORT_TIMEOUT)
with self.assertRaises(face.ExpirationError):
response_future.result()
self.assertIsInstance(response_future.exception(),
face.ExpirationError)
def testStreamingInputCallFutureCancelled(self):
self._protoc()
with _CreateService(self._payload_pb2, self._responses_pb2,
self._service_pb2) as (methods, stub):
with methods.pause():
response_future = stub.StreamingInputCall.future(
_streaming_input_request_iterator(self._payload_pb2,
self._requests_pb2),
test_constants.LONG_TIMEOUT)
response_future.cancel()
self.assertTrue(response_future.cancelled())
with self.assertRaises(future.CancelledError):
response_future.result()
def testStreamingInputCallFutureFailed(self):
self._protoc()
with _CreateService(self._payload_pb2, self._responses_pb2,
self._service_pb2) as (methods, stub):
with methods.fail():
response_future = stub.StreamingInputCall.future(
_streaming_input_request_iterator(self._payload_pb2,
self._requests_pb2),
test_constants.LONG_TIMEOUT)
self.assertIsNotNone(response_future.exception())
def testFullDuplexCall(self):
self._protoc()
with _CreateService(self._payload_pb2, self._responses_pb2,
self._service_pb2) as (methods, stub):
responses = stub.FullDuplexCall(
_full_duplex_request_iterator(self._requests_pb2),
test_constants.LONG_TIMEOUT)
expected_responses = methods.FullDuplexCall(
_full_duplex_request_iterator(self._requests_pb2),
'not a real RpcContext!')
for expected_response, response in moves.zip_longest(
expected_responses, responses):
self.assertEqual(expected_response, response)
def testFullDuplexCallExpired(self):
self._protoc()
request_iterator = _full_duplex_request_iterator(self._requests_pb2)
with _CreateService(self._payload_pb2, self._responses_pb2,
self._service_pb2) as (methods, stub):
with methods.pause():
responses = stub.FullDuplexCall(request_iterator,
test_constants.SHORT_TIMEOUT)
with self.assertRaises(face.ExpirationError):
list(responses)
def testFullDuplexCallCancelled(self):
self._protoc()
with _CreateService(self._payload_pb2, self._responses_pb2,
self._service_pb2) as (methods, stub):
request_iterator = _full_duplex_request_iterator(self._requests_pb2)
responses = stub.FullDuplexCall(request_iterator,
test_constants.LONG_TIMEOUT)
next(responses)
responses.cancel()
with self.assertRaises(face.CancellationError):
next(responses)
def testFullDuplexCallFailed(self):
self._protoc()
request_iterator = _full_duplex_request_iterator(self._requests_pb2)
with _CreateService(self._payload_pb2, self._responses_pb2,
self._service_pb2) as (methods, stub):
with methods.fail():
responses = stub.FullDuplexCall(request_iterator,
test_constants.LONG_TIMEOUT)
self.assertIsNotNone(responses)
with self.assertRaises(face.RemoteError):
next(responses)
def testHalfDuplexCall(self):
self._protoc()
with _CreateService(self._payload_pb2, self._responses_pb2,
self._service_pb2) as (methods, stub):
def half_duplex_request_iterator():
request = self._requests_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
request = self._requests_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=2, interval_us=0)
request.response_parameters.add(size=3, interval_us=0)
yield request
responses = stub.HalfDuplexCall(half_duplex_request_iterator(),
test_constants.LONG_TIMEOUT)
expected_responses = methods.HalfDuplexCall(
half_duplex_request_iterator(), 'not a real RpcContext!')
for check in moves.zip_longest(expected_responses, responses):
expected_response, response = check
self.assertEqual(expected_response, response)
def testHalfDuplexCallWedged(self):
self._protoc()
condition = threading.Condition()
wait_cell = [False]
@contextlib.contextmanager
def wait(): # pylint: disable=invalid-name
# Where's Python 3's 'nonlocal' statement when you need it?
with condition:
wait_cell[0] = True
yield
with condition:
wait_cell[0] = False
condition.notify_all()
def half_duplex_request_iterator():
request = self._requests_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
with condition:
while wait_cell[0]:
condition.wait()
with _CreateService(self._payload_pb2, self._responses_pb2,
self._service_pb2) as (methods, stub):
with wait():
responses = stub.HalfDuplexCall(half_duplex_request_iterator(),
test_constants.SHORT_TIMEOUT)
# half-duplex waits for the client to send all info
with self.assertRaises(face.ExpirationError):
next(responses)
if __name__ == '__main__':
unittest.main(verbosity=2)
| endlessm/chromium-browser | third_party/grpc/src/src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py | Python | bsd-3-clause | 26,919 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_firewall_manager
short_description: Manage firewall configurations on an ESXi host
description:
- This module can be used to manage firewall configurations on an ESXi host when an ESXi hostname or cluster name is given.
version_added: '2.5'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster.
    - Firewall settings are applied to every ESXi host system in the given cluster.
- If C(esxi_hostname) is not given, this parameter is required.
esxi_hostname:
description:
- ESXi hostname.
- Firewall settings are applied to this ESXi host system.
- If C(cluster_name) is not given, this parameter is required.
rules:
description:
    - A list of rule sets which need to be managed.
    - Each member of the list is a rule set name and the state to set for that rule.
- Both rule name and rule state are required parameters.
- Please see examples for more information.
default: []
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Enable vvold rule set for all ESXi Host in given Cluster
vmware_host_firewall_manager:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: cluster_name
rules:
- name: vvold
enabled: True
delegate_to: localhost
- name: Enable vvold rule set for an ESXi Host
vmware_host_firewall_manager:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
rules:
- name: vvold
enabled: True
delegate_to: localhost
- name: Manage multiple rule set for an ESXi Host
vmware_host_firewall_manager:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
rules:
- name: vvold
enabled: True
- name: CIMHttpServer
enabled: False
delegate_to: localhost
'''
RETURN = r'''
rule_set_state:
description:
- dict with hostname as key and dict with firewall rule set facts as value
returned: success
type: dict
sample: {
"rule_set_state": {
"localhost.localdomain": {
"CIMHttpServer": {
"current_state": true,
"desired_state": true,
"previous_state": true
},
"vvold": {
"current_state": true,
"desired_state": true,
"previous_state": true
}
}
}
}
'''
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
from ansible.module_utils._text import to_native
class VmwareFirewallManager(PyVmomi):
def __init__(self, module):
super(VmwareFirewallManager, self).__init__(module)
cluster_name = self.params.get('cluster_name', None)
esxi_host_name = self.params.get('esxi_hostname', None)
self.options = self.params.get('options', dict())
self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
self.firewall_facts = dict()
self.rule_options = self.module.params.get("rules")
self.gather_rule_set()
def gather_rule_set(self):
for host in self.hosts:
self.firewall_facts[host.name] = {}
firewall_system = host.configManager.firewallSystem
if firewall_system:
for rule_set_obj in firewall_system.firewallInfo.ruleset:
temp_rule_dict = dict()
temp_rule_dict['enabled'] = rule_set_obj.enabled
self.firewall_facts[host.name][rule_set_obj.key] = temp_rule_dict
def ensure(self):
"""
Function to ensure rule set configuration
"""
fw_change_list = []
results = dict(changed=False, rule_set_state=dict())
for host in self.hosts:
firewall_system = host.configManager.firewallSystem
if firewall_system is None:
continue
results['rule_set_state'][host.name] = dict()
for rule_option in self.rule_options:
rule_name = rule_option.get('name', None)
if rule_name is None:
self.module.fail_json(msg="Please specify rule.name for rule set"
" as it is required parameter.")
if rule_name not in self.firewall_facts[host.name]:
self.module.fail_json(msg="rule named '%s' wasn't found." % rule_name)
rule_enabled = rule_option.get('enabled', None)
if rule_enabled is None:
self.module.fail_json(msg="Please specify rules.enabled for rule set"
" %s as it is required parameter." % rule_name)
current_rule_state = self.firewall_facts[host.name][rule_name]['enabled']
if current_rule_state != rule_enabled:
try:
if rule_enabled:
firewall_system.EnableRuleset(id=rule_name)
else:
firewall_system.DisableRuleset(id=rule_name)
fw_change_list.append(True)
except vim.fault.NotFound as not_found:
self.module.fail_json(msg="Failed to enable rule set %s as"
" rule set id is unknown : %s" % (rule_name,
to_native(not_found.msg)))
except vim.fault.HostConfigFault as host_config_fault:
self.module.fail_json(msg="Failed to enabled rule set %s as an internal"
" error happened while reconfiguring"
" rule set : %s" % (rule_name,
to_native(host_config_fault.msg)))
results['rule_set_state'][host.name][rule_name] = dict(current_state=rule_enabled,
previous_state=current_rule_state,
desired_state=rule_enabled,
)
if any(fw_change_list):
results['changed'] = True
self.module.exit_json(**results)
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
cluster_name=dict(type='str', required=False),
esxi_hostname=dict(type='str', required=False),
rules=dict(type='list', default=list(), required=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=[
['cluster_name', 'esxi_hostname'],
]
)
vmware_firewall_manager = VmwareFirewallManager(module)
vmware_firewall_manager.ensure()
if __name__ == "__main__":
main()
| maartenq/ansible | lib/ansible/modules/cloud/vmware/vmware_host_firewall_manager.py | Python | gpl-3.0 | 8,047 |
# pylint: disable=C0111
# pylint: disable=W0621
from lettuce import world, step
from common import *
from terrain.steps import reload_the_page
from selenium.common.exceptions import (
InvalidElementStateException, WebDriverException)
from nose.tools import assert_in, assert_not_in, assert_equal, assert_not_equal # pylint: disable=E0611
@step(u'I am viewing the grading settings')
def view_grading_settings(step):
world.click_course_settings()
link_css = 'li.nav-course-settings-grading a'
world.css_click(link_css)
@step(u'I add "([^"]*)" new grade')
def add_grade(step, many):
grade_css = '.new-grade-button'
for i in range(int(many)):
world.css_click(grade_css)
@step(u'I delete a grade')
def delete_grade(step):
#grade_css = 'li.grade-specific-bar > a.remove-button'
#range_css = '.grade-specific-bar'
#world.css_find(range_css)[1].mouseover()
#world.css_click(grade_css)
world.browser.execute_script('document.getElementsByClassName("remove-button")[0].click()')
@step(u'I see I now have "([^"]*)" grades$')
def view_grade_slider(step, how_many):
grade_slider_css = '.grade-specific-bar'
all_grades = world.css_find(grade_slider_css)
assert_equal(len(all_grades), int(how_many))
@step(u'I move a grading section')
def move_grade_slider(step):
moveable_css = '.ui-resizable-e'
f = world.css_find(moveable_css).first
f.action_chains.drag_and_drop_by_offset(f._element, 100, 0).perform()
@step(u'I see that the grade range has changed')
def confirm_change(step):
range_css = '.range'
all_ranges = world.css_find(range_css)
for i in range(len(all_ranges)):
assert_not_equal(world.css_html(range_css, index=i), '0-50')
@step(u'I change assignment type "([^"]*)" to "([^"]*)"$')
def change_assignment_name(step, old_name, new_name):
name_id = '#course-grading-assignment-name'
index = get_type_index(old_name)
f = world.css_find(name_id)[index]
assert_not_equal(index, -1)
for count in range(len(old_name)):
f._element.send_keys(Keys.END, Keys.BACK_SPACE)
f._element.send_keys(new_name)
@step(u'I go back to the main course page')
def main_course_page(step):
course_name = world.scenario_dict['COURSE'].display_name.replace(' ', '_')
main_page_link = '/course/{org}.{number}.{name}/branch/draft/block/{name}'.format(
org=world.scenario_dict['COURSE'].org,
number=world.scenario_dict['COURSE'].number,
name=course_name
)
world.visit(main_page_link)
assert_in('Course Outline', world.css_text('h1.page-header'))
@step(u'I do( not)? see the assignment name "([^"]*)"$')
def see_assignment_name(step, do_not, name):
assignment_menu_css = 'ul.menu > li > a'
    # First assert that it is there; it may take a bit to redraw
assert_true(
world.css_find(assignment_menu_css),
msg="Could not find assignment menu"
)
assignment_menu = world.css_find(assignment_menu_css)
allnames = [item.html for item in assignment_menu]
if do_not:
assert_not_in(name, allnames)
else:
assert_in(name, allnames)
@step(u'I delete the assignment type "([^"]*)"$')
def delete_assignment_type(step, to_delete):
delete_css = '.remove-grading-data'
world.css_click(delete_css, index=get_type_index(to_delete))
@step(u'I add a new assignment type "([^"]*)"$')
def add_assignment_type(step, new_name):
add_button_css = '.add-grading-data'
world.css_click(add_button_css)
name_id = '#course-grading-assignment-name'
new_assignment = world.css_find(name_id)[-1]
new_assignment._element.send_keys(new_name)
@step(u'I set the assignment weight to "([^"]*)"$')
def set_weight(step, weight):
weight_id = '#course-grading-assignment-gradeweight'
weight_field = world.css_find(weight_id)[-1]
old_weight = world.css_value(weight_id, -1)
for count in range(len(old_weight)):
weight_field._element.send_keys(Keys.END, Keys.BACK_SPACE)
weight_field._element.send_keys(weight)
@step(u'the assignment weight is displayed as "([^"]*)"$')
def verify_weight(step, weight):
weight_id = '#course-grading-assignment-gradeweight'
assert_equal(world.css_value(weight_id, -1), weight)
@step(u'I have populated the course')
def populate_course(step):
step.given('I have added a new section')
step.given('I have added a new subsection')
@step(u'I do not see the changes persisted on refresh$')
def changes_not_persisted(step):
reload_the_page(step)
name_id = '#course-grading-assignment-name'
assert_equal(world.css_value(name_id), 'Homework')
@step(u'I see the assignment type "(.*)"$')
def i_see_the_assignment_type(_step, name):
assignment_css = '#course-grading-assignment-name'
assignments = world.css_find(assignment_css)
types = [ele['value'] for ele in assignments]
assert_in(name, types)
@step(u'I change the highest grade range to "(.*)"$')
def change_grade_range(_step, range_name):
range_css = 'span.letter-grade'
grade = world.css_find(range_css).first
grade.value = range_name
@step(u'I see the highest grade range is "(.*)"$')
def i_see_highest_grade_range(_step, range_name):
range_css = 'span.letter-grade'
grade = world.css_find(range_css).first
assert_equal(grade.value, range_name)
@step(u'I cannot edit the "Fail" grade range$')
def cannot_edit_fail(_step):
range_css = 'span.letter-grade'
ranges = world.css_find(range_css)
assert_equal(len(ranges), 2)
assert_not_equal(ranges.last.value, 'Failure')
# try to change the grade range -- this should throw an exception
try:
ranges.last.value = 'Failure'
except (InvalidElementStateException):
pass # We should get this exception on failing to edit the element
# check to be sure that nothing has changed
ranges = world.css_find(range_css)
assert_equal(len(ranges), 2)
assert_not_equal(ranges.last.value, 'Failure')
@step(u'I change the grace period to "(.*)"$')
def i_change_grace_period(_step, grace_period):
grace_period_css = '#course-grading-graceperiod'
ele = world.css_find(grace_period_css).first
# Sometimes it takes a moment for the JavaScript
# to populate the field. If we don't wait for
# this to happen, then we can end up with
# an invalid value (e.g. "00:0048:00")
# which prevents us from saving.
assert_true(world.css_has_value(grace_period_css, "00:00"))
# Set the new grace period
ele.value = grace_period
@step(u'I see the grace period is "(.*)"$')
def the_grace_period_is(_step, grace_period):
grace_period_css = '#course-grading-graceperiod'
# The default value is 00:00
# so we need to wait for it to change
world.wait_for(
lambda _: world.css_has_value(grace_period_css, grace_period)
)
def get_type_index(name):
name_id = '#course-grading-assignment-name'
all_types = world.css_find(name_id)
for index in range(len(all_types)):
if world.css_value(name_id, index=index) == name:
return index
return -1
| hkawasaki/kawasaki-aio8-1 | cms/djangoapps/contentstore/features/grading.py | Python | agpl-3.0 | 7,108 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.reverse_sequence_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import sys
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class WhereOpTest(test.TestCase):
def _testWhere(self, x, truth, expected_err_re=None):
with self.cached_session(use_gpu=True):
ans = array_ops.where(x)
self.assertEqual([None, x.ndim], ans.get_shape().as_list())
if expected_err_re is None:
tf_ans = self.evaluate(ans)
self.assertAllClose(tf_ans, truth, atol=1e-10)
else:
with self.assertRaisesOpError(expected_err_re):
self.evaluate(ans)
def testWrongNumbers(self):
with self.session(use_gpu=True):
with self.assertRaises(ValueError):
array_ops.where([False, True], [1, 2], None)
with self.assertRaises(ValueError):
array_ops.where([False, True], None, [1, 2])
@test_util.run_deprecated_v1
def testBasicVec(self):
x = np.asarray([True, False])
truth = np.asarray([[0]], dtype=np.int64)
self._testWhere(x, truth)
x = np.asarray([False, True, False])
truth = np.asarray([[1]], dtype=np.int64)
self._testWhere(x, truth)
x = np.asarray([False, False, True, False, True])
truth = np.asarray([[2], [4]], dtype=np.int64)
self._testWhere(x, truth)
@test_util.run_deprecated_v1
def testRandomVec(self):
x = np.random.rand(1000000) > 0.5
truth = np.vstack([np.where(x)[0].astype(np.int64)]).T
self._testWhere(x, truth)
@test_util.run_deprecated_v1
def testBasicMat(self):
x = np.asarray([[True, False], [True, False]])
# Ensure RowMajor mode
truth = np.asarray([[0, 0], [1, 0]], dtype=np.int64)
self._testWhere(x, truth)
@test_util.run_deprecated_v1
def testBasic3Tensor(self):
x = np.asarray([[[True, False], [True, False]],
[[False, True], [False, True]],
[[False, False], [False, True]]])
# Ensure RowMajor mode
truth = np.asarray(
[[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1], [2, 1, 1]], dtype=np.int64)
self._testWhere(x, truth)
def _testRandom(self, dtype, expected_err_re=None):
shape = [127, 33, 53]
x = np.random.randn(*shape) + 1j * np.random.randn(*shape)
x = (np.random.randn(*shape) > 0).astype(dtype)
truth = np.where(np.abs(x) > 0) # Tuples of indices by axis.
truth = np.vstack(truth).T # Convert to [num_true, indices].
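    # Illustrative example (not from the original test): for a 2-D x with
    # nonzero entries at (0, 1) and (2, 0),
    #   np.where(np.abs(x) > 0) returns (array([0, 2]), array([1, 0])), one index array per axis,
    #   and np.vstack(...).T yields array([[0, 1], [2, 0]]), i.e. shape [num_true, ndim].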
self._testWhere(x, truth, expected_err_re)
@test_util.run_deprecated_v1
def testRandomBool(self):
self._testRandom(np.bool)
@test_util.run_deprecated_v1
def testRandomInt32(self):
self._testRandom(np.int32)
@test_util.run_deprecated_v1
def testRandomInt64(self):
self._testRandom(np.int64)
@test_util.run_deprecated_v1
def testRandomFloat(self):
self._testRandom(np.float32)
@test_util.run_deprecated_v1
def testRandomDouble(self):
self._testRandom(np.float64)
@test_util.run_deprecated_v1
def testRandomComplex64(self):
self._testRandom(np.complex64)
@test_util.run_deprecated_v1
def testRandomComplex128(self):
self._testRandom(np.complex128)
@test_util.run_deprecated_v1
def testRandomUint8(self):
self._testRandom(np.uint8)
@test_util.run_deprecated_v1
def testRandomInt8(self):
self._testRandom(np.int8)
@test_util.run_deprecated_v1
def testRandomInt16(self):
self._testRandom(np.int16)
@test_util.run_deprecated_v1
def testThreeArgument(self):
x = np.array([[-2, 3, -1], [1, -3, -3]])
np_val = np.where(x > 0, x * x, -x)
with self.session(use_gpu=True):
tf_val = array_ops.where(constant_op.constant(x) > 0, x * x, -x).eval()
self.assertAllEqual(tf_val, np_val)
@test_util.run_deprecated_v1
def testBatchSelect(self):
x = np.array([[-2, 3, -1] * 64, [1, -3, -3] * 64] * 8192) # [16384, 192]
c_mat = np.array([[False] * 192, [True] * 192] * 8192) # [16384, 192]
c_vec = np.array([False, True] * 8192) # [16384]
np_val = np.where(c_mat, x * x, -x)
with self.session(use_gpu=True):
tf_val = array_ops.where(c_vec, x * x, -x).eval()
self.assertAllEqual(tf_val, np_val)
class WhereBenchmark(test.Benchmark):
def benchmarkWhere(self):
for (m, n, p, use_gpu) in itertools.product(
[10],
[10, 100, 1000, 10000, 100000, 1000000],
[0.01, 0.5, 0.99],
[False, True]):
name = "m_%d_n_%d_p_%g_use_gpu_%s" % (m, n, p, use_gpu)
device = "/%s:0" % ("gpu" if use_gpu else "cpu")
with ops.Graph().as_default():
with ops.device(device):
x = random_ops.random_uniform((m, n), dtype=dtypes.float32) <= p
v = resource_variable_ops.ResourceVariable(x)
op = array_ops.where(v)
with session.Session(config=benchmark.benchmark_config()) as sess:
v.initializer.run()
r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
gb_processed_input = m * n / 1.0e9
# approximate size of output: m*n*p int64s for each axis.
gb_processed_output = 2 * 8 * m * n * p / 1.0e9
gb_processed = gb_processed_input + gb_processed_output
throughput = gb_processed / r["wall_time"]
print("Benchmark: %s \t wall_time: %0.03g s \t "
"Throughput: %0.03g GB/s" % (name, r["wall_time"], throughput))
sys.stdout.flush()
def benchmarkBatchSelect(self):
for (m, n, use_gpu) in itertools.product([1000, 10000, 100000],
[10, 100, 1000], [False, True]):
name = "m_%d_n_%d_use_gpu_%s" % (m, n, use_gpu)
device = "/%s:0" % ("gpu" if use_gpu else "cpu")
with ops.Graph().as_default():
with ops.device(device):
x_gen = random_ops.random_uniform([m, n], dtype=dtypes.float32)
y_gen = random_ops.random_uniform([m, n], dtype=dtypes.float32)
c_gen = random_ops.random_uniform([m], dtype=dtypes.float32) <= 0.5
x = resource_variable_ops.ResourceVariable(x_gen)
y = resource_variable_ops.ResourceVariable(y_gen)
c = resource_variable_ops.ResourceVariable(c_gen)
op = array_ops.where(c, x, y)
with session.Session(config=benchmark.benchmark_config()) as sess:
x.initializer.run()
y.initializer.run()
c.initializer.run()
r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
# approximate size of output: m*n*2 floats for each axis.
gb_processed = m * n * 8 / 1.0e9
throughput = gb_processed / r["wall_time"]
print("Benchmark: %s \t wall_time: %0.03g s \t "
"Throughput: %0.03g GB/s" % (name, r["wall_time"], throughput))
sys.stdout.flush()
if __name__ == "__main__":
test.main()
| jbedorf/tensorflow | tensorflow/python/kernel_tests/where_op_test.py | Python | apache-2.0 | 8,003 |
import sys, imp
from . import model, ffiplatform
class VCPythonEngine(object):
_class_key = 'x'
_gen_python_module = True
def __init__(self, verifier):
self.verifier = verifier
self.ffi = verifier.ffi
self._struct_pending_verification = {}
self._types_of_builtin_functions = {}
def patch_extension_kwds(self, kwds):
pass
def find_module(self, module_name, path, so_suffixes):
try:
f, filename, descr = imp.find_module(module_name, path)
except ImportError:
return None
if f is not None:
f.close()
# Note that after a setuptools installation, there are both .py
# and .so files with the same basename. The code here relies on
# imp.find_module() locating the .so in priority.
if descr[0] not in so_suffixes:
return None
return filename
def collect_types(self):
self._typesdict = {}
self._generate("collecttype")
def _prnt(self, what=''):
self._f.write(what + '\n')
def _gettypenum(self, type):
# a KeyError here is a bug. please report it! :-)
return self._typesdict[type]
def _do_collect_type(self, tp):
if ((not isinstance(tp, model.PrimitiveType)
or tp.name == 'long double')
and tp not in self._typesdict):
num = len(self._typesdict)
self._typesdict[tp] = num
def write_source_to_f(self):
self.collect_types()
#
# The new module will have a _cffi_setup() function that receives
# objects from the ffi world, and that calls some setup code in
# the module. This setup code is split in several independent
# functions, e.g. one per constant. The functions are "chained"
# by ending in a tail call to each other.
#
# This is further split in two chained lists, depending on if we
# can do it at import-time or if we must wait for _cffi_setup() to
# provide us with the <ctype> objects. This is needed because we
# need the values of the enum constants in order to build the
# <ctype 'enum'> that we may have to pass to _cffi_setup().
#
        # The following two 'chained_list_constants' items contain
# the head of these two chained lists, as a string that gives the
# call to do, if any.
self._chained_list_constants = ['((void)lib,0)', '((void)lib,0)']
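        # Illustrative sketch of the generated chain (simplified, not literal
        # output): each per-constant setup function tail-calls the previous
        # head of its list, ending at the '((void)lib,0)' no-op, e.g.
        #   static int _cffi_const_FOO(PyObject *lib) { ... return _cffi_const_BAR(lib); }
        #   static int _cffi_const_BAR(PyObject *lib) { ... return ((void)lib,0); }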
#
prnt = self._prnt
# first paste some standard set of lines that are mostly '#define'
prnt(cffimod_header)
prnt()
# then paste the C source given by the user, verbatim.
prnt(self.verifier.preamble)
prnt()
#
# call generate_cpy_xxx_decl(), for every xxx found from
# ffi._parser._declarations. This generates all the functions.
self._generate("decl")
#
# implement the function _cffi_setup_custom() as calling the
# head of the chained list.
self._generate_setup_custom()
prnt()
#
# produce the method table, including the entries for the
# generated Python->C function wrappers, which are done
# by generate_cpy_function_method().
prnt('static PyMethodDef _cffi_methods[] = {')
self._generate("method")
prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS, NULL},')
prnt(' {NULL, NULL, 0, NULL} /* Sentinel */')
prnt('};')
prnt()
#
# standard init.
modname = self.verifier.get_module_name()
constants = self._chained_list_constants[False]
prnt('#if PY_MAJOR_VERSION >= 3')
prnt()
prnt('static struct PyModuleDef _cffi_module_def = {')
prnt(' PyModuleDef_HEAD_INIT,')
prnt(' "%s",' % modname)
prnt(' NULL,')
prnt(' -1,')
prnt(' _cffi_methods,')
prnt(' NULL, NULL, NULL, NULL')
prnt('};')
prnt()
prnt('PyMODINIT_FUNC')
prnt('PyInit_%s(void)' % modname)
prnt('{')
prnt(' PyObject *lib;')
prnt(' lib = PyModule_Create(&_cffi_module_def);')
prnt(' if (lib == NULL)')
prnt(' return NULL;')
prnt(' if (%s < 0 || _cffi_init() < 0) {' % (constants,))
prnt(' Py_DECREF(lib);')
prnt(' return NULL;')
prnt(' }')
prnt(' return lib;')
prnt('}')
prnt()
prnt('#else')
prnt()
prnt('PyMODINIT_FUNC')
prnt('init%s(void)' % modname)
prnt('{')
prnt(' PyObject *lib;')
prnt(' lib = Py_InitModule("%s", _cffi_methods);' % modname)
prnt(' if (lib == NULL)')
prnt(' return;')
prnt(' if (%s < 0 || _cffi_init() < 0)' % (constants,))
prnt(' return;')
prnt(' return;')
prnt('}')
prnt()
prnt('#endif')
def load_library(self, flags=None):
# XXX review all usages of 'self' here!
# import it as a new extension module
imp.acquire_lock()
try:
if hasattr(sys, "getdlopenflags"):
previous_flags = sys.getdlopenflags()
try:
if hasattr(sys, "setdlopenflags") and flags is not None:
sys.setdlopenflags(flags)
module = imp.load_dynamic(self.verifier.get_module_name(),
self.verifier.modulefilename)
except ImportError as e:
error = "importing %r: %s" % (self.verifier.modulefilename, e)
raise ffiplatform.VerificationError(error)
finally:
if hasattr(sys, "setdlopenflags"):
sys.setdlopenflags(previous_flags)
finally:
imp.release_lock()
#
# call loading_cpy_struct() to get the struct layout inferred by
# the C compiler
self._load(module, 'loading')
#
# the C code will need the <ctype> objects. Collect them in
# order in a list.
revmapping = dict([(value, key)
for (key, value) in self._typesdict.items()])
lst = [revmapping[i] for i in range(len(revmapping))]
lst = list(map(self.ffi._get_cached_btype, lst))
#
# build the FFILibrary class and instance and call _cffi_setup().
# this will set up some fields like '_cffi_types', and only then
# it will invoke the chained list of functions that will really
# build (notably) the constant objects, as <cdata> if they are
# pointers, and store them as attributes on the 'library' object.
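        # Caller's view, as an illustration (assuming the verified C source
        # declared e.g. "int foo(int);" and "#define BAR 42"):
        #   lib = verifier.load_library()
        #   lib.foo(1)   # generated Python->C wrapper copied onto 'library' below
        #   lib.BAR      # constant installed by the chained _cffi_const_BAR(lib) call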
class FFILibrary(object):
_cffi_python_module = module
_cffi_ffi = self.ffi
_cffi_dir = []
def __dir__(self):
return FFILibrary._cffi_dir + list(self.__dict__)
library = FFILibrary()
if module._cffi_setup(lst, ffiplatform.VerificationError, library):
import warnings
warnings.warn("reimporting %r might overwrite older definitions"
% (self.verifier.get_module_name()))
#
# finally, call the loaded_cpy_xxx() functions. This will perform
# the final adjustments, like copying the Python->C wrapper
# functions from the module to the 'library' object, and setting
# up the FFILibrary class with properties for the global C variables.
self._load(module, 'loaded', library=library)
module._cffi_original_ffi = self.ffi
module._cffi_types_of_builtin_funcs = self._types_of_builtin_functions
return library
def _get_declarations(self):
return sorted(self.ffi._parser._declarations.items())
def _generate(self, step_name):
for name, tp in self._get_declarations():
kind, realname = name.split(' ', 1)
try:
method = getattr(self, '_generate_cpy_%s_%s' % (kind,
step_name))
except AttributeError:
raise ffiplatform.VerificationError(
"not implemented in verify(): %r" % name)
try:
method(tp, realname)
except Exception as e:
model.attach_exception_info(e, name)
raise
def _load(self, module, step_name, **kwds):
for name, tp in self._get_declarations():
kind, realname = name.split(' ', 1)
method = getattr(self, '_%s_cpy_%s' % (step_name, kind))
try:
method(tp, realname, module, **kwds)
except Exception as e:
model.attach_exception_info(e, name)
raise
def _generate_nothing(self, tp, name):
pass
def _loaded_noop(self, tp, name, module, **kwds):
pass
# ----------
def _convert_funcarg_to_c(self, tp, fromvar, tovar, errcode):
extraarg = ''
if isinstance(tp, model.PrimitiveType):
if tp.is_integer_type() and tp.name != '_Bool':
converter = '_cffi_to_c_int'
extraarg = ', %s' % tp.name
else:
converter = '(%s)_cffi_to_c_%s' % (tp.get_c_name(''),
tp.name.replace(' ', '_'))
errvalue = '-1'
#
elif isinstance(tp, model.PointerType):
self._convert_funcarg_to_c_ptr_or_array(tp, fromvar,
tovar, errcode)
return
#
elif isinstance(tp, (model.StructOrUnion, model.EnumType)):
# a struct (not a struct pointer) as a function argument
self._prnt(' if (_cffi_to_c((char *)&%s, _cffi_type(%d), %s) < 0)'
% (tovar, self._gettypenum(tp), fromvar))
self._prnt(' %s;' % errcode)
return
#
elif isinstance(tp, model.FunctionPtrType):
converter = '(%s)_cffi_to_c_pointer' % tp.get_c_name('')
extraarg = ', _cffi_type(%d)' % self._gettypenum(tp)
errvalue = 'NULL'
#
else:
raise NotImplementedError(tp)
#
self._prnt(' %s = %s(%s%s);' % (tovar, converter, fromvar, extraarg))
self._prnt(' if (%s == (%s)%s && PyErr_Occurred())' % (
tovar, tp.get_c_name(''), errvalue))
self._prnt(' %s;' % errcode)
def _extra_local_variables(self, tp, localvars):
if isinstance(tp, model.PointerType):
localvars.add('Py_ssize_t datasize')
def _convert_funcarg_to_c_ptr_or_array(self, tp, fromvar, tovar, errcode):
self._prnt(' datasize = _cffi_prepare_pointer_call_argument(')
self._prnt(' _cffi_type(%d), %s, (char **)&%s);' % (
self._gettypenum(tp), fromvar, tovar))
self._prnt(' if (datasize != 0) {')
self._prnt(' if (datasize < 0)')
self._prnt(' %s;' % errcode)
self._prnt(' %s = alloca((size_t)datasize);' % (tovar,))
self._prnt(' memset((void *)%s, 0, (size_t)datasize);' % (tovar,))
self._prnt(' if (_cffi_convert_array_from_object('
'(char *)%s, _cffi_type(%d), %s) < 0)' % (
tovar, self._gettypenum(tp), fromvar))
self._prnt(' %s;' % errcode)
self._prnt(' }')
def _convert_expr_from_c(self, tp, var, context):
if isinstance(tp, model.PrimitiveType):
if tp.is_integer_type():
return '_cffi_from_c_int(%s, %s)' % (var, tp.name)
elif tp.name != 'long double':
return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var)
else:
return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
elif isinstance(tp, (model.PointerType, model.FunctionPtrType)):
return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
elif isinstance(tp, model.ArrayType):
return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
var, self._gettypenum(model.PointerType(tp.item)))
elif isinstance(tp, model.StructType):
if tp.fldnames is None:
raise TypeError("'%s' is used as %s, but is opaque" % (
tp._get_c_name(), context))
return '_cffi_from_c_struct((char *)&%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
elif isinstance(tp, model.EnumType):
return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
else:
raise NotImplementedError(tp)
# ----------
# typedefs: generates no code so far
_generate_cpy_typedef_collecttype = _generate_nothing
_generate_cpy_typedef_decl = _generate_nothing
_generate_cpy_typedef_method = _generate_nothing
_loading_cpy_typedef = _loaded_noop
_loaded_cpy_typedef = _loaded_noop
# ----------
# function declarations
def _generate_cpy_function_collecttype(self, tp, name):
assert isinstance(tp, model.FunctionPtrType)
if tp.ellipsis:
self._do_collect_type(tp)
else:
# don't call _do_collect_type(tp) in this common case,
# otherwise test_autofilled_struct_as_argument fails
for type in tp.args:
self._do_collect_type(type)
self._do_collect_type(tp.result)
def _generate_cpy_function_decl(self, tp, name):
assert isinstance(tp, model.FunctionPtrType)
if tp.ellipsis:
# cannot support vararg functions better than this: check for its
# exact type (including the fixed arguments), and build it as a
# constant function pointer (no CPython wrapper)
self._generate_cpy_const(False, name, tp)
return
prnt = self._prnt
numargs = len(tp.args)
if numargs == 0:
argname = 'noarg'
elif numargs == 1:
argname = 'arg0'
else:
argname = 'args'
prnt('static PyObject *')
prnt('_cffi_f_%s(PyObject *self, PyObject *%s)' % (name, argname))
prnt('{')
#
context = 'argument of %s' % name
for i, type in enumerate(tp.args):
prnt(' %s;' % type.get_c_name(' x%d' % i, context))
#
localvars = set()
for type in tp.args:
self._extra_local_variables(type, localvars)
for decl in localvars:
prnt(' %s;' % (decl,))
#
if not isinstance(tp.result, model.VoidType):
result_code = 'result = '
context = 'result of %s' % name
prnt(' %s;' % tp.result.get_c_name(' result', context))
else:
result_code = ''
#
if len(tp.args) > 1:
rng = range(len(tp.args))
for i in rng:
prnt(' PyObject *arg%d;' % i)
prnt()
prnt(' if (!PyArg_ParseTuple(args, "%s:%s", %s))' % (
'O' * numargs, name, ', '.join(['&arg%d' % i for i in rng])))
prnt(' return NULL;')
prnt()
#
for i, type in enumerate(tp.args):
self._convert_funcarg_to_c(type, 'arg%d' % i, 'x%d' % i,
'return NULL')
prnt()
#
prnt(' Py_BEGIN_ALLOW_THREADS')
prnt(' _cffi_restore_errno();')
prnt(' { %s%s(%s); }' % (
result_code, name,
', '.join(['x%d' % i for i in range(len(tp.args))])))
prnt(' _cffi_save_errno();')
prnt(' Py_END_ALLOW_THREADS')
prnt()
#
prnt(' (void)self; /* unused */')
if numargs == 0:
prnt(' (void)noarg; /* unused */')
if result_code:
prnt(' return %s;' %
self._convert_expr_from_c(tp.result, 'result', 'result type'))
else:
prnt(' Py_INCREF(Py_None);')
prnt(' return Py_None;')
prnt('}')
prnt()
def _generate_cpy_function_method(self, tp, name):
if tp.ellipsis:
return
numargs = len(tp.args)
if numargs == 0:
meth = 'METH_NOARGS'
elif numargs == 1:
meth = 'METH_O'
else:
meth = 'METH_VARARGS'
self._prnt(' {"%s", _cffi_f_%s, %s, NULL},' % (name, name, meth))
_loading_cpy_function = _loaded_noop
def _loaded_cpy_function(self, tp, name, module, library):
if tp.ellipsis:
return
func = getattr(module, name)
setattr(library, name, func)
self._types_of_builtin_functions[func] = tp
# ----------
# named structs
_generate_cpy_struct_collecttype = _generate_nothing
def _generate_cpy_struct_decl(self, tp, name):
assert name == tp.name
self._generate_struct_or_union_decl(tp, 'struct', name)
def _generate_cpy_struct_method(self, tp, name):
self._generate_struct_or_union_method(tp, 'struct', name)
def _loading_cpy_struct(self, tp, name, module):
self._loading_struct_or_union(tp, 'struct', name, module)
def _loaded_cpy_struct(self, tp, name, module, **kwds):
self._loaded_struct_or_union(tp)
_generate_cpy_union_collecttype = _generate_nothing
def _generate_cpy_union_decl(self, tp, name):
assert name == tp.name
self._generate_struct_or_union_decl(tp, 'union', name)
def _generate_cpy_union_method(self, tp, name):
self._generate_struct_or_union_method(tp, 'union', name)
def _loading_cpy_union(self, tp, name, module):
self._loading_struct_or_union(tp, 'union', name, module)
def _loaded_cpy_union(self, tp, name, module, **kwds):
self._loaded_struct_or_union(tp)
def _generate_struct_or_union_decl(self, tp, prefix, name):
if tp.fldnames is None:
return # nothing to do with opaque structs
checkfuncname = '_cffi_check_%s_%s' % (prefix, name)
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
cname = ('%s %s' % (prefix, name)).strip()
#
prnt = self._prnt
prnt('static void %s(%s *p)' % (checkfuncname, cname))
prnt('{')
prnt(' /* only to generate compile-time warnings or errors */')
prnt(' (void)p;')
for fname, ftype, fbitsize in tp.enumfields():
if (isinstance(ftype, model.PrimitiveType)
and ftype.is_integer_type()) or fbitsize >= 0:
# accept all integers, but complain on float or double
prnt(' (void)((p->%s) << 1);' % fname)
else:
# only accept exactly the type declared.
try:
prnt(' { %s = &p->%s; (void)tmp; }' % (
ftype.get_c_name('*tmp', 'field %r'%fname), fname))
except ffiplatform.VerificationError as e:
prnt(' /* %s */' % str(e)) # cannot verify it, ignore
prnt('}')
prnt('static PyObject *')
prnt('%s(PyObject *self, PyObject *noarg)' % (layoutfuncname,))
prnt('{')
prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname)
prnt(' static Py_ssize_t nums[] = {')
prnt(' sizeof(%s),' % cname)
prnt(' offsetof(struct _cffi_aligncheck, y),')
for fname, ftype, fbitsize in tp.enumfields():
if fbitsize >= 0:
continue # xxx ignore fbitsize for now
prnt(' offsetof(%s, %s),' % (cname, fname))
if isinstance(ftype, model.ArrayType) and ftype.length is None:
prnt(' 0, /* %s */' % ftype._get_c_name())
else:
prnt(' sizeof(((%s *)0)->%s),' % (cname, fname))
prnt(' -1')
prnt(' };')
prnt(' (void)self; /* unused */')
prnt(' (void)noarg; /* unused */')
prnt(' return _cffi_get_struct_layout(nums);')
prnt(' /* the next line is not executed, but compiled */')
prnt(' %s(0);' % (checkfuncname,))
prnt('}')
prnt()
def _generate_struct_or_union_method(self, tp, prefix, name):
if tp.fldnames is None:
return # nothing to do with opaque structs
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
self._prnt(' {"%s", %s, METH_NOARGS, NULL},' % (layoutfuncname,
layoutfuncname))
def _loading_struct_or_union(self, tp, prefix, name, module):
if tp.fldnames is None:
return # nothing to do with opaque structs
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
#
function = getattr(module, layoutfuncname)
layout = function()
if isinstance(tp, model.StructOrUnion) and tp.partial:
# use the function()'s sizes and offsets to guide the
# layout of the struct
totalsize = layout[0]
totalalignment = layout[1]
fieldofs = layout[2::2]
fieldsize = layout[3::2]
tp.force_flatten()
assert len(fieldofs) == len(fieldsize) == len(tp.fldnames)
tp.fixedlayout = fieldofs, fieldsize, totalsize, totalalignment
else:
cname = ('%s %s' % (prefix, name)).strip()
self._struct_pending_verification[tp] = layout, cname
def _loaded_struct_or_union(self, tp):
if tp.fldnames is None:
return # nothing to do with opaque structs
self.ffi._get_cached_btype(tp) # force 'fixedlayout' to be considered
if tp in self._struct_pending_verification:
# check that the layout sizes and offsets match the real ones
def check(realvalue, expectedvalue, msg):
if realvalue != expectedvalue:
raise ffiplatform.VerificationError(
"%s (we have %d, but C compiler says %d)"
% (msg, expectedvalue, realvalue))
ffi = self.ffi
BStruct = ffi._get_cached_btype(tp)
layout, cname = self._struct_pending_verification.pop(tp)
check(layout[0], ffi.sizeof(BStruct), "wrong total size")
check(layout[1], ffi.alignof(BStruct), "wrong total alignment")
i = 2
for fname, ftype, fbitsize in tp.enumfields():
if fbitsize >= 0:
continue # xxx ignore fbitsize for now
check(layout[i], ffi.offsetof(BStruct, fname),
"wrong offset for field %r" % (fname,))
if layout[i+1] != 0:
BField = ffi._get_cached_btype(ftype)
check(layout[i+1], ffi.sizeof(BField),
"wrong size for field %r" % (fname,))
i += 2
assert i == len(layout)
# ----------
# 'anonymous' declarations. These are produced for anonymous structs
# or unions; the 'name' is obtained by a typedef.
_generate_cpy_anonymous_collecttype = _generate_nothing
def _generate_cpy_anonymous_decl(self, tp, name):
if isinstance(tp, model.EnumType):
self._generate_cpy_enum_decl(tp, name, '')
else:
self._generate_struct_or_union_decl(tp, '', name)
def _generate_cpy_anonymous_method(self, tp, name):
if not isinstance(tp, model.EnumType):
self._generate_struct_or_union_method(tp, '', name)
def _loading_cpy_anonymous(self, tp, name, module):
if isinstance(tp, model.EnumType):
self._loading_cpy_enum(tp, name, module)
else:
self._loading_struct_or_union(tp, '', name, module)
def _loaded_cpy_anonymous(self, tp, name, module, **kwds):
if isinstance(tp, model.EnumType):
self._loaded_cpy_enum(tp, name, module, **kwds)
else:
self._loaded_struct_or_union(tp)
# ----------
# constants, likely declared with '#define'
def _generate_cpy_const(self, is_int, name, tp=None, category='const',
vartp=None, delayed=True, size_too=False,
check_value=None):
prnt = self._prnt
funcname = '_cffi_%s_%s' % (category, name)
prnt('static int %s(PyObject *lib)' % funcname)
prnt('{')
prnt(' PyObject *o;')
prnt(' int res;')
if not is_int:
prnt(' %s;' % (vartp or tp).get_c_name(' i', name))
else:
assert category == 'const'
#
if check_value is not None:
self._check_int_constant_value(name, check_value)
#
if not is_int:
if category == 'var':
realexpr = '&' + name
else:
realexpr = name
prnt(' i = (%s);' % (realexpr,))
prnt(' o = %s;' % (self._convert_expr_from_c(tp, 'i',
'variable type'),))
assert delayed
else:
prnt(' o = _cffi_from_c_int_const(%s);' % name)
prnt(' if (o == NULL)')
prnt(' return -1;')
if size_too:
prnt(' {')
prnt(' PyObject *o1 = o;')
prnt(' o = Py_BuildValue("On", o1, (Py_ssize_t)sizeof(%s));'
% (name,))
prnt(' Py_DECREF(o1);')
prnt(' if (o == NULL)')
prnt(' return -1;')
prnt(' }')
prnt(' res = PyObject_SetAttrString(lib, "%s", o);' % name)
prnt(' Py_DECREF(o);')
prnt(' if (res < 0)')
prnt(' return -1;')
prnt(' return %s;' % self._chained_list_constants[delayed])
self._chained_list_constants[delayed] = funcname + '(lib)'
prnt('}')
prnt()
def _generate_cpy_constant_collecttype(self, tp, name):
is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type()
if not is_int:
self._do_collect_type(tp)
def _generate_cpy_constant_decl(self, tp, name):
is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type()
self._generate_cpy_const(is_int, name, tp)
_generate_cpy_constant_method = _generate_nothing
_loading_cpy_constant = _loaded_noop
_loaded_cpy_constant = _loaded_noop
# ----------
# enums
def _check_int_constant_value(self, name, value, err_prefix=''):
prnt = self._prnt
if value <= 0:
prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % (
name, name, value))
else:
prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % (
name, name, value))
prnt(' char buf[64];')
prnt(' if ((%s) <= 0)' % name)
prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % name)
prnt(' else')
prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' %
name)
prnt(' PyErr_Format(_cffi_VerificationError,')
prnt(' "%s%s has the real value %s, not %s",')
prnt(' "%s", "%s", buf, "%d");' % (
err_prefix, name, value))
prnt(' return -1;')
prnt(' }')
def _enum_funcname(self, prefix, name):
# "$enum_$1" => "___D_enum____D_1"
name = name.replace('$', '___D_')
return '_cffi_e_%s_%s' % (prefix, name)
def _generate_cpy_enum_decl(self, tp, name, prefix='enum'):
if tp.partial:
for enumerator in tp.enumerators:
self._generate_cpy_const(True, enumerator, delayed=False)
return
#
funcname = self._enum_funcname(prefix, name)
prnt = self._prnt
prnt('static int %s(PyObject *lib)' % funcname)
prnt('{')
for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
self._check_int_constant_value(enumerator, enumvalue,
"enum %s: " % name)
prnt(' return %s;' % self._chained_list_constants[True])
self._chained_list_constants[True] = funcname + '(lib)'
prnt('}')
prnt()
_generate_cpy_enum_collecttype = _generate_nothing
_generate_cpy_enum_method = _generate_nothing
def _loading_cpy_enum(self, tp, name, module):
if tp.partial:
enumvalues = [getattr(module, enumerator)
for enumerator in tp.enumerators]
tp.enumvalues = tuple(enumvalues)
tp.partial_resolved = True
def _loaded_cpy_enum(self, tp, name, module, library):
for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
setattr(library, enumerator, enumvalue)
# ----------
# macros: for now only for integers
def _generate_cpy_macro_decl(self, tp, name):
if tp == '...':
check_value = None
else:
check_value = tp # an integer
self._generate_cpy_const(True, name, check_value=check_value)
_generate_cpy_macro_collecttype = _generate_nothing
_generate_cpy_macro_method = _generate_nothing
_loading_cpy_macro = _loaded_noop
_loaded_cpy_macro = _loaded_noop
# ----------
# global variables
def _generate_cpy_variable_collecttype(self, tp, name):
if isinstance(tp, model.ArrayType):
tp_ptr = model.PointerType(tp.item)
else:
tp_ptr = model.PointerType(tp)
self._do_collect_type(tp_ptr)
def _generate_cpy_variable_decl(self, tp, name):
if isinstance(tp, model.ArrayType):
tp_ptr = model.PointerType(tp.item)
self._generate_cpy_const(False, name, tp, vartp=tp_ptr,
size_too = (tp.length == '...'))
else:
tp_ptr = model.PointerType(tp)
self._generate_cpy_const(False, name, tp_ptr, category='var')
_generate_cpy_variable_method = _generate_nothing
_loading_cpy_variable = _loaded_noop
def _loaded_cpy_variable(self, tp, name, module, library):
value = getattr(library, name)
if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the
# sense that "a=..." is forbidden
if tp.length == '...':
assert isinstance(value, tuple)
(value, size) = value
BItemType = self.ffi._get_cached_btype(tp.item)
length, rest = divmod(size, self.ffi.sizeof(BItemType))
if rest != 0:
raise ffiplatform.VerificationError(
"bad size: %r does not seem to be an array of %s" %
(name, tp.item))
tp = tp.resolve_length(length)
# 'value' is a <cdata 'type *'> which we have to replace with
# a <cdata 'type[N]'> if the N is actually known
if tp.length is not None:
BArray = self.ffi._get_cached_btype(tp)
value = self.ffi.cast(BArray, value)
setattr(library, name, value)
return
# remove ptr=<cdata 'int *'> from the library instance, and replace
# it by a property on the class, which reads/writes into ptr[0].
ptr = value
delattr(library, name)
def getter(library):
return ptr[0]
def setter(library, value):
ptr[0] = value
setattr(type(library), name, property(getter, setter))
type(library)._cffi_dir.append(name)
# ----------
def _generate_setup_custom(self):
prnt = self._prnt
prnt('static int _cffi_setup_custom(PyObject *lib)')
prnt('{')
prnt(' return %s;' % self._chained_list_constants[True])
prnt('}')
cffimod_header = r'''
#include <Python.h>
#include <stddef.h>
/* this block of #ifs should be kept exactly identical between
c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */
#if defined(_MSC_VER)
# include <malloc.h> /* for alloca() */
# if _MSC_VER < 1600 /* MSVC < 2010 */
typedef __int8 int8_t;
typedef __int16 int16_t;
typedef __int32 int32_t;
typedef __int64 int64_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;
typedef __int8 int_least8_t;
typedef __int16 int_least16_t;
typedef __int32 int_least32_t;
typedef __int64 int_least64_t;
typedef unsigned __int8 uint_least8_t;
typedef unsigned __int16 uint_least16_t;
typedef unsigned __int32 uint_least32_t;
typedef unsigned __int64 uint_least64_t;
typedef __int8 int_fast8_t;
typedef __int16 int_fast16_t;
typedef __int32 int_fast32_t;
typedef __int64 int_fast64_t;
typedef unsigned __int8 uint_fast8_t;
typedef unsigned __int16 uint_fast16_t;
typedef unsigned __int32 uint_fast32_t;
typedef unsigned __int64 uint_fast64_t;
typedef __int64 intmax_t;
typedef unsigned __int64 uintmax_t;
# else
# include <stdint.h>
# endif
# if _MSC_VER < 1800 /* MSVC < 2013 */
typedef unsigned char _Bool;
# endif
#else
# include <stdint.h>
# if (defined (__SVR4) && defined (__sun)) || defined(_AIX)
# include <alloca.h>
# endif
#endif
#if PY_MAJOR_VERSION < 3
# undef PyCapsule_CheckExact
# undef PyCapsule_GetPointer
# define PyCapsule_CheckExact(capsule) (PyCObject_Check(capsule))
# define PyCapsule_GetPointer(capsule, name) \
(PyCObject_AsVoidPtr(capsule))
#endif
#if PY_MAJOR_VERSION >= 3
# define PyInt_FromLong PyLong_FromLong
#endif
#define _cffi_from_c_double PyFloat_FromDouble
#define _cffi_from_c_float PyFloat_FromDouble
#define _cffi_from_c_long PyInt_FromLong
#define _cffi_from_c_ulong PyLong_FromUnsignedLong
#define _cffi_from_c_longlong PyLong_FromLongLong
#define _cffi_from_c_ulonglong PyLong_FromUnsignedLongLong
#define _cffi_to_c_double PyFloat_AsDouble
#define _cffi_to_c_float PyFloat_AsDouble
#define _cffi_from_c_int_const(x) \
(((x) > 0) ? \
((unsigned long long)(x) <= (unsigned long long)LONG_MAX) ? \
PyInt_FromLong((long)(x)) : \
PyLong_FromUnsignedLongLong((unsigned long long)(x)) : \
((long long)(x) >= (long long)LONG_MIN) ? \
PyInt_FromLong((long)(x)) : \
PyLong_FromLongLong((long long)(x)))
#define _cffi_from_c_int(x, type) \
(((type)-1) > 0 ? /* unsigned */ \
(sizeof(type) < sizeof(long) ? \
PyInt_FromLong((long)x) : \
sizeof(type) == sizeof(long) ? \
PyLong_FromUnsignedLong((unsigned long)x) : \
PyLong_FromUnsignedLongLong((unsigned long long)x)) : \
(sizeof(type) <= sizeof(long) ? \
PyInt_FromLong((long)x) : \
PyLong_FromLongLong((long long)x)))
#define _cffi_to_c_int(o, type) \
((type)( \
sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \
: (type)_cffi_to_c_i8(o)) : \
sizeof(type) == 2 ? (((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \
: (type)_cffi_to_c_i16(o)) : \
sizeof(type) == 4 ? (((type)-1) > 0 ? (type)_cffi_to_c_u32(o) \
: (type)_cffi_to_c_i32(o)) : \
sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \
: (type)_cffi_to_c_i64(o)) : \
(Py_FatalError("unsupported size for type " #type), (type)0)))
#define _cffi_to_c_i8 \
((int(*)(PyObject *))_cffi_exports[1])
#define _cffi_to_c_u8 \
((int(*)(PyObject *))_cffi_exports[2])
#define _cffi_to_c_i16 \
((int(*)(PyObject *))_cffi_exports[3])
#define _cffi_to_c_u16 \
((int(*)(PyObject *))_cffi_exports[4])
#define _cffi_to_c_i32 \
((int(*)(PyObject *))_cffi_exports[5])
#define _cffi_to_c_u32 \
((unsigned int(*)(PyObject *))_cffi_exports[6])
#define _cffi_to_c_i64 \
((long long(*)(PyObject *))_cffi_exports[7])
#define _cffi_to_c_u64 \
((unsigned long long(*)(PyObject *))_cffi_exports[8])
#define _cffi_to_c_char \
((int(*)(PyObject *))_cffi_exports[9])
#define _cffi_from_c_pointer \
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[10])
#define _cffi_to_c_pointer \
((char *(*)(PyObject *, CTypeDescrObject *))_cffi_exports[11])
#define _cffi_get_struct_layout \
((PyObject *(*)(Py_ssize_t[]))_cffi_exports[12])
#define _cffi_restore_errno \
((void(*)(void))_cffi_exports[13])
#define _cffi_save_errno \
((void(*)(void))_cffi_exports[14])
#define _cffi_from_c_char \
((PyObject *(*)(char))_cffi_exports[15])
#define _cffi_from_c_deref \
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[16])
#define _cffi_to_c \
((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[17])
#define _cffi_from_c_struct \
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[18])
#define _cffi_to_c_wchar_t \
((wchar_t(*)(PyObject *))_cffi_exports[19])
#define _cffi_from_c_wchar_t \
((PyObject *(*)(wchar_t))_cffi_exports[20])
#define _cffi_to_c_long_double \
((long double(*)(PyObject *))_cffi_exports[21])
#define _cffi_to_c__Bool \
((_Bool(*)(PyObject *))_cffi_exports[22])
#define _cffi_prepare_pointer_call_argument \
((Py_ssize_t(*)(CTypeDescrObject *, PyObject *, char **))_cffi_exports[23])
#define _cffi_convert_array_from_object \
((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24])
#define _CFFI_NUM_EXPORTS 25
typedef struct _ctypedescr CTypeDescrObject;
static void *_cffi_exports[_CFFI_NUM_EXPORTS];
static PyObject *_cffi_types, *_cffi_VerificationError;
static int _cffi_setup_custom(PyObject *lib); /* forward */
static PyObject *_cffi_setup(PyObject *self, PyObject *args)
{
PyObject *library;
int was_alive = (_cffi_types != NULL);
(void)self; /* unused */
if (!PyArg_ParseTuple(args, "OOO", &_cffi_types, &_cffi_VerificationError,
&library))
return NULL;
Py_INCREF(_cffi_types);
Py_INCREF(_cffi_VerificationError);
if (_cffi_setup_custom(library) < 0)
return NULL;
return PyBool_FromLong(was_alive);
}
static int _cffi_init(void)
{
PyObject *module, *c_api_object = NULL;
module = PyImport_ImportModule("_cffi_backend");
if (module == NULL)
goto failure;
c_api_object = PyObject_GetAttrString(module, "_C_API");
if (c_api_object == NULL)
goto failure;
if (!PyCapsule_CheckExact(c_api_object)) {
PyErr_SetNone(PyExc_ImportError);
goto failure;
}
memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"),
_CFFI_NUM_EXPORTS * sizeof(void *));
Py_DECREF(module);
Py_DECREF(c_api_object);
return 0;
failure:
Py_XDECREF(module);
Py_XDECREF(c_api_object);
return -1;
}
#define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num))
/**********/
'''
| ARMmbed/yotta_osx_installer | workspace/lib/python2.7/site-packages/cffi/vengine_cpy.py | Python | apache-2.0 | 41,164 |
"""
This module converts requested URLs to callback view functions.
RegexURLResolver is the main class here. Its resolve() method takes a URL (as
a string) and returns a ResolverMatch object which provides access to all
attributes of the resolved URL match.
"""
from __future__ import unicode_literals
import functools
import re
import warnings
from importlib import import_module
from threading import local
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.http import Http404
from django.utils import lru_cache, six
from django.utils.datastructures import MultiValueDict
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_str, force_text, iri_to_uri
from django.utils.functional import cached_property, lazy
from django.utils.http import RFC3986_SUBDELIMS, urlquote
from django.utils.module_loading import module_has_submodule
from django.utils.regex_helper import normalize
from django.utils.six.moves.urllib.parse import urlsplit, urlunsplit
from django.utils.translation import get_language, override
# SCRIPT_NAME prefixes for each thread are stored here. If there's no entry for
# the current thread (which is the only one we ever access), it is assumed to
# be empty.
_prefixes = local()
# Overridden URLconfs for each thread are stored here.
_urlconfs = local()
class ResolverMatch(object):
def __init__(self, func, args, kwargs, url_name=None, app_names=None, namespaces=None):
self.func = func
self.args = args
self.kwargs = kwargs
self.url_name = url_name
        # If a RegexURLResolver doesn't have a namespace or app_name, it passes
# in an empty value.
self.app_names = [x for x in app_names if x] if app_names else []
self.app_name = ':'.join(self.app_names)
if namespaces:
self.namespaces = [x for x in namespaces if x]
else:
self.namespaces = []
self.namespace = ':'.join(self.namespaces)
if not hasattr(func, '__name__'):
# A class-based view
self._func_path = '.'.join([func.__class__.__module__, func.__class__.__name__])
else:
# A function-based view
self._func_path = '.'.join([func.__module__, func.__name__])
view_path = url_name or self._func_path
self.view_name = ':'.join(self.namespaces + [view_path])
def __getitem__(self, index):
return (self.func, self.args, self.kwargs)[index]
def __repr__(self):
return "ResolverMatch(func=%s, args=%s, kwargs=%s, url_name=%s, app_names=%s, namespaces=%s)" % (
self._func_path, self.args, self.kwargs, self.url_name, self.app_names, self.namespaces)
class Resolver404(Http404):
pass
class NoReverseMatch(Exception):
pass
@lru_cache.lru_cache(maxsize=None)
def get_callable(lookup_view, can_fail=False):
"""
Return a callable corresponding to lookup_view. This function is used
by both resolve() and reverse(), so can_fail allows the caller to choose
between returning the input as is and raising an exception when the input
string can't be interpreted as an import path.
If lookup_view is already a callable, return it.
If lookup_view is a string import path that can be resolved to a callable,
import that callable and return it.
If lookup_view is some other kind of string and can_fail is True, the string
is returned as is. If can_fail is False, an exception is raised (either
ImportError or ViewDoesNotExist).
"""
if callable(lookup_view):
return lookup_view
if not isinstance(lookup_view, six.string_types):
raise ViewDoesNotExist(
"'%s' is not a callable or a dot-notation path" % lookup_view
)
mod_name, func_name = get_mod_func(lookup_view)
if not func_name: # No '.' in lookup_view
if can_fail:
return lookup_view
else:
raise ImportError(
"Could not import '%s'. The path must be fully qualified." %
lookup_view)
try:
mod = import_module(mod_name)
except ImportError:
if can_fail:
return lookup_view
else:
parentmod, submod = get_mod_func(mod_name)
if submod and not module_has_submodule(import_module(parentmod), submod):
raise ViewDoesNotExist(
"Could not import '%s'. Parent module %s does not exist." %
(lookup_view, mod_name))
else:
raise
else:
try:
view_func = getattr(mod, func_name)
except AttributeError:
if can_fail:
return lookup_view
else:
raise ViewDoesNotExist(
"Could not import '%s'. View does not exist in module %s." %
(lookup_view, mod_name))
else:
if not callable(view_func):
# For backwards compatibility this is raised regardless of can_fail
raise ViewDoesNotExist(
"Could not import '%s.%s'. View is not callable." %
(mod_name, func_name))
return view_func
@lru_cache.lru_cache(maxsize=None)
def get_resolver(urlconf=None):
if urlconf is None:
from django.conf import settings
urlconf = settings.ROOT_URLCONF
return RegexURLResolver(r'^/', urlconf)
@lru_cache.lru_cache(maxsize=None)
def get_ns_resolver(ns_pattern, resolver):
# Build a namespaced resolver for the given parent urlconf pattern.
# This makes it possible to have captured parameters in the parent
# urlconf pattern.
ns_resolver = RegexURLResolver(ns_pattern, resolver.url_patterns)
return RegexURLResolver(r'^/', [ns_resolver])
def get_mod_func(callback):
# Converts 'django.views.news.stories.story_detail' to
# ['django.views.news.stories', 'story_detail']
try:
dot = callback.rindex('.')
except ValueError:
return callback, ''
return callback[:dot], callback[dot + 1:]
class LocaleRegexProvider(object):
"""
A mixin to provide a default regex property which can vary by active
language.
"""
def __init__(self, regex):
# regex is either a string representing a regular expression, or a
# translatable string (using ugettext_lazy) representing a regular
# expression.
self._regex = regex
self._regex_dict = {}
@property
def regex(self):
"""
Returns a compiled regular expression, depending upon the activated
language-code.
"""
language_code = get_language()
if language_code not in self._regex_dict:
if isinstance(self._regex, six.string_types):
regex = self._regex
else:
regex = force_text(self._regex)
try:
compiled_regex = re.compile(regex, re.UNICODE)
except re.error as e:
raise ImproperlyConfigured(
'"%s" is not a valid regular expression: %s' %
(regex, six.text_type(e)))
self._regex_dict[language_code] = compiled_regex
return self._regex_dict[language_code]
class RegexURLPattern(LocaleRegexProvider):
def __init__(self, regex, callback, default_args=None, name=None):
LocaleRegexProvider.__init__(self, regex)
# callback is either a string like 'foo.views.news.stories.story_detail'
# which represents the path to a module and a view function name, or a
# callable object (view).
if callable(callback):
self._callback = callback
else:
self._callback = None
self._callback_str = callback
self.default_args = default_args or {}
self.name = name
def __repr__(self):
return force_str('<%s %s %s>' % (self.__class__.__name__, self.name, self.regex.pattern))
def add_prefix(self, prefix):
"""
Adds the prefix string to a string-based callback.
"""
if not prefix or not hasattr(self, '_callback_str'):
return
self._callback_str = prefix + '.' + self._callback_str
def resolve(self, path):
match = self.regex.search(path)
if match:
# If there are any named groups, use those as kwargs, ignoring
# non-named groups. Otherwise, pass all non-named arguments as
# positional arguments.
kwargs = match.groupdict()
if kwargs:
args = ()
else:
args = match.groups()
# In both cases, pass any extra_kwargs as **kwargs.
kwargs.update(self.default_args)
return ResolverMatch(self.callback, args, kwargs, self.name)
@property
def callback(self):
if self._callback is not None:
return self._callback
self._callback = get_callable(self._callback_str)
return self._callback
class RegexURLResolver(LocaleRegexProvider):
def __init__(self, regex, urlconf_name, default_kwargs=None, app_name=None, namespace=None):
LocaleRegexProvider.__init__(self, regex)
# urlconf_name is the dotted Python path to the module defining
# urlpatterns. It may also be an object with an urlpatterns attribute
# or urlpatterns itself.
self.urlconf_name = urlconf_name
self.callback = None
self.default_kwargs = default_kwargs or {}
self.namespace = namespace
self.app_name = app_name
self._reverse_dict = {}
self._namespace_dict = {}
self._app_dict = {}
# set of dotted paths to all functions and classes that are used in
# urlpatterns
self._callback_strs = set()
self._populated = False
def __repr__(self):
if isinstance(self.urlconf_name, list) and len(self.urlconf_name):
# Don't bother to output the whole list, it can be huge
urlconf_repr = '<%s list>' % self.urlconf_name[0].__class__.__name__
else:
urlconf_repr = repr(self.urlconf_name)
return str('<%s %s (%s:%s) %s>') % (
self.__class__.__name__, urlconf_repr, self.app_name,
self.namespace, self.regex.pattern)
def _populate(self):
lookups = MultiValueDict()
namespaces = {}
apps = {}
language_code = get_language()
for pattern in reversed(self.url_patterns):
if hasattr(pattern, '_callback_str'):
self._callback_strs.add(pattern._callback_str)
elif hasattr(pattern, '_callback'):
callback = pattern._callback
if isinstance(callback, functools.partial):
callback = callback.func
if not hasattr(callback, '__name__'):
lookup_str = callback.__module__ + "." + callback.__class__.__name__
else:
lookup_str = callback.__module__ + "." + callback.__name__
self._callback_strs.add(lookup_str)
p_pattern = pattern.regex.pattern
if p_pattern.startswith('^'):
p_pattern = p_pattern[1:]
if isinstance(pattern, RegexURLResolver):
if pattern.namespace:
namespaces[pattern.namespace] = (p_pattern, pattern)
if pattern.app_name:
apps.setdefault(pattern.app_name, []).append(pattern.namespace)
else:
parent_pat = pattern.regex.pattern
for name in pattern.reverse_dict:
for matches, pat, defaults in pattern.reverse_dict.getlist(name):
new_matches = normalize(parent_pat + pat)
lookups.appendlist(
name,
(
new_matches,
p_pattern + pat,
dict(defaults, **pattern.default_kwargs),
)
)
for namespace, (prefix, sub_pattern) in pattern.namespace_dict.items():
namespaces[namespace] = (p_pattern + prefix, sub_pattern)
for app_name, namespace_list in pattern.app_dict.items():
apps.setdefault(app_name, []).extend(namespace_list)
self._callback_strs.update(pattern._callback_strs)
else:
bits = normalize(p_pattern)
lookups.appendlist(pattern.callback, (bits, p_pattern, pattern.default_args))
if pattern.name is not None:
lookups.appendlist(pattern.name, (bits, p_pattern, pattern.default_args))
self._reverse_dict[language_code] = lookups
self._namespace_dict[language_code] = namespaces
self._app_dict[language_code] = apps
self._populated = True
@property
def reverse_dict(self):
language_code = get_language()
if language_code not in self._reverse_dict:
self._populate()
return self._reverse_dict[language_code]
@property
def namespace_dict(self):
language_code = get_language()
if language_code not in self._namespace_dict:
self._populate()
return self._namespace_dict[language_code]
@property
def app_dict(self):
language_code = get_language()
if language_code not in self._app_dict:
self._populate()
return self._app_dict[language_code]
def _is_callback(self, name):
if not self._populated:
self._populate()
return name in self._callback_strs
def resolve(self, path):
path = force_text(path) # path may be a reverse_lazy object
tried = []
match = self.regex.search(path)
if match:
new_path = path[match.end():]
for pattern in self.url_patterns:
try:
sub_match = pattern.resolve(new_path)
except Resolver404 as e:
sub_tried = e.args[0].get('tried')
if sub_tried is not None:
tried.extend([pattern] + t for t in sub_tried)
else:
tried.append([pattern])
else:
if sub_match:
# Merge captured arguments in match with submatch
sub_match_dict = dict(match.groupdict(), **self.default_kwargs)
sub_match_dict.update(sub_match.kwargs)
# If there are *any* named groups, ignore all non-named groups.
# Otherwise, pass all non-named arguments as positional arguments.
sub_match_args = sub_match.args
if not sub_match_dict:
sub_match_args = match.groups() + sub_match.args
return ResolverMatch(
sub_match.func,
sub_match_args,
sub_match_dict,
sub_match.url_name,
[self.app_name] + sub_match.app_names,
[self.namespace] + sub_match.namespaces
)
tried.append([pattern])
raise Resolver404({'tried': tried, 'path': new_path})
raise Resolver404({'path': path})
@cached_property
def urlconf_module(self):
if isinstance(self.urlconf_name, six.string_types):
return import_module(self.urlconf_name)
else:
return self.urlconf_name
@cached_property
def url_patterns(self):
# urlconf_module might be a valid set of patterns, so we default to it
patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module)
try:
iter(patterns)
except TypeError:
msg = (
"The included urlconf '{name}' does not appear to have any "
"patterns in it. If you see valid patterns in the file then "
"the issue is probably caused by a circular import."
)
raise ImproperlyConfigured(msg.format(name=self.urlconf_name))
return patterns
def resolve_error_handler(self, view_type):
callback = getattr(self.urlconf_module, 'handler%s' % view_type, None)
if not callback:
# No handler specified in file; use default
# Lazy import, since django.urls imports this file
from django.conf import urls
callback = getattr(urls, 'handler%s' % view_type)
return get_callable(callback), {}
def reverse(self, lookup_view, *args, **kwargs):
return self._reverse_with_prefix(lookup_view, '', *args, **kwargs)
def _reverse_with_prefix(self, lookup_view, _prefix, *args, **kwargs):
if args and kwargs:
raise ValueError("Don't mix *args and **kwargs in call to reverse()!")
text_args = [force_text(v) for v in args]
text_kwargs = {k: force_text(v) for (k, v) in kwargs.items()}
if not self._populated:
self._populate()
original_lookup = lookup_view
try:
if self._is_callback(lookup_view):
lookup_view = get_callable(lookup_view, True)
except (ImportError, AttributeError) as e:
raise NoReverseMatch("Error importing '%s': %s." % (lookup_view, e))
else:
if not callable(original_lookup) and callable(lookup_view):
warnings.warn(
'Reversing by dotted path is deprecated (%s).' % original_lookup,
RemovedInDjango110Warning, stacklevel=3
)
possibilities = self.reverse_dict.getlist(lookup_view)
for possibility, pattern, defaults in possibilities:
for result, params in possibility:
if args:
if len(args) != len(params):
continue
candidate_subs = dict(zip(params, text_args))
else:
if (set(kwargs.keys()) | set(defaults.keys()) != set(params) |
set(defaults.keys())):
continue
matches = True
for k, v in defaults.items():
if kwargs.get(k, v) != v:
matches = False
break
if not matches:
continue
candidate_subs = text_kwargs
# WSGI provides decoded URLs, without %xx escapes, and the URL
# resolver operates on such URLs. First substitute arguments
# without quoting to build a decoded URL and look for a match.
# Then, if we have a match, redo the substitution with quoted
# arguments in order to return a properly encoded URL.
candidate_pat = _prefix.replace('%', '%%') + result
if re.search('^%s%s' % (re.escape(_prefix), pattern), candidate_pat % candidate_subs, re.UNICODE):
# safe characters from `pchar` definition of RFC 3986
url = urlquote(candidate_pat % candidate_subs, safe=RFC3986_SUBDELIMS + str('/~:@'))
# Don't allow construction of scheme relative urls.
if url.startswith('//'):
url = '/%%2F%s' % url[2:]
return url
        # lookup_view can be a URL label, a dotted path, or a callable. Any of
        # these can be passed in at the top, but callables are not friendly in
# error messages.
m = getattr(lookup_view, '__module__', None)
n = getattr(lookup_view, '__name__', None)
if m is not None and n is not None:
lookup_view_s = "%s.%s" % (m, n)
else:
lookup_view_s = lookup_view
patterns = [pattern for (possibility, pattern, defaults) in possibilities]
raise NoReverseMatch("Reverse for '%s' with arguments '%s' and keyword "
"arguments '%s' not found. %d pattern(s) tried: %s" %
(lookup_view_s, args, kwargs, len(patterns), patterns))
class LocaleRegexURLResolver(RegexURLResolver):
"""
A URL resolver that always matches the active language code as URL prefix.
Rather than taking a regex argument, we just override the ``regex``
function to always return the active language-code as regex.
"""
def __init__(self, urlconf_name, default_kwargs=None, app_name=None, namespace=None):
super(LocaleRegexURLResolver, self).__init__(
None, urlconf_name, default_kwargs, app_name, namespace)
@property
def regex(self):
language_code = get_language()
if language_code not in self._regex_dict:
regex_compiled = re.compile('^%s/' % language_code, re.UNICODE)
self._regex_dict[language_code] = regex_compiled
return self._regex_dict[language_code]
def resolve(path, urlconf=None):
if urlconf is None:
urlconf = get_urlconf()
return get_resolver(urlconf).resolve(path)
def reverse(viewname, urlconf=None, args=None, kwargs=None, current_app=None):
if urlconf is None:
urlconf = get_urlconf()
resolver = get_resolver(urlconf)
args = args or []
kwargs = kwargs or {}
prefix = get_script_prefix()
if not isinstance(viewname, six.string_types):
view = viewname
else:
parts = viewname.split(':')
parts.reverse()
view = parts[0]
path = parts[1:]
if current_app:
current_path = current_app.split(':')
current_path.reverse()
else:
current_path = None
resolved_path = []
ns_pattern = ''
while path:
ns = path.pop()
current_ns = current_path.pop() if current_path else None
# Lookup the name to see if it could be an app identifier
try:
app_list = resolver.app_dict[ns]
# Yes! Path part matches an app in the current Resolver
if current_ns and current_ns in app_list:
# If we are reversing for a particular app,
# use that namespace
ns = current_ns
elif ns not in app_list:
# The name isn't shared by one of the instances
# (i.e., the default) so just pick the first instance
# as the default.
ns = app_list[0]
except KeyError:
pass
if ns != current_ns:
current_path = None
try:
extra, resolver = resolver.namespace_dict[ns]
resolved_path.append(ns)
ns_pattern = ns_pattern + extra
except KeyError as key:
if resolved_path:
raise NoReverseMatch(
"%s is not a registered namespace inside '%s'" %
(key, ':'.join(resolved_path)))
else:
raise NoReverseMatch("%s is not a registered namespace" %
key)
if ns_pattern:
resolver = get_ns_resolver(ns_pattern, resolver)
return force_text(iri_to_uri(resolver._reverse_with_prefix(view, prefix, *args, **kwargs)))
reverse_lazy = lazy(reverse, six.text_type)
def clear_url_caches():
get_callable.cache_clear()
get_resolver.cache_clear()
get_ns_resolver.cache_clear()
def set_script_prefix(prefix):
"""
Sets the script prefix for the current thread.
"""
if not prefix.endswith('/'):
prefix += '/'
_prefixes.value = prefix
def get_script_prefix():
"""
Returns the currently active script prefix. Useful for client code that
wishes to construct their own URLs manually (although accessing the request
instance is normally going to be a lot cleaner).
"""
return getattr(_prefixes, "value", '/')
def clear_script_prefix():
"""
Unsets the script prefix for the current thread.
"""
try:
del _prefixes.value
except AttributeError:
pass
def set_urlconf(urlconf_name):
"""
Sets the URLconf for the current thread (overriding the default one in
settings). Set to None to revert back to the default.
"""
if urlconf_name:
_urlconfs.value = urlconf_name
else:
if hasattr(_urlconfs, "value"):
del _urlconfs.value
def get_urlconf(default=None):
"""
Returns the root URLconf to use for the current thread if it has been
changed from the default one.
"""
return getattr(_urlconfs, "value", default)
def is_valid_path(path, urlconf=None):
"""
Returns True if the given path resolves against the default URL resolver,
False otherwise.
This is a convenience method to make working with "is this a match?" cases
easier, avoiding unnecessarily indented try...except blocks.
"""
try:
resolve(path, urlconf)
return True
except Resolver404:
return False
def translate_url(url, lang_code):
"""
Given a URL (absolute or relative), try to get its translated version in
the `lang_code` language (either by i18n_patterns or by translated regex).
Return the original URL if no translated version is found.
"""
parsed = urlsplit(url)
try:
match = resolve(parsed.path)
except Resolver404:
pass
else:
to_be_reversed = "%s:%s" % (match.namespace, match.url_name) if match.namespace else match.url_name
with override(lang_code):
try:
url = reverse(to_be_reversed, args=match.args, kwargs=match.kwargs)
except NoReverseMatch:
pass
else:
url = urlunsplit((parsed.scheme, parsed.netloc, url, parsed.query, parsed.fragment))
return url
| bobcyw/django | django/core/urlresolvers.py | Python | bsd-3-clause | 26,463 |
# The MIT License
#
# Copyright (c) 2008 William T. Katz
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""A simple RESTful status framework on Google App Engine
This app's API should be reasonably clean and easily targeted by other
clients, like a Flex app or a desktop program.
"""
__author__ = 'Kyle Conroy'
import string
import re
import os
import cgi
import logging
from datetime import timedelta
from datetime import date
from datetime import datetime
from datetime import time
from dateutil.parser import parse
from google.appengine.api import memcache
from google.appengine.api import datastore_errors
from google.appengine.api import taskqueue
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext import db
from handlers import restful
from time import mktime
from utils import authorized
from utils import slugify
from models import List, Status, Event, Service, Image
from wsgiref.handlers import format_date_time
def invalidate_cache():
all_pages = memcache.get("__all_pages__")
if all_pages is not None:
        for page, d in all_pages.items():
if not memcache.delete(page):
logging.error("Memcache delete failed on %s", page)
if not memcache.delete("__all_pages__"):
logging.error("Memcache delete failed on __all_pages__")
taskqueue.add(url='/', method="GET")
def aware_to_naive(d):
"""Convert an aware date to an naive date, in UTC"""
offset = d.utcoffset()
if offset:
d = d.replace(tzinfo=None)
d = d - offset
return d
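# Illustrative example (values are made up): an aware datetime at UTC+02:00
# becomes the corresponding naive UTC datetime.
#
#   >>> from dateutil.tz import tzoffset
#   >>> aware_to_naive(datetime(2012, 1, 1, 12, 0, tzinfo=tzoffset(None, 7200)))
#   datetime.datetime(2012, 1, 1, 10, 0)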
class NotFoundHandler(restful.Controller):
def get(self):
self.error(404, "Can't find resource")
class ListsListHandler(restful.Controller):
def get(self, version):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
query = List.all().order('name')
data = [s.rest(self.base_url(version)) for s in query]
data = {"lists": data}
self.json(data)
@authorized.api("admin")
def post(self, version):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
name = self.request.get('name', default_value=None)
description = self.request.get('description', default_value=None)
if not name or not description:
self.error(400, "Bad Data: Name: %s, Description: %s" \
% (name, description))
return
slug = slugify.slugify(name)
existing_s = List.get_by_slug(slug)
if existing_s:
self.error(404, "A list with this name already exists")
return
l = List(name=name, slug=slug, description=description)
l.put()
invalidate_cache()
self.response.set_status(201)
self.json(l.rest(self.base_url(version)))
class ListInstanceHandler(restful.Controller):
def get(self, version, list_slug):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
list = List.get_by_slug(list_slug)
if not list:
self.error(404, "List %s does not exist" % list_slug)
return
self.json(list.rest(self.base_url(version)))
@authorized.api("admin")
def post(self, version, list_slug):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
list = List.get_by_slug(list_slug)
if not list:
self.error(404, "Service %s does not exist" % list_slug)
return
name = self.request.get('name', default_value=None)
description = self.request.get('description', default_value=None)
if description:
list.description = description
if name:
list.name = name
if name or description:
invalidate_cache()
list.put()
self.json(list.rest(self.base_url(version)))
@authorized.api("admin")
def delete(self, version, list_slug):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
list = List.get_by_slug(list_slug)
if not list:
self.error(404, "List %s not found" % list_slug)
return
query = Service.all()
query.filter('list =', list)
if query:
for s in query:
s.list = None
s.put()
invalidate_cache()
list.delete()
self.json(list.rest(self.base_url(version)))
class ServicesListHandler(restful.Controller):
def get(self, version):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
query = Service.all().order('name')
data = [s.rest(self.base_url(version)) for s in query]
data = {"services": data}
self.json(data)
@authorized.api("admin")
def post(self, version):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
name = self.request.get('name', default_value=None)
description = self.request.get('description', default_value=None)
slist = self.request.get('list', default_value=None)
l = None
if slist:
l = List.all().filter("slug =", slist).get()
if not name:
self.error(400, "Bad name: %s" % name)
return
if not description:
self.error(400, "Bad description: %s" % description)
return
if slist and not l:
self.error(400, "Bad list slug: %s" % slist)
return
slug = slugify.slugify(name)
existing_s = Service.get_by_slug(slug)
if existing_s:
self.error(404, "A sevice with this name already exists")
return
s = Service(name=name, slug=slug, description=description, list=l)
s.put()
invalidate_cache()
self.response.set_status(201)
self.json(s.rest(self.base_url(version)))
class ServiceInstanceHandler(restful.Controller):
def get(self, version, service_slug):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
service = Service.get_by_slug(service_slug)
if not service:
self.error(404, "Service %s does not exist" % service_slug)
return
self.json(service.rest(self.base_url(version)))
@authorized.api("admin")
def post(self, version, service_slug):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
service = Service.get_by_slug(service_slug)
if not service:
self.error(404, "Service %s does not exist" % service_slug)
return
name = self.request.get('name', default_value=None)
description = self.request.get('description', default_value=None)
list = self.request.get('list', default_value=None)
if description:
service.description = description
if name:
service.name = name
if list:
l = List.all().filter("slug = ", list).get()
if l is None:
self.error(400, "Can't find list with slug %s" % list)
return
service.list = l
if "" == list:
service.list = None
list = "removed"
if name or description or list:
invalidate_cache()
service.put()
self.json(service.rest(self.base_url(version)))
@authorized.api("admin")
def delete(self, version, service_slug):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
service = Service.get_by_slug(service_slug)
if not service:
self.error(404, "Service %s not found" % service_slug)
return
query = Event.all()
query.filter('service =', service)
if query:
for e in query:
e.delete()
invalidate_cache()
service.delete()
self.json(service.rest(self.base_url(version)))
class EventsListHandler(restful.Controller):
def get(self, version, service_slug):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
service = Service.get_by_slug(service_slug)
if not service:
self.error(404, "Service %s not found" % service_slug)
return
start = self.request.get('start', default_value=None)
end = self.request.get('end', default_value=None)
query = Event.all()
query.filter('service =', service)
if start:
try:
_start = aware_to_naive(parse(start))
query.filter("start >= ", _start)
except:
self.error(400, "Invalid Date: %s" % start)
return
if end:
try:
_end = aware_to_naive(parse(end))
query.filter("start <=", _end)
except:
self.error(400, "Invalid Date: %s" % end)
return
query.order('-start')
data = [s.rest(self.base_url(version)) for s in query]
self.json({"events": data})
@authorized.api("admin")
def post(self, version, service_slug):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
status_slug = self.request.get("status", default_value=None)
message = self.request.get("message", default_value=None)
informational = self.request.get("informational", default_value=None)
if not message:
self.error(400, "Event message is required")
return
service = Service.get_by_slug(service_slug)
if not service:
self.error(404, "Service %s not found" % service_slug)
return
if not status_slug:
event = service.current_event()
if event:
status = event.status
else:
status = Status.get_default()
else:
status = Status.get_by_slug(status_slug)
if not status:
self.error(404, "Status %s not found" % status_slug)
return
e = Event(status=status, service=service, message=message)
e.informational = informational and informational == "true"
e.put()
# Queue up a task that calls the Twitter API to make a tweet.
if self.request.get('tweet'):
logging.info('Attempting to post a tweet for the latest event via async GAE task queue.')
taskqueue.add(url='/admin/tweet', params={'service_name': service.name, 'status_name': status.name, 'message': message})
invalidate_cache()
self.json(e.rest(self.base_url(version)))
class CurrentEventHandler(restful.Controller):
def get(self, version, service_slug):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
service = Service.get_by_slug(service_slug)
if not service:
self.error(404, "Service %s not found" % service_slug)
return
event = service.current_event()
if not event:
self.error(404, "No current event for Service %s" % service_slug)
return
self.json(event.rest(self.base_url(version)))
class EventInstanceHandler(restful.Controller):
def get(self, version, service_slug, sid):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
service = Service.get_by_slug(service_slug)
if not service:
self.error(404, "Service %s not found" % service_slug)
return
try:
event = Event.get(db.Key(sid))
except datastore_errors.BadKeyError:
self.error(404, "Event %s not found" % sid)
return
if not event or service.key() != event.service.key():
self.error(404, "No event for Service %s with sid = %s" \
% (service_slug, sid))
return
self.json(event.rest(self.base_url(version)))
@authorized.api("admin")
def delete(self, version, service_slug, sid):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
service = Service.get_by_slug(service_slug)
if not service:
self.error(404, "Service %s not found" % service_slug)
return
try:
event = Event.get(db.Key(sid))
except datastore_errors.BadKeyError:
self.error(404, "Event %s not found" % sid)
return
if not event or service.key() != event.service.key():
self.error(404, "No event for Service %s with sid = %s" \
% (service_slug, sid))
return
event.delete()
invalidate_cache()
# Why not JSON?
self.success(event.rest(self.base_url(version)))
class StatusesListHandler(restful.Controller):
def get(self, version):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
query = Status.all().order('name')
data = [s.rest(self.base_url(version)) for s in query]
self.json({"statuses": data})
@authorized.api("admin")
def post(self, version):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
name = self.request.get('name', default_value=None)
description = self.request.get('description', default_value=None)
image_slug = self.request.get('image', default_value=None)
default = self.request.get('default', default_value="false")
if default not in ["true", "false"]:
self.error(400, "Default must be true or false")
return
if not name or not description or not image_slug:
self.error(400, "Bad Data")
return
slug = slugify.slugify(name)
status = Status.get_by_slug(slug)
image = Image.get_by_slug(image_slug)
if status is not None:
self.error(400, "A Status with the slug %s already exists" % slug)
return
if image is None:
msg = "An Image with the slug %s doesn't exist" % image_slug
self.error(400, msg)
return
# Reset default status
if default == "true":
for stat in Status.all().filter("default", True):
stat.default = False
stat.put()
default = default == "true"
status = Status(name=name, slug=slug, description=description,
image=image.path, default=default)
status.put()
invalidate_cache()
self.response.set_status(201)
self.json(status.rest(self.base_url(version)))
class StatusInstanceHandler(restful.Controller):
def get(self, version, status_slug):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
status = Status.get_by_slug(status_slug)
if not status:
self.error(404, "No status with the slug %s found" % status_slug)
return
self.json(status.rest(self.base_url(version)))
@authorized.api("admin")
def post(self, version, status_slug):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
status = Status.get_by_slug(status_slug)
if not status:
self.error(404, "No status with the slug %s found" % status_slug)
return
name = self.request.get('name', default_value=None)
image_slug = self.request.get('image', default_value=None)
image = None
default = self.request.get('default', default_value=None)
description = self.request.get('description', default_value=None)
if image_slug is not None:
image = Image.get_by_slug(image_slug)
if image is None:
self.error(400, "An Image with the "
"slug %s doesn't exist" % image_slug)
return
status.image = image.path
if description is not None:
status.description = description
if default is not None and default in ["false", "true"]:
# Reset default status
if default == "true":
for stat in Status.all().filter("default", True):
stat.default = False
stat.put()
status.default = default == "true"
if name is not None:
status.name = name
if description or name or image or default:
status.put()
invalidate_cache()
self.json(status.rest(self.base_url(version)))
@authorized.api("admin")
def delete(self, version, status_slug):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
status = Status.get_by_slug(status_slug)
if not status:
self.error(404, "Status %s not found" % status_slug)
return
# We may want to think more about this
events = Event.all().filter('status =', status).fetch(1000)
for event in events:
event.delete()
status.delete()
self.json(status.rest(self.base_url(version)))
class LevelListHandler(restful.Controller):
def get(self, version):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
self.json({"levels": ["NORMAL", "WARNING", "ERROR", "CRITICAL"]})
class ImagesListHandler(restful.Controller):
def get(self, version):
if not self.valid_version(version):
self.error(404, "API Version %s not supported" % version)
return
host = self.request.headers.get('host', 'nohost')
images = []
for img in Image.all().fetch(1000):
image = {
"url": "http://" + host + "/images/" + img.path,
"icon_set": img.icon_set,
"name": img.slug,
}
images.append(image)
self.json({"images": images})
| unixboy/stashboard | stashboard/handlers/api.py | Python | mit | 20,209 |
"""Precompute the polynomials for the asymptotic expansion of the
generalized exponential integral.
Sources
-------
[1] NIST, Digital Library of Mathematical Functions,
http://dlmf.nist.gov/8.20#ii
"""
from __future__ import division, print_function, absolute_import
import os
import warnings
try:
# Can remove when sympy #11255 is resolved; see
# https://github.com/sympy/sympy/issues/11255
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
import sympy
from sympy import Poly
x = sympy.symbols('x')
except ImportError:
pass
def generate_A(K):
A = [Poly(1, x)]
for k in range(K):
A.append(Poly(1 - 2*k*x, x)*A[k] + Poly(x*(x + 1))*A[k].diff())
return A
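# Worked example (hand-expanded from the recurrence above): the first few
# polynomials returned by generate_A(3) are
#
#   A_0 = 1
#   A_1 = 1
#   A_2 = 1 - 2*x
#   A_3 = 6*x**2 - 8*x + 1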
WARNING = """\
/* This file was automatically generated by _precompute/expn_asy.py.
* Do not edit it manually!
*/
"""
def main():
print(__doc__)
fn = os.path.join('..', 'cephes', 'expn.h')
K = 12
A = generate_A(K)
with open(fn + '.new', 'w') as f:
f.write(WARNING)
f.write("#define nA {}\n".format(len(A)))
for k, Ak in enumerate(A):
tmp = ', '.join([str(x.evalf(18)) for x in Ak.coeffs()])
f.write("double A{}[] = {{{}}};\n".format(k, tmp))
tmp = ", ".join(["A{}".format(k) for k in range(K + 1)])
f.write("double *A[] = {{{}}};\n".format(tmp))
tmp = ", ".join([str(Ak.degree()) for Ak in A])
f.write("int Adegs[] = {{{}}};\n".format(tmp))
os.rename(fn + '.new', fn)
if __name__ == "__main__":
main()
| asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/scipy/special/_precompute/expn_asy.py | Python | mit | 1,585 |
# -*- coding: utf-8 -*-
'''
test_qgscomposerlabel.py
--------------------------------------
Date : Oct 2012
Copyright : (C) 2012 by Dr. Hugo Mercier
email : hugo dot mercier at oslandia dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
'''
import qgis
import unittest
from utilities import getQgisTestApp, unitTestDataPath
from PyQt4.QtCore import QFileInfo, QDate, QDateTime
from qgis.core import QgsVectorLayer, QgsMapLayerRegistry, QgsMapRenderer, QgsComposition, QgsComposerLabel, QgsFeatureRequest, QgsFeature, QgsExpression
QGISAPP, CANVAS, IFACE, PARENT = getQgisTestApp()
class TestQgsComposerLabel(unittest.TestCase):
def testCase(self):
TEST_DATA_DIR = unitTestDataPath()
vectorFileInfo = QFileInfo( TEST_DATA_DIR + "/france_parts.shp")
mVectorLayer = QgsVectorLayer( vectorFileInfo.filePath(), vectorFileInfo.completeBaseName(), "ogr" )
QgsMapLayerRegistry.instance().addMapLayers( [mVectorLayer] )
# create composition with composer map
mMapRenderer = QgsMapRenderer()
layerStringList = []
layerStringList.append( mVectorLayer.id() )
mMapRenderer.setLayerSet( layerStringList )
mMapRenderer.setProjectionsEnabled( False )
mComposition = QgsComposition( mMapRenderer )
mComposition.setPaperSize( 297, 210 )
mLabel = QgsComposerLabel( mComposition )
mComposition.addComposerLabel( mLabel )
self.evaluation_test( mComposition, mLabel )
self.feature_evaluation_test( mComposition, mLabel, mVectorLayer )
self.page_evaluation_test( mComposition, mLabel, mVectorLayer )
def evaluation_test( self, mComposition, mLabel ):
# $CURRENT_DATE evaluation
mLabel.setText( "__$CURRENT_DATE__" )
assert mLabel.displayText() == ( "__" + QDate.currentDate().toString() + "__" )
# $CURRENT_DATE() evaluation
mLabel.setText( "__$CURRENT_DATE(dd)(ok)__" )
expected = "__" + QDateTime.currentDateTime().toString( "dd" ) + "(ok)__"
assert mLabel.displayText() == expected
# $CURRENT_DATE() evaluation (inside an expression)
mLabel.setText( "__[%$CURRENT_DATE(dd) + 1%](ok)__" )
dd = QDate.currentDate().day()
expected = "__%d(ok)__" % (dd+1)
assert mLabel.displayText() == expected
# expression evaluation (without associated feature)
mLabel.setText( "__[%\"NAME_1\"%][%21*2%]__" )
assert mLabel.displayText() == "__[NAME_1]42__"
def feature_evaluation_test( self, mComposition, mLabel, mVectorLayer ):
provider = mVectorLayer.dataProvider()
fi = provider.getFeatures( QgsFeatureRequest() )
feat = QgsFeature()
fi.nextFeature( feat )
mLabel.setExpressionContext( feat, mVectorLayer )
mLabel.setText( "[%\"NAME_1\"||'_ok'%]")
assert mLabel.displayText() == "Basse-Normandie_ok"
fi.nextFeature( feat )
mLabel.setExpressionContext( feat, mVectorLayer )
assert mLabel.displayText() == "Bretagne_ok"
# evaluation with local variables
locs = { "$test" : "OK" }
mLabel.setExpressionContext( feat, mVectorLayer, locs )
mLabel.setText( "[%\"NAME_1\"||$test%]" )
assert mLabel.displayText() == "BretagneOK"
def page_evaluation_test( self, mComposition, mLabel, mVectorLayer ):
mComposition.setNumPages( 2 )
mLabel.setText( "[%$page||'/'||$numpages%]" )
assert mLabel.displayText() == "1/2"
        # move to the second page and re-evaluate
mLabel.setItemPosition( 0, 320 )
assert mLabel.displayText() == "2/2"
# use setSpecialColumn
mLabel.setText( "[%$var1 + 1%]" )
QgsExpression.setSpecialColumn( "$var1", 41 )
assert mLabel.displayText() == "42"
QgsExpression.setSpecialColumn( "$var1", 99 )
assert mLabel.displayText() == "100"
QgsExpression.unsetSpecialColumn( "$var1" )
assert mLabel.displayText() == "[%$var1 + 1%]"
if __name__ == '__main__':
unittest.main()
| herow/planning_qgis | tests/src/python/test_qgscomposerlabel.py | Python | gpl-2.0 | 4,750 |
from __future__ import print_function
import lammps
import ctypes
import traceback
import numpy as np
class LAMMPSFix(object):
def __init__(self, ptr, group_name="all"):
self.lmp = lammps.lammps(ptr=ptr)
self.group_name = group_name
class LAMMPSFixMove(LAMMPSFix):
def __init__(self, ptr, group_name="all"):
super(LAMMPSFixMove, self).__init__(ptr, group_name)
def init(self):
pass
def initial_integrate(self, vflag):
pass
def final_integrate(self):
pass
def initial_integrate_respa(self, vflag, ilevel, iloop):
pass
def final_integrate_respa(self, ilevel, iloop):
pass
def reset_dt(self):
pass
class NVE(LAMMPSFixMove):
""" Python implementation of fix/nve """
def __init__(self, ptr, group_name="all"):
super(NVE, self).__init__(ptr)
assert(self.group_name == "all")
def init(self):
dt = self.lmp.extract_global("dt", 1)
ftm2v = self.lmp.extract_global("ftm2v", 1)
self.ntypes = self.lmp.extract_global("ntypes", 0)
self.dtv = dt
self.dtf = 0.5 * dt * ftm2v
def initial_integrate(self, vflag):
nlocal = self.lmp.extract_global("nlocal", 0)
mass = self.lmp.numpy.extract_atom_darray("mass", self.ntypes+1)
atype = self.lmp.numpy.extract_atom_iarray("type", nlocal)
x = self.lmp.numpy.extract_atom_darray("x", nlocal, dim=3)
v = self.lmp.numpy.extract_atom_darray("v", nlocal, dim=3)
f = self.lmp.numpy.extract_atom_darray("f", nlocal, dim=3)
for i in range(x.shape[0]):
dtfm = self.dtf / mass[int(atype[i])]
v[i,:]+= dtfm * f[i,:]
x[i,:] += self.dtv * v[i,:]
def final_integrate(self):
nlocal = self.lmp.extract_global("nlocal", 0)
mass = self.lmp.numpy.extract_atom_darray("mass", self.ntypes+1)
atype = self.lmp.numpy.extract_atom_iarray("type", nlocal)
v = self.lmp.numpy.extract_atom_darray("v", nlocal, dim=3)
f = self.lmp.numpy.extract_atom_darray("f", nlocal, dim=3)
for i in range(v.shape[0]):
dtfm = self.dtf / mass[int(atype[i])]
v[i,:] += dtfm * f[i,:]
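# Usage sketch (assumed input-script syntax; see the fix python/move docs):
#
#   fix 1 all python/move py_nve.NVE
#
# LAMMPS then imports this module, constructs NVE(ptr) with a pointer to the
# running instance, and calls init()/initial_integrate()/final_integrate()
# during the timestep loop.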
class NVE_Opt(LAMMPSFixMove):
""" Performance-optimized Python implementation of fix/nve """
def __init__(self, ptr, group_name="all"):
super(NVE_Opt, self).__init__(ptr)
assert(self.group_name == "all")
def init(self):
dt = self.lmp.extract_global("dt", 1)
ftm2v = self.lmp.extract_global("ftm2v", 1)
self.ntypes = self.lmp.extract_global("ntypes", 0)
self.dtv = dt
self.dtf = 0.5 * dt * ftm2v
self.mass = self.lmp.numpy.extract_atom_darray("mass", self.ntypes+1)
def initial_integrate(self, vflag):
nlocal = self.lmp.extract_global("nlocal", 0)
atype = self.lmp.numpy.extract_atom_iarray("type", nlocal)
x = self.lmp.numpy.extract_atom_darray("x", nlocal, dim=3)
v = self.lmp.numpy.extract_atom_darray("v", nlocal, dim=3)
f = self.lmp.numpy.extract_atom_darray("f", nlocal, dim=3)
dtf = self.dtf
dtv = self.dtv
mass = self.mass
dtfm = dtf / np.take(mass, atype)
dtfm.reshape((nlocal, 1))
for d in range(x.shape[1]):
v[:,d] += dtfm[:,0] * f[:,d]
x[:,d] += dtv * v[:,d]
def final_integrate(self):
nlocal = self.lmp.extract_global("nlocal", 0)
mass = self.lmp.numpy.extract_atom_darray("mass", self.ntypes+1)
atype = self.lmp.numpy.extract_atom_iarray("type", nlocal)
v = self.lmp.numpy.extract_atom_darray("v", nlocal, dim=3)
f = self.lmp.numpy.extract_atom_darray("f", nlocal, dim=3)
dtf = self.dtf
dtv = self.dtv
mass = self.mass
dtfm = dtf / np.take(mass, atype)
dtfm.reshape((nlocal, 1))
for d in range(v.shape[1]):
v[:,d] += dtfm[:,0] * f[:,d]
| quang-ha/lammps | examples/python/py_nve.py | Python | gpl-2.0 | 3,986 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Isaku Yamahata <yamahata at private email ne jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import exc as sa_exc
from sqlalchemy import func
from sqlalchemy.orm import exc as orm_exc
from neutron.common import exceptions as n_exc
import neutron.db.api as db
from neutron.db import models_v2
from neutron.db import securitygroups_db as sg_db
from neutron.extensions import securitygroup as ext_sg
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.ryu.db import models_v2 as ryu_models_v2
LOG = logging.getLogger(__name__)
def network_all_tenant_list():
session = db.get_session()
return session.query(models_v2.Network).all()
def get_port_from_device(port_id):
LOG.debug(_("get_port_from_device() called:port_id=%s"), port_id)
session = db.get_session()
sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
query = session.query(models_v2.Port,
sg_db.SecurityGroupPortBinding.security_group_id)
query = query.outerjoin(sg_db.SecurityGroupPortBinding,
models_v2.Port.id == sg_binding_port)
query = query.filter(models_v2.Port.id == port_id)
port_and_sgs = query.all()
if not port_and_sgs:
return None
port = port_and_sgs[0][0]
plugin = manager.NeutronManager.get_plugin()
port_dict = plugin._make_port_dict(port)
port_dict[ext_sg.SECURITYGROUPS] = [
sg_id for port_, sg_id in port_and_sgs if sg_id]
port_dict['security_group_rules'] = []
port_dict['security_group_source_groups'] = []
port_dict['fixed_ips'] = [ip['ip_address'] for ip in port['fixed_ips']]
return port_dict
class TunnelKey(object):
# VLAN: 12 bits
    # GRE, VXLAN: 24 bits
    # TODO(yamahata): STT: 64 bits
_KEY_MIN_HARD = 1
_KEY_MAX_HARD = 0xffffffff
def __init__(self, key_min=_KEY_MIN_HARD, key_max=_KEY_MAX_HARD):
self.key_min = key_min
self.key_max = key_max
if (key_min < self._KEY_MIN_HARD or key_max > self._KEY_MAX_HARD or
key_min > key_max):
raise ValueError(_('Invalid tunnel key options '
'tunnel_key_min: %(key_min)d '
'tunnel_key_max: %(key_max)d. '
'Using default value') % {'key_min': key_min,
'key_max': key_max})
def _last_key(self, session):
try:
return session.query(ryu_models_v2.TunnelKeyLast).one()
except orm_exc.MultipleResultsFound:
max_key = session.query(
func.max(ryu_models_v2.TunnelKeyLast.last_key))
if max_key > self.key_max:
max_key = self.key_min
session.query(ryu_models_v2.TunnelKeyLast).delete()
last_key = ryu_models_v2.TunnelKeyLast(last_key=max_key)
except orm_exc.NoResultFound:
last_key = ryu_models_v2.TunnelKeyLast(last_key=self.key_min)
session.add(last_key)
session.flush()
return session.query(ryu_models_v2.TunnelKeyLast).one()
def _find_key(self, session, last_key):
"""Try to find unused tunnel key.
Trying to find unused tunnel key in TunnelKey table starting
from last_key + 1.
When all keys are used, raise sqlalchemy.orm.exc.NoResultFound
"""
# key 0 is used for special meanings. So don't allocate 0.
# sqlite doesn't support
# '(select order by limit) union all (select order by limit) '
# 'order by limit'
# So do it manually
# new_key = session.query("new_key").from_statement(
# # If last_key + 1 isn't used, it's the result
# 'SELECT new_key '
# 'FROM (SELECT :last_key + 1 AS new_key) q1 '
# 'WHERE NOT EXISTS '
# '(SELECT 1 FROM tunnelkeys WHERE tunnel_key = :last_key + 1) '
#
# 'UNION ALL '
#
# # if last_key + 1 used,
# # find the least unused key from last_key + 1
# '(SELECT t.tunnel_key + 1 AS new_key '
# 'FROM tunnelkeys t '
# 'WHERE NOT EXISTS '
# '(SELECT 1 FROM tunnelkeys ti '
# ' WHERE ti.tunnel_key = t.tunnel_key + 1) '
# 'AND t.tunnel_key >= :last_key '
# 'ORDER BY new_key LIMIT 1) '
#
# 'ORDER BY new_key LIMIT 1'
# ).params(last_key=last_key).one()
try:
new_key = session.query("new_key").from_statement(
# If last_key + 1 isn't used, it's the result
'SELECT new_key '
'FROM (SELECT :last_key + 1 AS new_key) q1 '
'WHERE NOT EXISTS '
'(SELECT 1 FROM tunnelkeys WHERE tunnel_key = :last_key + 1) '
).params(last_key=last_key).one()
except orm_exc.NoResultFound:
new_key = session.query("new_key").from_statement(
# if last_key + 1 used,
# find the least unused key from last_key + 1
'(SELECT t.tunnel_key + 1 AS new_key '
'FROM tunnelkeys t '
'WHERE NOT EXISTS '
'(SELECT 1 FROM tunnelkeys ti '
' WHERE ti.tunnel_key = t.tunnel_key + 1) '
'AND t.tunnel_key >= :last_key '
'ORDER BY new_key LIMIT 1) '
).params(last_key=last_key).one()
new_key = new_key[0] # the result is tuple.
LOG.debug(_("last_key %(last_key)s new_key %(new_key)s"),
{'last_key': last_key, 'new_key': new_key})
if new_key > self.key_max:
LOG.debug(_("No key found"))
raise orm_exc.NoResultFound()
return new_key
def _allocate(self, session, network_id):
last_key = self._last_key(session)
try:
new_key = self._find_key(session, last_key.last_key)
except orm_exc.NoResultFound:
new_key = self._find_key(session, self.key_min)
tunnel_key = ryu_models_v2.TunnelKey(network_id=network_id,
tunnel_key=new_key)
last_key.last_key = new_key
session.add(tunnel_key)
return new_key
_TRANSACTION_RETRY_MAX = 16
def allocate(self, session, network_id):
count = 0
while True:
session.begin(subtransactions=True)
try:
new_key = self._allocate(session, network_id)
session.commit()
break
except sa_exc.SQLAlchemyError:
session.rollback()
count += 1
if count > self._TRANSACTION_RETRY_MAX:
# if this happens too often, increase _TRANSACTION_RETRY_MAX
LOG.warn(_("Transaction retry exhausted (%d). "
"Abandoned tunnel key allocation."), count)
raise n_exc.ResourceExhausted()
return new_key
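    # Illustrative usage sketch (session handling simplified; the real caller
    # lives in the Ryu plugin code, not in this module):
    #
    #   tunnel_key = TunnelKey(key_min=1, key_max=0xffffff)
    #   session = db.get_session()
    #   key = tunnel_key.allocate(session, network_id)   # unique key for net
    #   ...
    #   tunnel_key.delete(session, network_id)           # release on delete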
def delete(self, session, network_id):
session.query(ryu_models_v2.TunnelKey).filter_by(
network_id=network_id).delete()
session.flush()
def all_list(self):
session = db.get_session()
return session.query(ryu_models_v2.TunnelKey).all()
def set_port_status(session, port_id, status):
try:
port = session.query(models_v2.Port).filter_by(id=port_id).one()
port['status'] = status
session.merge(port)
session.flush()
except orm_exc.NoResultFound:
raise n_exc.PortNotFound(port_id=port_id)
| Juniper/neutron | neutron/plugins/ryu/db/api_v2.py | Python | apache-2.0 | 8,246 |
# mock.py
# Test tools for mocking and patching.
# Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# mock 0.8.0
# http://www.voidspace.org.uk/python/mock/
# Released subject to the BSD License
# Please see http://www.voidspace.org.uk/python/license.shtml
# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml
# Comments, suggestions and bug reports welcome.
__all__ = (
'Mock',
'MagicMock',
'mocksignature',
'patch',
'sentinel',
'DEFAULT',
'ANY',
'call',
'create_autospec',
'FILTER_DIR',
'NonCallableMock',
'NonCallableMagicMock',
)
__version__ = '0.8.0'
import pprint
import sys
try:
import inspect
except ImportError:
# for alternative platforms that
# may not have inspect
inspect = None
try:
from functools import wraps
except ImportError:
# Python 2.4 compatibility
def wraps(original):
def inner(f):
f.__name__ = original.__name__
f.__doc__ = original.__doc__
f.__module__ = original.__module__
return f
return inner
try:
unicode
except NameError:
# Python 3
basestring = unicode = str
try:
long
except NameError:
# Python 3
long = int
try:
BaseException
except NameError:
# Python 2.4 compatibility
BaseException = Exception
try:
next
except NameError:
def next(obj):
return obj.next()
BaseExceptions = (BaseException,)
if 'java' in sys.platform:
# jython
import java
BaseExceptions = (BaseException, java.lang.Throwable)
try:
_isidentifier = str.isidentifier
except AttributeError:
# Python 2.X
import keyword
import re
regex = re.compile(r'^[a-z_][a-z0-9_]*$', re.I)
def _isidentifier(string):
if string in keyword.kwlist:
return False
return regex.match(string)
inPy3k = sys.version_info[0] == 3
# Needed to work around Python 3 bug where use of "super" interferes with
# defining __class__ as a descriptor
_super = super
self = 'im_self'
builtin = '__builtin__'
if inPy3k:
self = '__self__'
builtin = 'builtins'
FILTER_DIR = True
def _is_instance_mock(obj):
# can't use isinstance on Mock objects because they override __class__
# The base class for all mocks is NonCallableMock
return issubclass(type(obj), NonCallableMock)
def _is_exception(obj):
return (
isinstance(obj, BaseExceptions) or
isinstance(obj, ClassTypes) and issubclass(obj, BaseExceptions)
)
class _slotted(object):
__slots__ = ['a']
DescriptorTypes = (
type(_slotted.a),
property,
)
# getsignature and mocksignature heavily "inspired" by
# the decorator module: http://pypi.python.org/pypi/decorator/
# by Michele Simionato
def _getsignature(func, skipfirst):
if inspect is None:
raise ImportError('inspect module not available')
if inspect.isclass(func):
func = func.__init__
# will have a self arg
skipfirst = True
elif not (inspect.ismethod(func) or inspect.isfunction(func)):
func = func.__call__
regargs, varargs, varkwargs, defaults = inspect.getargspec(func)
# instance methods need to lose the self argument
if getattr(func, self, None) is not None:
regargs = regargs[1:]
_msg = ("_mock_ is a reserved argument name, can't mock signatures using "
"_mock_")
assert '_mock_' not in regargs, _msg
if varargs is not None:
assert '_mock_' not in varargs, _msg
if varkwargs is not None:
assert '_mock_' not in varkwargs, _msg
if skipfirst:
regargs = regargs[1:]
signature = inspect.formatargspec(regargs, varargs, varkwargs, defaults,
formatvalue=lambda value: "")
return signature[1:-1], func
def _getsignature2(func, skipfirst, instance=False):
if inspect is None:
raise ImportError('inspect module not available')
if isinstance(func, ClassTypes) and not instance:
try:
func = func.__init__
except AttributeError:
return
skipfirst = True
elif not isinstance(func, FunctionTypes):
# for classes where instance is True we end up here too
try:
func = func.__call__
except AttributeError:
return
try:
regargs, varargs, varkwargs, defaults = inspect.getargspec(func)
except TypeError:
# C function / method, possibly inherited object().__init__
return
# instance methods and classmethods need to lose the self argument
if getattr(func, self, None) is not None:
regargs = regargs[1:]
if skipfirst:
# this condition and the above one are never both True - why?
regargs = regargs[1:]
signature = inspect.formatargspec(regargs, varargs, varkwargs, defaults,
formatvalue=lambda value: "")
return signature[1:-1], func
def _check_signature(func, mock, skipfirst, instance=False):
if not _callable(func):
return
result = _getsignature2(func, skipfirst, instance)
if result is None:
return
signature, func = result
# can't use self because "self" is common as an argument name
    # unfortunately sometimes not even in the first position
src = "lambda _mock_self, %s: None" % signature
checksig = eval(src, {})
_copy_func_details(func, checksig)
type(mock)._mock_check_sig = checksig
def _copy_func_details(func, funcopy):
funcopy.__name__ = func.__name__
funcopy.__doc__ = func.__doc__
#funcopy.__dict__.update(func.__dict__)
funcopy.__module__ = func.__module__
if not inPy3k:
funcopy.func_defaults = func.func_defaults
return
funcopy.__defaults__ = func.__defaults__
funcopy.__kwdefaults__ = func.__kwdefaults__
def _callable(obj):
if isinstance(obj, ClassTypes):
return True
if getattr(obj, '__call__', None) is not None:
return True
return False
def _is_list(obj):
# checks for list or tuples
# XXXX badly named!
return type(obj) in (list, tuple)
def _instance_callable(obj):
"""Given an object, return True if the object is callable.
For classes, return True if instances would be callable."""
if not isinstance(obj, ClassTypes):
# already an instance
return getattr(obj, '__call__', None) is not None
klass = obj
# uses __bases__ instead of __mro__ so that we work with old style classes
if klass.__dict__.get('__call__') is not None:
return True
for base in klass.__bases__:
if _instance_callable(base):
return True
return False
def _set_signature(mock, original, instance=False):
# creates a function with signature (*args, **kwargs) that delegates to a
# mock. It still does signature checking by calling a lambda with the same
# signature as the original. This is effectively mocksignature2.
if not _callable(original):
return
skipfirst = isinstance(original, ClassTypes)
result = _getsignature2(original, skipfirst, instance)
if result is None:
# was a C function (e.g. object().__init__ ) that can't be mocked
return
signature, func = result
src = "lambda %s: None" % signature
context = {'_mock_': mock}
checksig = eval(src, context)
_copy_func_details(func, checksig)
name = original.__name__
if not _isidentifier(name):
name = 'funcopy'
context = {'checksig': checksig, 'mock': mock}
src = """def %s(*args, **kwargs):
checksig(*args, **kwargs)
return mock(*args, **kwargs)""" % name
exec (src, context)
funcopy = context[name]
_setup_func(funcopy, mock)
return funcopy
def mocksignature(func, mock=None, skipfirst=False):
"""
mocksignature(func, mock=None, skipfirst=False)
Create a new function with the same signature as `func` that delegates
to `mock`. If `skipfirst` is True the first argument is skipped, useful
for methods where `self` needs to be omitted from the new function.
If you don't pass in a `mock` then one will be created for you.
The mock is set as the `mock` attribute of the returned function for easy
access.
Functions returned by `mocksignature` have many of the same attributes
and assert methods as a mock object.
`mocksignature` can also be used with classes. It copies the signature of
the `__init__` method.
When used with callable objects (instances) it copies the signature of the
`__call__` method.
"""
if mock is None:
mock = Mock()
signature, func = _getsignature(func, skipfirst)
src = "lambda %(signature)s: _mock_(%(signature)s)" % {
'signature': signature
}
funcopy = eval(src, dict(_mock_=mock))
_copy_func_details(func, funcopy)
_setup_func(funcopy, mock)
return funcopy
def _setup_func(funcopy, mock):
funcopy.mock = mock
# can't use isinstance with mocks
if not _is_instance_mock(mock):
return
def assert_called_with(*args, **kwargs):
return mock.assert_called_with(*args, **kwargs)
def assert_called_once_with(*args, **kwargs):
return mock.assert_called_once_with(*args, **kwargs)
def assert_has_calls(*args, **kwargs):
return mock.assert_has_calls(*args, **kwargs)
def assert_any_call(*args, **kwargs):
return mock.assert_any_call(*args, **kwargs)
def reset_mock():
funcopy.method_calls = _CallList()
funcopy.mock_calls = _CallList()
mock.reset_mock()
ret = funcopy.return_value
if _is_instance_mock(ret) and not ret is mock:
ret.reset_mock()
funcopy.called = False
funcopy.call_count = 0
funcopy.call_args = None
funcopy.call_args_list = _CallList()
funcopy.method_calls = _CallList()
funcopy.mock_calls = _CallList()
funcopy.return_value = mock.return_value
funcopy.side_effect = mock.side_effect
funcopy._mock_children = mock._mock_children
funcopy.assert_called_with = assert_called_with
funcopy.assert_called_once_with = assert_called_once_with
funcopy.assert_has_calls = assert_has_calls
funcopy.assert_any_call = assert_any_call
funcopy.reset_mock = reset_mock
mock._mock_signature = funcopy
def _is_magic(name):
return '__%s__' % name[2:-2] == name
class _SentinelObject(object):
"A unique, named, sentinel object."
def __init__(self, name):
self.name = name
def __repr__(self):
return 'sentinel.%s' % self.name
class _Sentinel(object):
"""Access attributes to return a named object, usable as a sentinel."""
def __init__(self):
self._sentinels = {}
def __getattr__(self, name):
if name == '__bases__':
# Without this help(mock) raises an exception
raise AttributeError
return self._sentinels.setdefault(name, _SentinelObject(name))
sentinel = _Sentinel()
DEFAULT = sentinel.DEFAULT
class OldStyleClass:
pass
ClassType = type(OldStyleClass)
def _copy(value):
if type(value) in (dict, list, tuple, set):
return type(value)(value)
return value
ClassTypes = (type,)
if not inPy3k:
ClassTypes = (type, ClassType)
_allowed_names = set(
[
'return_value', '_mock_return_value', 'side_effect',
'_mock_side_effect', '_mock_parent', '_mock_new_parent',
'_mock_name', '_mock_new_name'
]
)
def _mock_signature_property(name):
_allowed_names.add(name)
_the_name = '_mock_' + name
def _get(self, name=name, _the_name=_the_name):
sig = self._mock_signature
if sig is None:
return getattr(self, _the_name)
return getattr(sig, name)
def _set(self, value, name=name, _the_name=_the_name):
sig = self._mock_signature
if sig is None:
self.__dict__[_the_name] = value
else:
setattr(sig, name, value)
return property(_get, _set)
class _CallList(list):
def __contains__(self, value):
if not isinstance(value, list):
return list.__contains__(self, value)
len_value = len(value)
len_self = len(self)
if len_value > len_self:
return False
for i in range(0, len_self - len_value + 1):
sub_list = self[i:i+len_value]
if sub_list == value:
return True
return False
def __repr__(self):
return pprint.pformat(list(self))
def _check_and_set_parent(parent, value, name, new_name):
if not _is_instance_mock(value):
return False
if ((value._mock_name or value._mock_new_name) or
(value._mock_parent is not None) or
(value._mock_new_parent is not None)):
return False
_parent = parent
while _parent is not None:
# setting a mock (value) as a child or return value of itself
# should not modify the mock
if _parent is value:
return False
_parent = _parent._mock_new_parent
if new_name:
value._mock_new_parent = parent
value._mock_new_name = new_name
if name:
value._mock_parent = parent
value._mock_name = name
return True
class Base(object):
_mock_return_value = DEFAULT
_mock_side_effect = None
def __init__(self, *args, **kwargs):
pass
class NonCallableMock(Base):
"""A non-callable version of `Mock`"""
def __new__(cls, *args, **kw):
# every instance has its own class
# so we can create magic methods on the
# class without stomping on other mocks
new = type(cls.__name__, (cls,), {'__doc__': cls.__doc__})
instance = object.__new__(new)
return instance
def __init__(
self, spec=None, wraps=None, name=None, spec_set=None,
parent=None, _spec_state=None, _new_name='', _new_parent=None,
**kwargs
):
if _new_parent is None:
_new_parent = parent
__dict__ = self.__dict__
__dict__['_mock_parent'] = parent
__dict__['_mock_name'] = name
__dict__['_mock_new_name'] = _new_name
__dict__['_mock_new_parent'] = _new_parent
if spec_set is not None:
spec = spec_set
spec_set = True
self._mock_add_spec(spec, spec_set)
__dict__['_mock_children'] = {}
__dict__['_mock_wraps'] = wraps
__dict__['_mock_signature'] = None
__dict__['_mock_called'] = False
__dict__['_mock_call_args'] = None
__dict__['_mock_call_count'] = 0
__dict__['_mock_call_args_list'] = _CallList()
__dict__['_mock_mock_calls'] = _CallList()
__dict__['method_calls'] = _CallList()
if kwargs:
self.configure_mock(**kwargs)
_super(NonCallableMock, self).__init__(
spec, wraps, name, spec_set, parent,
_spec_state
)
def attach_mock(self, mock, attribute):
"""
Attach a mock as an attribute of this one, replacing its name and
parent. Calls to the attached mock will be recorded in the
`method_calls` and `mock_calls` attributes of this one."""
mock._mock_parent = None
mock._mock_new_parent = None
mock._mock_name = ''
mock._mock_new_name = None
setattr(self, attribute, mock)
def mock_add_spec(self, spec, spec_set=False):
"""Add a spec to a mock. `spec` can either be an object or a
list of strings. Only attributes on the `spec` can be fetched as
attributes from the mock.
If `spec_set` is True then only attributes on the spec can be set."""
self._mock_add_spec(spec, spec_set)
def _mock_add_spec(self, spec, spec_set):
_spec_class = None
if spec is not None and not _is_list(spec):
if isinstance(spec, ClassTypes):
_spec_class = spec
else:
_spec_class = _get_class(spec)
spec = dir(spec)
__dict__ = self.__dict__
__dict__['_spec_class'] = _spec_class
__dict__['_spec_set'] = spec_set
__dict__['_mock_methods'] = spec
def __get_return_value(self):
ret = self._mock_return_value
if self._mock_signature is not None:
ret = self._mock_signature.return_value
if ret is DEFAULT:
ret = self._get_child_mock(
_new_parent=self, _new_name='()'
)
self.return_value = ret
return ret
def __set_return_value(self, value):
if self._mock_signature is not None:
self._mock_signature.return_value = value
else:
self._mock_return_value = value
_check_and_set_parent(self, value, None, '()')
__return_value_doc = "The value to be returned when the mock is called."
return_value = property(__get_return_value, __set_return_value,
__return_value_doc)
@property
def __class__(self):
if self._spec_class is None:
return type(self)
return self._spec_class
called = _mock_signature_property('called')
call_count = _mock_signature_property('call_count')
call_args = _mock_signature_property('call_args')
call_args_list = _mock_signature_property('call_args_list')
mock_calls = _mock_signature_property('mock_calls')
def __get_side_effect(self):
sig = self._mock_signature
if sig is None:
return self._mock_side_effect
return sig.side_effect
def __set_side_effect(self, value):
value = _try_iter(value)
sig = self._mock_signature
if sig is None:
self._mock_side_effect = value
else:
sig.side_effect = value
side_effect = property(__get_side_effect, __set_side_effect)
def reset_mock(self):
"Restore the mock object to its initial state."
self.called = False
self.call_args = None
self.call_count = 0
self.mock_calls = _CallList()
self.call_args_list = _CallList()
self.method_calls = _CallList()
for child in self._mock_children.values():
child.reset_mock()
ret = self._mock_return_value
if _is_instance_mock(ret) and ret is not self:
ret.reset_mock()
def configure_mock(self, **kwargs):
"""Set attributes on the mock through keyword arguments.
Attributes plus return values and side effects can be set on child
mocks using standard dot notation and unpacking a dictionary in the
method call:
>>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError}
>>> mock.configure_mock(**attrs)"""
for arg, val in sorted(kwargs.items(),
# we sort on the number of dots so that
# attributes are set before we set attributes on
# attributes
key=lambda entry: entry[0].count('.')):
args = arg.split('.')
final = args.pop()
obj = self
for entry in args:
obj = getattr(obj, entry)
setattr(obj, final, val)
def __getattr__(self, name):
if name == '_mock_methods':
raise AttributeError(name)
elif self._mock_methods is not None:
if name not in self._mock_methods or name in _all_magics:
raise AttributeError("Mock object has no attribute %r" % name)
elif _is_magic(name):
raise AttributeError(name)
result = self._mock_children.get(name)
if result is None:
wraps = None
if self._mock_wraps is not None:
# XXXX should we get the attribute without triggering code
# execution?
wraps = getattr(self._mock_wraps, name)
result = self._get_child_mock(
parent=self, name=name, wraps=wraps, _new_name=name,
_new_parent=self
)
self._mock_children[name] = result
elif isinstance(result, _SpecState):
result = create_autospec(
result.spec, result.spec_set, result.instance,
result.parent, result.name
)
self._mock_children[name] = result
return result
def __repr__(self):
_name_list = [self._mock_new_name]
_parent = self._mock_new_parent
last = self
dot = '.'
if _name_list == ['()']:
dot = ''
seen = set()
while _parent is not None:
last = _parent
_name_list.append(_parent._mock_new_name + dot)
dot = '.'
if _parent._mock_new_name == '()':
dot = ''
_parent = _parent._mock_new_parent
# use ids here so as not to call __hash__ on the mocks
if id(_parent) in seen:
break
seen.add(id(_parent))
_name_list = list(reversed(_name_list))
_first = last._mock_name or 'mock'
if len(_name_list) > 1:
if _name_list[1] not in ('()', '().'):
_first += '.'
_name_list[0] = _first
name = ''.join(_name_list)
name_string = ''
if name not in ('mock', 'mock.'):
name_string = ' name=%r' % name
spec_string = ''
if self._spec_class is not None:
spec_string = ' spec=%r'
if self._spec_set:
spec_string = ' spec_set=%r'
spec_string = spec_string % self._spec_class.__name__
return "<%s%s%s id='%s'>" % (
type(self).__name__,
name_string,
spec_string,
id(self)
)
def __dir__(self):
"""Filter the output of `dir(mock)` to only useful members.
XXXX
"""
extras = self._mock_methods or []
from_type = dir(type(self))
from_dict = list(self.__dict__)
if FILTER_DIR:
from_type = [e for e in from_type if not e.startswith('_')]
from_dict = [e for e in from_dict if not e.startswith('_') or
_is_magic(e)]
return sorted(set(extras + from_type + from_dict +
list(self._mock_children)))
def __setattr__(self, name, value):
if name in _allowed_names:
# property setters go through here
return object.__setattr__(self, name, value)
elif (self._spec_set and self._mock_methods is not None and
name not in self._mock_methods and
name not in self.__dict__):
raise AttributeError("Mock object has no attribute '%s'" % name)
elif name in _unsupported_magics:
msg = 'Attempting to set unsupported magic method %r.' % name
raise AttributeError(msg)
elif name in _all_magics:
if self._mock_methods is not None and name not in self._mock_methods:
raise AttributeError("Mock object has no attribute '%s'" % name)
if not _is_instance_mock(value):
setattr(type(self), name, _get_method(name, value))
original = value
real = lambda *args, **kw: original(self, *args, **kw)
value = mocksignature(value, real, skipfirst=True)
else:
# only set _new_name and not name so that mock_calls is tracked
# but not method calls
_check_and_set_parent(self, value, None, name)
setattr(type(self), name, value)
else:
if _check_and_set_parent(self, value, name, name):
self._mock_children[name] = value
return object.__setattr__(self, name, value)
def __delattr__(self, name):
if name in _all_magics and name in type(self).__dict__:
delattr(type(self), name)
if name not in self.__dict__:
# for magic methods that are still MagicProxy objects and
# not set on the instance itself
return
return object.__delattr__(self, name)
def _format_mock_call_signature(self, args, kwargs):
name = self._mock_name or 'mock'
return _format_call_signature(name, args, kwargs)
def _format_mock_failure_message(self, args, kwargs):
message = 'Expected call: %s\nActual call: %s'
expected_string = self._format_mock_call_signature(args, kwargs)
call_args = self.call_args
if len(call_args) == 3:
call_args = call_args[1:]
actual_string = self._format_mock_call_signature(*call_args)
return message % (expected_string, actual_string)
def assert_called_with(_mock_self, *args, **kwargs):
"""assert that the mock was called with the specified arguments.
Raises an AssertionError if the args and keyword args passed in are
different to the last call to the mock."""
self = _mock_self
if self.call_args is None:
expected = self._format_mock_call_signature(args, kwargs)
raise AssertionError('Expected call: %s\nNot called' % (expected,))
if self.call_args != (args, kwargs):
msg = self._format_mock_failure_message(args, kwargs)
raise AssertionError(msg)
def assert_called_once_with(_mock_self, *args, **kwargs):
"""assert that the mock was called exactly once and with the specified
arguments."""
self = _mock_self
if not self.call_count == 1:
msg = ("Expected to be called once. Called %s times." %
self.call_count)
raise AssertionError(msg)
return self.assert_called_with(*args, **kwargs)
def assert_has_calls(self, calls, any_order=False):
"""assert the mock has been called with the specified calls.
The `mock_calls` list is checked for the calls.
If `any_order` is False (the default) then the calls must be
sequential. There can be extra calls before or after the
specified calls.
If `any_order` is True then the calls can be in any order, but
they must all appear in `mock_calls`."""
if not any_order:
if calls not in self.mock_calls:
raise AssertionError(
'Calls not found.\nExpected: %r\n'
'Actual: %r' % (calls, self.mock_calls)
)
return
all_calls = list(self.mock_calls)
not_found = []
for kall in calls:
try:
all_calls.remove(kall)
except ValueError:
not_found.append(kall)
if not_found:
raise AssertionError(
'%r not all found in call list' % (tuple(not_found),)
)
def assert_any_call(self, *args, **kwargs):
"""assert the mock has been called with the specified arguments.
The assert passes if the mock has *ever* been called, unlike
`assert_called_with` and `assert_called_once_with` that only pass if
the call is the most recent one."""
kall = call(*args, **kwargs)
if kall not in self.call_args_list:
expected_string = self._format_mock_call_signature(args, kwargs)
raise AssertionError(
'%s call not found' % expected_string
)
def _get_child_mock(self, **kw):
"""Create the child mocks for attributes and return value.
By default child mocks will be the same type as the parent.
Subclasses of Mock may want to override this to customize the way
child mocks are made.
For non-callable mocks the callable variant will be used (rather than
any custom subclass)."""
_type = type(self)
if not issubclass(_type, CallableMixin):
if issubclass(_type, NonCallableMagicMock):
klass = MagicMock
elif issubclass(_type, NonCallableMock) :
klass = Mock
else:
klass = _type.__mro__[1]
return klass(**kw)
def _try_iter(obj):
if obj is None:
return obj
if _is_exception(obj):
return obj
if _callable(obj):
return obj
try:
return iter(obj)
except TypeError:
# XXXX backwards compatibility
# but this will blow up on first call - so maybe we should fail early?
return obj
class CallableMixin(Base):
def __init__(self, spec=None, side_effect=None, return_value=DEFAULT,
wraps=None, name=None, spec_set=None, parent=None,
_spec_state=None, _new_name='', _new_parent=None, **kwargs):
self.__dict__['_mock_return_value'] = return_value
_super(CallableMixin, self).__init__(
spec, wraps, name, spec_set, parent,
_spec_state, _new_name, _new_parent, **kwargs
)
self.side_effect = side_effect
def _mock_check_sig(self, *args, **kwargs):
# stub method that can be replaced with one with a specific signature
pass
def __call__(_mock_self, *args, **kwargs):
# can't use self in-case a function / method we are mocking uses self
# in the signature
_mock_self._mock_check_sig(*args, **kwargs)
return _mock_self._mock_call(*args, **kwargs)
def _mock_call(_mock_self, *args, **kwargs):
self = _mock_self
self.called = True
self.call_count += 1
self.call_args = _Call((args, kwargs), two=True)
self.call_args_list.append(_Call((args, kwargs), two=True))
_new_name = self._mock_new_name
_new_parent = self._mock_new_parent
self.mock_calls.append(_Call(('', args, kwargs)))
seen = set()
skip_next_dot = _new_name == '()'
do_method_calls = self._mock_parent is not None
name = self._mock_name
while _new_parent is not None:
this_mock_call = _Call((_new_name, args, kwargs))
if _new_parent._mock_new_name:
dot = '.'
if skip_next_dot:
dot = ''
skip_next_dot = False
if _new_parent._mock_new_name == '()':
skip_next_dot = True
_new_name = _new_parent._mock_new_name + dot + _new_name
if do_method_calls:
if _new_name == name:
this_method_call = this_mock_call
else:
this_method_call = _Call((name, args, kwargs))
_new_parent.method_calls.append(this_method_call)
do_method_calls = _new_parent._mock_parent is not None
if do_method_calls:
name = _new_parent._mock_name + '.' + name
_new_parent.mock_calls.append(this_mock_call)
_new_parent = _new_parent._mock_new_parent
# use ids here so as not to call __hash__ on the mocks
_new_parent_id = id(_new_parent)
if _new_parent_id in seen:
break
seen.add(_new_parent_id)
ret_val = DEFAULT
effect = self.side_effect
if effect is not None:
if _is_exception(effect):
raise effect
if not _callable(effect):
return next(effect)
ret_val = effect(*args, **kwargs)
if ret_val is DEFAULT:
ret_val = self.return_value
if (self._mock_wraps is not None and
self._mock_return_value is DEFAULT):
return self._mock_wraps(*args, **kwargs)
if ret_val is DEFAULT:
ret_val = self.return_value
return ret_val
class Mock(CallableMixin, NonCallableMock):
"""
Create a new `Mock` object. `Mock` takes several optional arguments
that specify the behaviour of the Mock object:
* `spec`: This can be either a list of strings or an existing object (a
class or instance) that acts as the specification for the mock object. If
you pass in an object then a list of strings is formed by calling dir on
the object (excluding unsupported magic attributes and methods). Accessing
any attribute not in this list will raise an `AttributeError`.
If `spec` is an object (rather than a list of strings) then
`mock.__class__` returns the class of the spec object. This allows mocks
to pass `isinstance` tests.
* `spec_set`: A stricter variant of `spec`. If used, attempting to *set*
or get an attribute on the mock that isn't on the object passed as
`spec_set` will raise an `AttributeError`.
* `side_effect`: A function to be called whenever the Mock is called. See
the `side_effect` attribute. Useful for raising exceptions or
dynamically changing return values. The function is called with the same
arguments as the mock, and unless it returns `DEFAULT`, the return
value of this function is used as the return value.
Alternatively `side_effect` can be an exception class or instance. In
this case the exception will be raised when the mock is called.
If `side_effect` is an iterable then each call to the mock will return
the next value from the iterable.
* `return_value`: The value returned when the mock is called. By default
this is a new Mock (created on first access). See the
`return_value` attribute.
* `wraps`: Item for the mock object to wrap. If `wraps` is not None
then calling the Mock will pass the call through to the wrapped object
(returning the real result and ignoring `return_value`). Attribute
access on the mock will return a Mock object that wraps the corresponding
attribute of the wrapped object (so attempting to access an attribute that
doesn't exist will raise an `AttributeError`).
If the mock has an explicit `return_value` set then calls are not passed
to the wrapped object and the `return_value` is returned instead.
* `name`: If the mock has a name then it will be used in the repr of the
mock. This can be useful for debugging. The name is propagated to child
mocks.
Mocks can also be called with arbitrary keyword arguments. These will be
used to set attributes on the mock after it is created.
"""
def _dot_lookup(thing, comp, import_path):
try:
return getattr(thing, comp)
except AttributeError:
__import__(import_path)
return getattr(thing, comp)
def _importer(target):
components = target.split('.')
import_path = components.pop(0)
thing = __import__(import_path)
for comp in components:
import_path += ".%s" % comp
thing = _dot_lookup(thing, comp, import_path)
return thing
def _is_started(patcher):
# XXXX horrible
return hasattr(patcher, 'is_local')
class _patch(object):
attribute_name = None
def __init__(
self, getter, attribute, new, spec, create,
mocksignature, spec_set, autospec, new_callable, kwargs
):
if new_callable is not None:
if new is not DEFAULT:
raise ValueError(
"Cannot use 'new' and 'new_callable' together"
)
if autospec is not False:
raise ValueError(
"Cannot use 'autospec' and 'new_callable' together"
)
self.getter = getter
self.attribute = attribute
self.new = new
self.new_callable = new_callable
self.spec = spec
self.create = create
self.has_local = False
self.mocksignature = mocksignature
self.spec_set = spec_set
self.autospec = autospec
self.kwargs = kwargs
self.additional_patchers = []
def copy(self):
patcher = _patch(
self.getter, self.attribute, self.new, self.spec,
self.create, self.mocksignature, self.spec_set,
self.autospec, self.new_callable, self.kwargs
)
patcher.attribute_name = self.attribute_name
patcher.additional_patchers = [
p.copy() for p in self.additional_patchers
]
return patcher
def __call__(self, func):
if isinstance(func, ClassTypes):
return self.decorate_class(func)
return self.decorate_callable(func)
def decorate_class(self, klass):
for attr in dir(klass):
if not attr.startswith(patch.TEST_PREFIX):
continue
attr_value = getattr(klass, attr)
if not hasattr(attr_value, "__call__"):
continue
patcher = self.copy()
setattr(klass, attr, patcher(attr_value))
return klass
def decorate_callable(self, func):
if hasattr(func, 'patchings'):
func.patchings.append(self)
return func
@wraps(func)
def patched(*args, **keywargs):
# don't use a with here (backwards compatability with Python 2.4)
extra_args = []
entered_patchers = []
# can't use try...except...finally because of Python 2.4
# compatibility
try:
try:
for patching in patched.patchings:
arg = patching.__enter__()
entered_patchers.append(patching)
if patching.attribute_name is not None:
keywargs.update(arg)
elif patching.new is DEFAULT:
extra_args.append(arg)
args += tuple(extra_args)
return func(*args, **keywargs)
except:
if (patching not in entered_patchers and
_is_started(patching)):
# the patcher may have been started, but an exception
# raised whilst entering one of its additional_patchers
entered_patchers.append(patching)
# re-raise the exception
raise
finally:
for patching in reversed(entered_patchers):
patching.__exit__()
patched.patchings = [self]
if hasattr(func, 'func_code'):
# not in Python 3
patched.compat_co_firstlineno = getattr(
func, "compat_co_firstlineno",
func.func_code.co_firstlineno
)
return patched
def get_original(self):
target = self.getter()
name = self.attribute
original = DEFAULT
local = False
try:
original = target.__dict__[name]
except (AttributeError, KeyError):
original = getattr(target, name, DEFAULT)
else:
local = True
if not self.create and original is DEFAULT:
raise AttributeError(
"%s does not have the attribute %r" % (target, name)
)
return original, local
def __enter__(self):
"""Perform the patch."""
new, spec, spec_set = self.new, self.spec, self.spec_set
autospec, kwargs = self.autospec, self.kwargs
new_callable = self.new_callable
self.target = self.getter()
original, local = self.get_original()
if new is DEFAULT and autospec is False:
inherit = False
if spec_set == True:
spec_set = original
elif spec == True:
# set spec to the object we are replacing
spec = original
if (spec or spec_set) is not None:
if isinstance(original, ClassTypes):
# If we're patching out a class and there is a spec
inherit = True
Klass = MagicMock
_kwargs = {}
if new_callable is not None:
Klass = new_callable
elif (spec or spec_set) is not None:
if not _callable(spec or spec_set):
Klass = NonCallableMagicMock
if spec is not None:
_kwargs['spec'] = spec
if spec_set is not None:
_kwargs['spec_set'] = spec_set
# add a name to mocks
if (isinstance(Klass, type) and
issubclass(Klass, NonCallableMock) and self.attribute):
_kwargs['name'] = self.attribute
_kwargs.update(kwargs)
new = Klass(**_kwargs)
if inherit and _is_instance_mock(new):
# we can only tell if the instance should be callable if the
# spec is not a list
if (not _is_list(spec or spec_set) and not
_instance_callable(spec or spec_set)):
Klass = NonCallableMagicMock
_kwargs.pop('name')
new.return_value = Klass(_new_parent=new, _new_name='()',
**_kwargs)
elif autospec is not False:
# spec is ignored, new *must* be default, spec_set is treated
# as a boolean. Should we check spec is not None and that spec_set
# is a bool? mocksignature should also not be used. Should we
# check this?
if new is not DEFAULT:
raise TypeError(
"autospec creates the mock for you. Can't specify "
"autospec and new."
)
spec_set = bool(spec_set)
if autospec is True:
autospec = original
new = create_autospec(autospec, spec_set=spec_set,
_name=self.attribute, **kwargs)
elif kwargs:
# can't set keyword args when we aren't creating the mock
# XXXX If new is a Mock we could call new.configure_mock(**kwargs)
raise TypeError("Can't pass kwargs to a mock we aren't creating")
new_attr = new
if self.mocksignature:
new_attr = mocksignature(original, new)
self.temp_original = original
self.is_local = local
setattr(self.target, self.attribute, new_attr)
if self.attribute_name is not None:
extra_args = {}
if self.new is DEFAULT:
extra_args[self.attribute_name] = new
for patching in self.additional_patchers:
arg = patching.__enter__()
if patching.new is DEFAULT:
extra_args.update(arg)
return extra_args
return new
def __exit__(self, *_):
"""Undo the patch."""
if not _is_started(self):
raise RuntimeError('stop called on unstarted patcher')
if self.is_local and self.temp_original is not DEFAULT:
setattr(self.target, self.attribute, self.temp_original)
else:
delattr(self.target, self.attribute)
if not self.create and not hasattr(self.target, self.attribute):
# needed for proxy objects like django settings
setattr(self.target, self.attribute, self.temp_original)
del self.temp_original
del self.is_local
del self.target
for patcher in reversed(self.additional_patchers):
if _is_started(patcher):
patcher.__exit__()
start = __enter__
stop = __exit__
def _get_target(target):
try:
target, attribute = target.rsplit('.', 1)
except (TypeError, ValueError):
raise TypeError("Need a valid target to patch. You supplied: %r" %
(target,))
getter = lambda: _importer(target)
return getter, attribute
def _patch_object(
target, attribute, new=DEFAULT, spec=None,
create=False, mocksignature=False, spec_set=None, autospec=False,
new_callable=None, **kwargs
):
"""
patch.object(target, attribute, new=DEFAULT, spec=None, create=False,
mocksignature=False, spec_set=None, autospec=False,
new_callable=None, **kwargs)
patch the named member (`attribute`) on an object (`target`) with a mock
object.
`patch.object` can be used as a decorator, class decorator or a context
manager. Arguments `new`, `spec`, `create`, `mocksignature`, `spec_set`,
`autospec` and `new_callable` have the same meaning as for `patch`. Like
`patch`, `patch.object` takes arbitrary keyword arguments for configuring
the mock object it creates.
When used as a class decorator `patch.object` honours `patch.TEST_PREFIX`
for choosing which methods to wrap.
"""
getter = lambda: target
return _patch(
getter, attribute, new, spec, create, mocksignature,
spec_set, autospec, new_callable, kwargs
)
def _patch_multiple(target, spec=None, create=False,
mocksignature=False, spec_set=None, autospec=False,
new_callable=None, **kwargs
):
"""Perform multiple patches in a single call. It takes the object to be
patched (either as an object or a string to fetch the object by importing)
and keyword arguments for the patches::
with patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'):
...
Use `DEFAULT` as the value if you want `patch.multiple` to create
mocks for you. In this case the created mocks are passed into a decorated
function by keyword, and a dictionary is returned when `patch.multiple` is
used as a context manager.
`patch.multiple` can be used as a decorator, class decorator or a context
manager. The arguments `spec`, `spec_set`, `create`, `mocksignature`,
`autospec` and `new_callable` have the same meaning as for `patch`. These
arguments will be applied to *all* patches done by `patch.multiple`.
When used as a class decorator `patch.multiple` honours `patch.TEST_PREFIX`
for choosing which methods to wrap.
"""
if type(target) in (unicode, str):
getter = lambda: _importer(target)
else:
getter = lambda: target
if not kwargs:
raise ValueError(
'Must supply at least one keyword argument with patch.multiple'
)
# need to wrap in a list for python 3, where items is a view
items = list(kwargs.items())
attribute, new = items[0]
patcher = _patch(
getter, attribute, new, spec, create, mocksignature, spec_set,
autospec, new_callable, {}
)
patcher.attribute_name = attribute
for attribute, new in items[1:]:
this_patcher = _patch(
getter, attribute, new, spec, create, mocksignature, spec_set,
autospec, new_callable, {}
)
this_patcher.attribute_name = attribute
patcher.additional_patchers.append(this_patcher)
return patcher
def patch(
target, new=DEFAULT, spec=None, create=False,
mocksignature=False, spec_set=None, autospec=False,
new_callable=None, **kwargs
):
"""
`patch` acts as a function decorator, class decorator or a context
manager. Inside the body of the function or with statement, the `target`
(specified in the form `'package.module.ClassName'`) is patched
with a `new` object. When the function/with statement exits the patch is
undone.
The `target` is imported and the specified attribute patched with the new
object, so it must be importable from the environment you are calling the
decorator from. The target is imported when the decorated function is
executed, not at decoration time.
If `new` is omitted, then a new `MagicMock` is created and passed in as an
extra argument to the decorated function.
The `spec` and `spec_set` keyword arguments are passed to the `MagicMock`
if patch is creating one for you.
In addition you can pass `spec=True` or `spec_set=True`, which causes
patch to pass in the object being mocked as the spec/spec_set object.
`new_callable` allows you to specify a different class, or callable object,
that will be called to create the `new` object. By default `MagicMock` is
used.
A more powerful form of `spec` is `autospec`. If you set `autospec=True`
then the mock with be created with a spec from the object being replaced.
All attributes of the mock will also have the spec of the corresponding
attribute of the object being replaced. Methods and functions being mocked
will have their arguments checked and will raise a `TypeError` if they are
called with the wrong signature (similar to `mocksignature`). For mocks
replacing a class, their return value (the 'instance') will have the same
spec as the class.
Instead of `autospec=True` you can pass `autospec=some_object` to use an
arbitrary object as the spec instead of the one being replaced.
If `mocksignature` is True then the patch will be done with a function
created by mocking the one being replaced. If the object being replaced is
a class then the signature of `__init__` will be copied. If the object
being replaced is a callable object then the signature of `__call__` will
be copied.
By default `patch` will fail to replace attributes that don't exist. If
you pass in `create=True`, and the attribute doesn't exist, patch will
create the attribute for you when the patched function is called, and
delete it again afterwards. This is useful for writing tests against
attributes that your production code creates at runtime. It is off by by
default because it can be dangerous. With it switched on you can write
passing tests against APIs that don't actually exist!
Patch can be used as a `TestCase` class decorator. It works by
decorating each test method in the class. This reduces the boilerplate
code when your test methods share a common patchings set. `patch` finds
tests by looking for method names that start with `patch.TEST_PREFIX`.
By default this is `test`, which matches the way `unittest` finds tests.
You can specify an alternative prefix by setting `patch.TEST_PREFIX`.
Patch can be used as a context manager, with the with statement. Here the
patching applies to the indented block after the with statement. If you
use "as" then the patched object will be bound to the name after the
"as"; very useful if `patch` is creating a mock object for you.
`patch` takes arbitrary keyword arguments. These will be passed to
the `Mock` (or `new_callable`) on construction.
`patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are
available for alternate use-cases.
"""
getter, attribute = _get_target(target)
return _patch(
getter, attribute, new, spec, create, mocksignature,
spec_set, autospec, new_callable, kwargs
)
class _patch_dict(object):
"""
Patch a dictionary, or dictionary like object, and restore the dictionary
to its original state after the test.
`in_dict` can be a dictionary or a mapping like container. If it is a
mapping then it must at least support getting, setting and deleting items
plus iterating over keys.
`in_dict` can also be a string specifying the name of the dictionary, which
will then be fetched by importing it.
`values` can be a dictionary of values to set in the dictionary. `values`
can also be an iterable of `(key, value)` pairs.
If `clear` is True then the dictionary will be cleared before the new
values are set.
`patch.dict` can also be called with arbitrary keyword arguments to set
values in the dictionary::
with patch.dict('sys.modules', mymodule=Mock(), other_module=Mock()):
...
`patch.dict` can be used as a context manager, decorator or class
decorator. When used as a class decorator `patch.dict` honours
`patch.TEST_PREFIX` for choosing which methods to wrap.
"""
def __init__(self, in_dict, values=(), clear=False, **kwargs):
if isinstance(in_dict, basestring):
in_dict = _importer(in_dict)
self.in_dict = in_dict
# support any argument supported by dict(...) constructor
self.values = dict(values)
self.values.update(kwargs)
self.clear = clear
self._original = None
def __call__(self, f):
if isinstance(f, ClassTypes):
return self.decorate_class(f)
@wraps(f)
def _inner(*args, **kw):
self._patch_dict()
try:
return f(*args, **kw)
finally:
self._unpatch_dict()
return _inner
def decorate_class(self, klass):
for attr in dir(klass):
attr_value = getattr(klass, attr)
if (attr.startswith(patch.TEST_PREFIX) and
hasattr(attr_value, "__call__")):
decorator = _patch_dict(self.in_dict, self.values, self.clear)
decorated = decorator(attr_value)
setattr(klass, attr, decorated)
return klass
def __enter__(self):
"""Patch the dict."""
self._patch_dict()
def _patch_dict(self):
values = self.values
in_dict = self.in_dict
clear = self.clear
try:
original = in_dict.copy()
except AttributeError:
# dict like object with no copy method
# must support iteration over keys
original = {}
for key in in_dict:
original[key] = in_dict[key]
self._original = original
if clear:
_clear_dict(in_dict)
try:
in_dict.update(values)
except AttributeError:
# dict like object with no update method
for key in values:
in_dict[key] = values[key]
def _unpatch_dict(self):
in_dict = self.in_dict
original = self._original
_clear_dict(in_dict)
try:
in_dict.update(original)
except AttributeError:
for key in original:
in_dict[key] = original[key]
def __exit__(self, *args):
"""Unpatch the dict."""
self._unpatch_dict()
return False
start = __enter__
stop = __exit__
def _clear_dict(in_dict):
try:
in_dict.clear()
except AttributeError:
keys = list(in_dict)
for key in keys:
del in_dict[key]
patch.object = _patch_object
patch.dict = _patch_dict
patch.multiple = _patch_multiple
patch.TEST_PREFIX = 'test'
magic_methods = (
"lt le gt ge eq ne "
"getitem setitem delitem "
"len contains iter "
"hash str sizeof "
"enter exit "
"divmod neg pos abs invert "
"complex int float index "
"trunc floor ceil "
)
numerics = "add sub mul div floordiv mod lshift rshift and xor or pow "
inplace = ' '.join('i%s' % n for n in numerics.split())
right = ' '.join('r%s' % n for n in numerics.split())
extra = ''
if inPy3k:
extra = 'bool next '
else:
extra = 'unicode long nonzero oct hex truediv rtruediv '
# not including __prepare__, __instancecheck__, __subclasscheck__
# (as they are metaclass methods)
# __del__ is not supported at all as it causes problems if it exists
_non_defaults = set('__%s__' % method for method in [
'cmp', 'getslice', 'setslice', 'coerce', 'subclasses',
'format', 'get', 'set', 'delete', 'reversed',
'missing', 'reduce', 'reduce_ex', 'getinitargs',
'getnewargs', 'getstate', 'setstate', 'getformat',
'setformat', 'repr', 'dir'
])
def _get_method(name, func):
"Turns a callable object (like a mock) into a real function"
def method(self, *args, **kw):
return func(self, *args, **kw)
method.__name__ = name
return method
_magics = set(
'__%s__' % method for method in
' '.join([magic_methods, numerics, inplace, right, extra]).split()
)
_all_magics = _magics | _non_defaults
_unsupported_magics = set([
'__getattr__', '__setattr__',
'__init__', '__new__', '__prepare__'
'__instancecheck__', '__subclasscheck__',
'__del__'
])
_calculate_return_value = {
'__hash__': lambda self: object.__hash__(self),
'__str__': lambda self: object.__str__(self),
'__sizeof__': lambda self: object.__sizeof__(self),
'__unicode__': lambda self: unicode(object.__str__(self)),
}
_return_values = {
'__int__': 1,
'__contains__': False,
'__len__': 0,
'__exit__': False,
'__complex__': 1j,
'__float__': 1.0,
'__bool__': True,
'__nonzero__': True,
'__oct__': '1',
'__hex__': '0x1',
'__long__': long(1),
'__index__': 1,
}
def _get_eq(self):
def __eq__(other):
ret_val = self.__eq__._mock_return_value
if ret_val is not DEFAULT:
return ret_val
return self is other
return __eq__
def _get_ne(self):
def __ne__(other):
if self.__ne__._mock_return_value is not DEFAULT:
return DEFAULT
return self is not other
return __ne__
def _get_iter(self):
def __iter__():
ret_val = self.__iter__._mock_return_value
if ret_val is DEFAULT:
return iter([])
# if ret_val was already an iterator, then calling iter on it should
# return the iterator unchanged
return iter(ret_val)
return __iter__
_side_effect_methods = {
'__eq__': _get_eq,
'__ne__': _get_ne,
'__iter__': _get_iter,
}
def _set_return_value(mock, method, name):
fixed = _return_values.get(name, DEFAULT)
if fixed is not DEFAULT:
method.return_value = fixed
return
return_calulator = _calculate_return_value.get(name)
if return_calulator is not None:
try:
return_value = return_calulator(mock)
except AttributeError:
# XXXX why do we return AttributeError here?
# set it as a side_effect instead?
return_value = AttributeError(name)
method.return_value = return_value
return
side_effector = _side_effect_methods.get(name)
if side_effector is not None:
method.side_effect = side_effector(mock)
class MagicMixin(object):
def __init__(self, *args, **kw):
_super(MagicMixin, self).__init__(*args, **kw)
self._mock_set_magics()
def _mock_set_magics(self):
these_magics = _magics
if self._mock_methods is not None:
these_magics = _magics.intersection(self._mock_methods)
remove_magics = set()
remove_magics = _magics - these_magics
for entry in remove_magics:
if entry in type(self).__dict__:
# remove unneeded magic methods
delattr(self, entry)
# don't overwrite existing attributes if called a second time
these_magics = these_magics - set(type(self).__dict__)
_type = type(self)
for entry in these_magics:
setattr(_type, entry, MagicProxy(entry, self))
class NonCallableMagicMock(MagicMixin, NonCallableMock):
"""A version of `MagicMock` that isn't callable."""
def mock_add_spec(self, spec, spec_set=False):
"""Add a spec to a mock. `spec` can either be an object or a
list of strings. Only attributes on the `spec` can be fetched as
attributes from the mock.
If `spec_set` is True then only attributes on the spec can be set."""
self._mock_add_spec(spec, spec_set)
self._mock_set_magics()
class MagicMock(MagicMixin, Mock):
"""
MagicMock is a subclass of Mock with default implementations
of most of the magic methods. You can use MagicMock without having to
configure the magic methods yourself.
If you use the `spec` or `spec_set` arguments then *only* magic
methods that exist in the spec will be created.
Attributes and the return value of a `MagicMock` will also be `MagicMocks`.
"""
def mock_add_spec(self, spec, spec_set=False):
"""Add a spec to a mock. `spec` can either be an object or a
list of strings. Only attributes on the `spec` can be fetched as
attributes from the mock.
If `spec_set` is True then only attributes on the spec can be set."""
self._mock_add_spec(spec, spec_set)
self._mock_set_magics()
class MagicProxy(object):
def __init__(self, name, parent):
self.name = name
self.parent = parent
def __call__(self, *args, **kwargs):
m = self.create_mock()
return m(*args, **kwargs)
def create_mock(self):
entry = self.name
parent = self.parent
m = parent._get_child_mock(name=entry, _new_name=entry,
_new_parent=parent)
setattr(parent, entry, m)
_set_return_value(parent, m, entry)
return m
def __get__(self, obj, _type=None):
return self.create_mock()
class _ANY(object):
"A helper object that compares equal to everything."
def __eq__(self, other):
return True
def __ne__(self, other):
return False
def __repr__(self):
return '<ANY>'
ANY = _ANY()
def _format_call_signature(name, args, kwargs):
message = '%s(%%s)' % name
formatted_args = ''
args_string = ', '.join([repr(arg) for arg in args])
kwargs_string = ', '.join([
'%s=%r' % (key, value) for key, value in kwargs.items()
])
if args_string:
formatted_args = args_string
if kwargs_string:
if formatted_args:
formatted_args += ', '
formatted_args += kwargs_string
return message % formatted_args
class _Call(tuple):
"""
A tuple for holding the results of a call to a mock, either in the form
`(args, kwargs)` or `(name, args, kwargs)`.
If args or kwargs are empty then a call tuple will compare equal to
a tuple without those values. This makes comparisons less verbose::
_Call(('name', (), {})) == ('name',)
_Call(('name', (1,), {})) == ('name', (1,))
_Call(((), {'a': 'b'})) == ({'a': 'b'},)
The `_Call` object provides a useful shortcut for comparing with call::
_Call(((1, 2), {'a': 3})) == call(1, 2, a=3)
_Call(('foo', (1, 2), {'a': 3})) == call.foo(1, 2, a=3)
If the _Call has no name then it will match any name.
"""
def __new__(cls, value=(), name=None, parent=None, two=False,
from_kall=True):
name = ''
args = ()
kwargs = {}
_len = len(value)
if _len == 3:
name, args, kwargs = value
elif _len == 2:
first, second = value
if isinstance(first, basestring):
name = first
if isinstance(second, tuple):
args = second
else:
kwargs = second
else:
args, kwargs = first, second
elif _len == 1:
value, = value
if isinstance(value, basestring):
name = value
elif isinstance(value, tuple):
args = value
else:
kwargs = value
if two:
return tuple.__new__(cls, (args, kwargs))
return tuple.__new__(cls, (name, args, kwargs))
def __init__(self, value=(), name=None, parent=None, two=False,
from_kall=True):
self.name = name
self.parent = parent
self.from_kall = from_kall
def __eq__(self, other):
if other is ANY:
return True
try:
len_other = len(other)
except TypeError:
return False
self_name = ''
if len(self) == 2:
self_args, self_kwargs = self
else:
self_name, self_args, self_kwargs = self
other_name = ''
if len_other == 0:
other_args, other_kwargs = (), {}
elif len_other == 3:
other_name, other_args, other_kwargs = other
elif len_other == 1:
value, = other
if isinstance(value, tuple):
other_args = value
other_kwargs = {}
elif isinstance(value, basestring):
other_name = value
other_args, other_kwargs = (), {}
else:
other_args = ()
other_kwargs = value
else:
# len 2
# could be (name, args) or (name, kwargs) or (args, kwargs)
first, second = other
if isinstance(first, basestring):
other_name = first
if isinstance(second, tuple):
other_args, other_kwargs = second, {}
else:
other_args, other_kwargs = (), second
else:
other_args, other_kwargs = first, second
if self_name and other_name != self_name:
return False
# this order is important for ANY to work!
return (other_args, other_kwargs) == (self_args, self_kwargs)
def __ne__(self, other):
return not self.__eq__(other)
def __call__(self, *args, **kwargs):
if self.name is None:
return _Call(('', args, kwargs), name='()')
name = self.name + '()'
return _Call((self.name, args, kwargs), name=name, parent=self)
def __getattr__(self, attr):
if self.name is None:
return _Call(name=attr, from_kall=False)
name = '%s.%s' % (self.name, attr)
return _Call(name=name, parent=self, from_kall=False)
def __repr__(self):
if not self.from_kall:
name = self.name or 'call'
if name.startswith('()'):
name = 'call%s' % name
return name
if len(self) == 2:
name = 'call'
args, kwargs = self
else:
name, args, kwargs = self
if not name:
name = 'call'
elif not name.startswith('()'):
name = 'call.%s' % name
else:
name = 'call%s' % name
return _format_call_signature(name, args, kwargs)
def call_list(self):
"""For a call object that represents multiple calls, `call_list`
returns a list of all the intermediate calls as well as the
final call."""
vals = []
thing = self
while thing is not None:
if thing.from_kall:
vals.append(thing)
thing = thing.parent
return _CallList(reversed(vals))
call = _Call(from_kall=False)
def create_autospec(spec, spec_set=False, instance=False, _parent=None,
_name=None, **kwargs):
"""Create a mock object using another object as a spec. Attributes on the
mock will use the corresponding attribute on the `spec` object as their
spec.
Functions or methods being mocked will have their arguments checked in a
similar way to `mocksignature` to check that they are called with the
correct signature.
If `spec_set` is True then attempting to set attributes that don't exist
on the spec object will raise an `AttributeError`.
If a class is used as a spec then the return value of the mock (the
instance of the class) will have the same spec. You can use a class as the
spec for an instance object by passing `instance=True`. The returned mock
will only be callable if instances of the mock are callable.
`create_autospec` also takes arbitrary keyword arguments that are passed to
the constructor of the created mock."""
if _is_list(spec):
# can't pass a list instance to the mock constructor as it will be
# interpreted as a list of strings
spec = type(spec)
is_type = isinstance(spec, ClassTypes)
_kwargs = {'spec': spec}
if spec_set:
_kwargs = {'spec_set': spec}
elif spec is None:
# None we mock with a normal mock without a spec
_kwargs = {}
_kwargs.update(kwargs)
Klass = MagicMock
if type(spec) in DescriptorTypes:
# descriptors don't have a spec
# because we don't know what type they return
_kwargs = {}
elif not _callable(spec):
Klass = NonCallableMagicMock
elif is_type and instance and not _instance_callable(spec):
Klass = NonCallableMagicMock
_new_name = _name
if _parent is None:
# for a top level object no _new_name should be set
_new_name = ''
mock = Klass(parent=_parent, _new_parent=_parent, _new_name=_new_name,
name=_name, **_kwargs)
if isinstance(spec, FunctionTypes):
# should only happen at the top level because we don't
# recurse for functions
mock = _set_signature(mock, spec)
else:
_check_signature(spec, mock, is_type, instance)
if _parent is not None and not instance:
_parent._mock_children[_name] = mock
if is_type and not instance and 'return_value' not in kwargs:
# XXXX could give a name to the return_value mock?
mock.return_value = create_autospec(spec, spec_set, instance=True,
_name='()', _parent=mock)
for entry in dir(spec):
if _is_magic(entry):
# MagicMock already does the useful magic methods for us
continue
if isinstance(spec, FunctionTypes) and entry in FunctionAttributes:
# allow a mock to actually be a function from mocksignature
continue
# XXXX do we need a better way of getting attributes without
# triggering code execution (?) Probably not - we need the actual
# object to mock it so we would rather trigger a property than mock
# the property descriptor. Likewise we want to mock out dynamically
# provided attributes.
# XXXX what about attributes that raise exceptions on being fetched
# we could be resilient against it, or catch and propagate the
# exception when the attribute is fetched from the mock
original = getattr(spec, entry)
kwargs = {'spec': original}
if spec_set:
kwargs = {'spec_set': original}
if not isinstance(original, FunctionTypes):
new = _SpecState(original, spec_set, mock, entry, instance)
mock._mock_children[entry] = new
else:
parent = mock
if isinstance(spec, FunctionTypes):
parent = mock.mock
new = MagicMock(parent=parent, name=entry, _new_name=entry,
_new_parent=parent, **kwargs)
mock._mock_children[entry] = new
skipfirst = _must_skip(spec, entry, is_type)
_check_signature(original, new, skipfirst=skipfirst)
# so functions created with mocksignature become instance attributes,
# *plus* their underlying mock exists in _mock_children of the parent
# mock. Adding to _mock_children may be unnecessary where we are also
# setting as an instance attribute?
if isinstance(new, FunctionTypes):
setattr(mock, entry, new)
return mock
def _must_skip(spec, entry, is_type):
if not isinstance(spec, ClassTypes):
if entry in getattr(spec, '__dict__', {}):
# instance attribute - shouldn't skip
return False
# can't use type because of old style classes
spec = spec.__class__
if not hasattr(spec, '__mro__'):
# old style class: can't have descriptors anyway
return is_type
for klass in spec.__mro__:
result = klass.__dict__.get(entry, DEFAULT)
if result is DEFAULT:
continue
if isinstance(result, (staticmethod, classmethod)):
return False
return is_type
# shouldn't get here unless function is a dynamically provided attribute
# XXXX untested behaviour
return is_type
def _get_class(obj):
try:
return obj.__class__
except AttributeError:
# in Python 2, _sre.SRE_Pattern objects have no __class__
return type(obj)
class _SpecState(object):
def __init__(self, spec, spec_set=False, parent=None,
name=None, ids=None, instance=False):
self.spec = spec
self.ids = ids
self.spec_set = spec_set
self.parent = parent
self.instance = instance
self.name = name
FunctionTypes = (
# python function
type(create_autospec),
# instance method
type(ANY.__eq__),
# unbound method
type(_ANY.__eq__),
)
FunctionAttributes = set([
'func_closure',
'func_code',
'func_defaults',
'func_dict',
'func_doc',
'func_globals',
'func_name',
])
| mozilla/popcorn_maker | vendor-local/lib/python/mock.py | Python | bsd-3-clause | 73,348 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
try:
import ovirtsdk4 as sdk
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.ovirt import *
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ovirt_host_pm
short_description: Module to manage power management of hosts in oVirt
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage power management of hosts in oVirt."
options:
name:
description:
- "Name of the the host to manage."
required: true
aliases: ['host']
state:
description:
- "Should the host be present/absent."
choices: ['present', 'absent']
default: present
address:
description:
- "Address of the power management interface."
username:
description:
- "Username to be used to connect to power management interface."
password:
description:
- "Password of the user specified in C(username) parameter."
type:
description:
- "Type of the power management. oVirt predefined values are I(drac5), I(ipmilan), I(rsa),
I(bladecenter), I(alom), I(apc), I(apc_snmp), I(eps), I(wti), I(rsb), I(cisco_ucs),
I(drac7), I(hpblade), I(ilo), I(ilo2), I(ilo3), I(ilo4), I(ilo_ssh),
but user can have defined custom type."
port:
description:
- "Power management interface port."
slot:
description:
- "Power management slot."
options:
description:
- "Dictionary of additional fence agent options."
- "Additional information about options can be found at U(https://fedorahosted.org/cluster/wiki/FenceArguments)."
encrypt_options:
description:
- "If (true) options will be encrypted when send to agent."
aliases: ['encrypt']
order:
description:
- "Integer value specifying, by default it's added at the end."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Add fence agent to host 'myhost'
- ovirt_host_pm:
name: myhost
address: 1.2.3.4
options:
myoption1: x
myoption2: y
username: admin
password: admin
port: 3333
type: ipmilan
# Remove ipmilan fence agent with address 1.2.3.4 on host 'myhost'
- ovirt_host_pm:
state: absent
name: myhost
address: 1.2.3.4
type: ipmilan
'''
RETURN = '''
id:
description: ID of the agent which is managed
returned: On success if agent is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
agent:
description: "Dictionary of all the agent attributes. Agent attributes can be found on your oVirt instance
                  at the following url: https://ovirt.example.com/ovirt-engine/api/model#types/agent."
returned: On success if agent is found.
'''
class HostModule(BaseModule):
def build_entity(self):
return otypes.Host(
power_management=otypes.PowerManagement(
enabled=True,
),
)
def update_check(self, entity):
return equal(True, entity.power_management.enabled)
class HostPmModule(BaseModule):
def build_entity(self):
return otypes.Agent(
address=self._module.params['address'],
encrypt_options=self._module.params['encrypt_options'],
options=[
otypes.Option(
name=name,
value=value,
) for name, value in self._module.params['options'].items()
] if self._module.params['options'] else None,
password=self._module.params['password'],
port=self._module.params['port'],
type=self._module.params['type'],
username=self._module.params['username'],
order=self._module.params.get('order', 100),
)
def update_check(self, entity):
return (
equal(self._module.params.get('address'), entity.address) and
equal(self._module.params.get('encrypt_options'), entity.encrypt_options) and
equal(self._module.params.get('password'), entity.password) and
equal(self._module.params.get('username'), entity.username) and
equal(self._module.params.get('port'), entity.port) and
equal(self._module.params.get('type'), entity.type)
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent'],
default='present',
),
name=dict(default=None, required=True, aliases=['host']),
address=dict(default=None),
username=dict(default=None),
password=dict(default=None),
type=dict(default=None),
port=dict(default=None, type='int'),
slot=dict(default=None),
options=dict(default=None, type='dict'),
encrypt_options=dict(default=None, type='bool', aliases=['encrypt']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
check_sdk(module)
    connection = None
    try:
        connection = create_connection(module.params.pop('auth'))
hosts_service = connection.system_service().hosts_service()
host = search_by_name(hosts_service, module.params['name'])
fence_agents_service = hosts_service.host_service(host.id).fence_agents_service()
host_pm_module = HostPmModule(
connection=connection,
module=module,
service=fence_agents_service,
)
host_module = HostModule(
connection=connection,
module=module,
service=hosts_service,
)
state = module.params['state']
if state == 'present':
agent = host_pm_module.search_entity(
search_params={
'address': module.params['address'],
'type': module.params['type'],
}
)
ret = host_pm_module.create(entity=agent)
# Enable Power Management, if it's not enabled:
host_module.create(entity=host)
elif state == 'absent':
agent = host_pm_module.search_entity(
search_params={
'address': module.params['address'],
'type': module.params['type'],
}
)
ret = host_pm_module.remove(entity=agent)
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e))
    finally:
        if connection is not None:
            connection.close(logout=False)
from ansible.module_utils.basic import *
if __name__ == "__main__":
main()
| kaarolch/ansible | lib/ansible/modules/cloud/ovirt/ovirt_host_pm.py | Python | gpl-3.0 | 7,687 |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line tools for authenticating via OAuth 2.0
Do the OAuth 2.0 Web Server dance for a command line application. Stores the
generated credentials in a common file that is used by other example apps in
the same directory.
"""
from __future__ import print_function
__author__ = '[email protected] (Joe Gregorio)'
__all__ = ['argparser', 'run_flow', 'run', 'message_if_missing']
import logging
import socket
import sys
import webbrowser
from six.moves import BaseHTTPServer
from six.moves import urllib
from oauth2client import client
from oauth2client import util
_CLIENT_SECRETS_MESSAGE = """WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console <https://code.google.com/apis/console>.
"""
def _CreateArgumentParser():
try:
import argparse
except ImportError:
return None
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--auth_host_name', default='localhost',
help='Hostname when running a local web server.')
parser.add_argument('--noauth_local_webserver', action='store_true',
default=False, help='Do not run a local web server.')
parser.add_argument('--auth_host_port', default=[8080, 8090], type=int,
nargs='*', help='Port web server should listen on.')
parser.add_argument('--logging_level', default='ERROR',
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
help='Set the logging level of detail.')
return parser
# argparser is an ArgumentParser that contains command-line options expected
# by tools.run(). Pass it in as part of the 'parents' argument to your own
# ArgumentParser.
argparser = _CreateArgumentParser()
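# Illustrative usage sketch (not executed; the option string is a placeholder):
# argparser is designed to be composed into your own parser via ``parents``.
#
#   import argparse
#   parser = argparse.ArgumentParser(parents=[argparser])
#   flags = parser.parse_args(['--noauth_local_webserver'])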
class ClientRedirectServer(BaseHTTPServer.HTTPServer):
"""A server to handle OAuth 2.0 redirects back to localhost.
Waits for a single request and parses the query parameters
into query_params and then stops serving.
"""
query_params = {}
class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""A handler for OAuth 2.0 redirects back to localhost.
Waits for a single request and parses the query parameters
into the servers query_params and then stops serving.
"""
def do_GET(self):
"""Handle a GET request.
Parses the query parameters and prints a message
if the flow has completed. Note that we can't detect
if an error occurred.
"""
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
query = self.path.split('?', 1)[-1]
query = dict(urllib.parse.parse_qsl(query))
self.server.query_params = query
self.wfile.write("<html><head><title>Authentication Status</title></head>")
self.wfile.write("<body><p>The authentication flow has completed.</p>")
self.wfile.write("</body></html>")
def log_message(self, format, *args):
"""Do not log messages to stdout while running as command line program."""
@util.positional(3)
def run_flow(flow, storage, flags, http=None):
"""Core code for a command-line application.
The run() function is called from your application and runs through all the
steps to obtain credentials. It takes a Flow argument and attempts to open an
authorization server page in the user's default web browser. The server asks
the user to grant your application access to the user's data. If the user
grants access, the run() function returns new credentials. The new credentials
are also stored in the Storage argument, which updates the file associated
with the Storage object.
It presumes it is run from a command-line application and supports the
following flags:
--auth_host_name: Host name to use when running a local web server
to handle redirects during OAuth authorization.
(default: 'localhost')
--auth_host_port: Port to use when running a local web server to handle
       redirects during OAuth authorization;
repeat this option to specify a list of values
(default: '[8080, 8090]')
(an integer)
--[no]auth_local_webserver: Run a local web server to handle redirects
during OAuth authorization.
(default: 'true')
  The tools module defines an ArgumentParser that already contains the flag
definitions that run() requires. You can pass that ArgumentParser to your
ArgumentParser constructor:
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
parents=[tools.argparser])
flags = parser.parse_args(argv)
Args:
flow: Flow, an OAuth 2.0 Flow to step through.
storage: Storage, a Storage to store the credential in.
flags: argparse.ArgumentParser, the command-line flags.
http: An instance of httplib2.Http.request
or something that acts like it.
Returns:
Credentials, the obtained credential.
"""
logging.getLogger().setLevel(getattr(logging, flags.logging_level))
if not flags.noauth_local_webserver:
success = False
port_number = 0
for port in flags.auth_host_port:
port_number = port
try:
httpd = ClientRedirectServer((flags.auth_host_name, port),
ClientRedirectHandler)
except socket.error:
pass
else:
success = True
break
flags.noauth_local_webserver = not success
if not success:
      print('Failed to start a local webserver listening on either port 8080')
      print('or port 8090. Please check your firewall settings and locally')
print('running programs that may be blocking or using those ports.')
print()
print('Falling back to --noauth_local_webserver and continuing with')
print('authorization.')
print()
if not flags.noauth_local_webserver:
oauth_callback = 'http://%s:%s/' % (flags.auth_host_name, port_number)
else:
oauth_callback = client.OOB_CALLBACK_URN
flow.redirect_uri = oauth_callback
authorize_url = flow.step1_get_authorize_url()
if not flags.noauth_local_webserver:
webbrowser.open(authorize_url, new=1, autoraise=True)
print('Your browser has been opened to visit:')
print()
print(' ' + authorize_url)
print()
print('If your browser is on a different machine then exit and re-run this')
print('application with the command-line parameter ')
print()
print(' --noauth_local_webserver')
print()
else:
print('Go to the following link in your browser:')
print()
print(' ' + authorize_url)
print()
code = None
if not flags.noauth_local_webserver:
httpd.handle_request()
if 'error' in httpd.query_params:
sys.exit('Authentication request was rejected.')
if 'code' in httpd.query_params:
code = httpd.query_params['code']
else:
print('Failed to find "code" in the query parameters of the redirect.')
sys.exit('Try running with --noauth_local_webserver.')
else:
code = raw_input('Enter verification code: ').strip()
try:
credential = flow.step2_exchange(code, http=http)
except client.FlowExchangeError as e:
sys.exit('Authentication has failed: %s' % e)
storage.put(credential)
credential.set_store(storage)
print('Authentication successful.')
return credential
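# Illustrative usage sketch (not executed; the client secrets path, scope and
# storage filename are placeholders, not defined in this module):
#
#   from oauth2client import client, file, tools
#
#   flow = client.flow_from_clientsecrets(
#       'client_secrets.json',
#       scope='https://www.googleapis.com/auth/drive.readonly')
#   storage = file.Storage('credentials.dat')
#   flags = tools.argparser.parse_args([])
#   credentials = tools.run_flow(flow, storage, flags)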
def message_if_missing(filename):
"""Helpful message to display if the CLIENT_SECRETS file is missing."""
return _CLIENT_SECRETS_MESSAGE % filename
try:
from oauth2client.old_run import run
from oauth2client.old_run import FLAGS
except ImportError:
def run(*args, **kwargs):
raise NotImplementedError(
'The gflags library must be installed to use tools.run(). '
        'Please install gflags or preferably switch to using '
'tools.run_flow().')
| ychen820/microblog | y/google-cloud-sdk/platform/gsutil/third_party/oauth2client/oauth2client/tools.py | Python | bsd-3-clause | 8,468 |
import os
from chainer import serializers
from chainer import utils
def save_and_load(src, dst, filename, saver, loader):
"""Saves ``src`` and loads it to ``dst`` using a de/serializer.
This function simply runs a serialization and deserialization to check if
the serialization code is correctly implemented. The save and load are
done within a temporary directory.
Args:
src: An object to save from.
dst: An object to load into.
filename (str): File name used during the save/load.
saver (callable): Function that saves the source object.
loader (callable): Function that loads the file into the destination
object.
"""
with utils.tempdir() as tempdir:
path = os.path.join(tempdir, filename)
saver(path, src)
loader(path, dst)
def save_and_load_npz(src, dst):
"""Saves ``src`` to an NPZ file and loads it to ``dst``.
This is a short cut of :func:`save_and_load` using NPZ de/serializers.
Args:
src: An object to save.
dst: An object to load to.
"""
save_and_load(src, dst, 'tmp.npz',
serializers.save_npz, serializers.load_npz)
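# Illustrative usage sketch (not executed; the Linear link is a placeholder --
# any pair of serializable objects with matching structure works):
#
#   import chainer.links as L
#   src = L.Linear(3, 2)
#   dst = L.Linear(3, 2)
#   save_and_load_npz(src, dst)  # dst now holds the parameters saved from src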
def save_and_load_hdf5(src, dst):
"""Saves ``src`` to an HDF5 file and loads it to ``dst``.
This is a short cut of :func:`save_and_load` using HDF5 de/serializers.
Args:
src: An object to save.
dst: An object to load to.
"""
save_and_load(src, dst, 'tmp.h5',
serializers.save_hdf5, serializers.load_hdf5)
| rezoo/chainer | chainer/testing/serializer.py | Python | mit | 1,562 |
import urllib
def basic_authentication(username=None, password=None, protocol="http"):
from .fixtures import server_config, url
build_url = url(server_config())
query = {}
return build_url("/webdriver/tests/support/authentication.py",
query=urllib.urlencode(query),
protocol=protocol)
def main(request, response):
user = request.auth.username
password = request.auth.password
if user == "user" and password == "password":
return "Authentication done"
realm = "test"
if "realm" in request.GET:
realm = request.GET.first("realm")
return ((401, "Unauthorized"),
[("WWW-Authenticate", 'Basic realm="' + realm + '"')],
"Please login with credentials 'user' and 'password'")
| UK992/servo | tests/wpt/web-platform-tests/webdriver/tests/support/authentication.py | Python | mpl-2.0 | 800 |
#!/usr/bin/python2.4
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import common
import os
import re
import socket # for gethostname
import xml.dom
import xml_fix
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
class Writer(object):
"""Visual Studio XML user user file writer."""
def __init__(self, user_file_path, version):
"""Initializes the user file.
    Args:
      user_file_path: Path to the user file.
      version: Version object (provides ProjectVersion()) identifying the
        Visual Studio version to target.
    """
self.user_file_path = user_file_path
self.version = version
self.doc = None
def Create(self, name):
"""Creates the user file document.
Args:
name: Name of the user file.
"""
self.name = name
# Create XML doc
xml_impl = xml.dom.getDOMImplementation()
self.doc = xml_impl.createDocument(None, 'VisualStudioUserFile', None)
# Add attributes to root element
self.n_root = self.doc.documentElement
self.n_root.setAttribute('Version', self.version.ProjectVersion())
self.n_root.setAttribute('Name', self.name)
# Add configurations section
self.n_configs = self.doc.createElement('Configurations')
self.n_root.appendChild(self.n_configs)
def _AddConfigToNode(self, parent, config_type, config_name):
"""Adds a configuration to the parent node.
Args:
parent: Destination node.
config_type: Type of configuration node.
config_name: Configuration name.
"""
# Add configuration node and its attributes
n_config = self.doc.createElement(config_type)
n_config.setAttribute('Name', config_name)
parent.appendChild(n_config)
def AddConfig(self, name):
"""Adds a configuration to the project.
Args:
name: Configuration name.
"""
self._AddConfigToNode(self.n_configs, 'Configuration', name)
def AddDebugSettings(self, config_name, command, environment = {},
working_directory=""):
"""Adds a DebugSettings node to the user file for a particular config.
    Args:
      config_name: Name of the configuration to add the debug settings to.
      command: command line to run. First element in the list is the
        executable. All elements of the command will be quoted if
        necessary.
      environment: dict of environment variables for the debugged process.
        (optional)
      working_directory: working directory in which to run the command.
        (optional)
    """
command = _QuoteWin32CommandLineArgs(command)
n_cmd = self.doc.createElement('DebugSettings')
abs_command = _FindCommandInPath(command[0])
n_cmd.setAttribute('Command', abs_command)
n_cmd.setAttribute('WorkingDirectory', working_directory)
n_cmd.setAttribute('CommandArguments', " ".join(command[1:]))
n_cmd.setAttribute('RemoteMachine', socket.gethostname())
if environment and isinstance(environment, dict):
n_cmd.setAttribute('Environment',
" ".join(['%s="%s"' % (key, val)
for (key,val) in environment.iteritems()]))
else:
n_cmd.setAttribute('Environment', '')
n_cmd.setAttribute('EnvironmentMerge', 'true')
# Currently these are all "dummy" values that we're just setting
# in the default manner that MSVS does it. We could use some of
# these to add additional capabilities, I suppose, but they might
# not have parity with other platforms then.
n_cmd.setAttribute('Attach', 'false')
n_cmd.setAttribute('DebuggerType', '3') # 'auto' debugger
n_cmd.setAttribute('Remote', '1')
n_cmd.setAttribute('RemoteCommand', '')
n_cmd.setAttribute('HttpUrl', '')
n_cmd.setAttribute('PDBPath', '')
n_cmd.setAttribute('SQLDebugging', '')
n_cmd.setAttribute('DebuggerFlavor', '0')
n_cmd.setAttribute('MPIRunCommand', '')
n_cmd.setAttribute('MPIRunArguments', '')
n_cmd.setAttribute('MPIRunWorkingDirectory', '')
n_cmd.setAttribute('ApplicationCommand', '')
n_cmd.setAttribute('ApplicationArguments', '')
n_cmd.setAttribute('ShimCommand', '')
n_cmd.setAttribute('MPIAcceptMode', '')
n_cmd.setAttribute('MPIAcceptFilter', '')
# Find the config, and add it if it doesn't exist.
found = False
for config in self.n_configs.childNodes:
if config.getAttribute("Name") == config_name:
found = True
if not found:
self.AddConfig(config_name)
# Add the DebugSettings onto the appropriate config.
for config in self.n_configs.childNodes:
if config.getAttribute("Name") == config_name:
config.appendChild(n_cmd)
break
def Write(self, writer=common.WriteOnDiff):
"""Writes the user file."""
f = writer(self.user_file_path)
self.doc.writexml(f, encoding='Windows-1252', addindent=' ', newl='\r\n')
f.close()
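# Illustrative usage sketch (not executed; paths, configuration name and
# command are placeholders, and ``version`` is whatever MSVS version object
# the generator already holds):
#
#   user_file = Writer('foo.vcproj.user', version)
#   user_file.Create('foo')
#   user_file.AddConfig('Debug|Win32')
#   user_file.AddDebugSettings('Debug|Win32', ['foo.exe', '--arg'],
#                              environment={'FOO': 'bar'},
#                              working_directory='$(ProjectDir)')
#   user_file.Write()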
#------------------------------------------------------------------------------
| nawawi/wkhtmltopdf | webkit/Source/ThirdParty/gyp/pylib/gyp/MSVSUserFile.py | Python | lgpl-3.0 | 6,250 |
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test that two targets with the same name generates an error.
"""
import os
import sys
import TestGyp
import TestCmd
# TODO(sbc): Remove the use of match_re below, done because scons
# error messages were not consistent with other generators.
# Also remove input.py:generator_wants_absolute_build_file_paths.
test = TestGyp.TestGyp()
stderr = ('gyp: Duplicate target definitions for '
'.*duplicate_targets.gyp:foo#target\n')
test.run_gyp('duplicate_targets.gyp', status=1, stderr=stderr,
match=TestCmd.match_re)
stderr = ('.*: Unable to find targets in build file .*missing_targets.gyp.*')
test.run_gyp('missing_targets.gyp', status=1, stderr=stderr,
match=TestCmd.match_re_dotall)
stderr = ('gyp: rule bar exists in duplicate, target '
'.*duplicate_rule.gyp:foo#target\n')
test.run_gyp('duplicate_rule.gyp', status=1, stderr=stderr,
match=TestCmd.match_re)
stderr = ("gyp: Key 'targets' repeated at level 1 with key path '' while "
"reading .*duplicate_node.gyp.*")
test.run_gyp('duplicate_node.gyp', '--check', status=1, stderr=stderr,
match=TestCmd.match_re_dotall)
stderr = (".*target0.*target1.*target2.*target0.*")
test.run_gyp('dependency_cycle.gyp', status=1, stderr=stderr,
match=TestCmd.match_re_dotall)
stderr = (".*file_cycle0.*file_cycle1.*file_cycle0.*")
test.run_gyp('file_cycle0.gyp', status=1, stderr=stderr,
match=TestCmd.match_re_dotall)
stderr = 'gyp: Duplicate basenames in sources section, see list above\n'
test.run_gyp('duplicate_basenames.gyp', status=1, stderr=stderr)
# Check if '--no-duplicate-basename-check' works.
if ((test.format == 'make' and sys.platform == 'darwin') or
(test.format == 'msvs' and
int(os.environ.get('GYP_MSVS_VERSION', 2010)) < 2010)):
stderr = 'gyp: Duplicate basenames in sources section, see list above\n'
test.run_gyp('duplicate_basenames.gyp', '--no-duplicate-basename-check',
status=1, stderr=stderr)
else:
test.run_gyp('duplicate_basenames.gyp', '--no-duplicate-basename-check')
stderr = ("gyp: Dependency '.*missing_dep.gyp:missing.gyp#target' not found "
"while trying to load target .*missing_dep.gyp:foo#target\n")
test.run_gyp('missing_dep.gyp', status=1, stderr=stderr,
match=TestCmd.match_re)
test.pass_test()
| xforce/diorama-native-modding | tools/gyp/test/errors/gyptest-errors.py | Python | bsd-3-clause | 2,537 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=no-self-use, pointless-statement, missing-docstring, invalid-name,len-as-condition
from functools import partial
from rebulk.pattern import StringPattern
from ..validators import chars_before, chars_after, chars_surround, validators
chars = ' _.'
left = partial(chars_before, chars)
right = partial(chars_after, chars)
surrounding = partial(chars_surround, chars)
def test_left_chars():
matches = list(StringPattern("word", validator=left).matches("xxxwordxxx"))
assert len(matches) == 0
matches = list(StringPattern("word", validator=left).matches("xxx_wordxxx"))
assert len(matches) == 1
matches = list(StringPattern("word", validator=left).matches("wordxxx"))
assert len(matches) == 1
def test_right_chars():
matches = list(StringPattern("word", validator=right).matches("xxxwordxxx"))
assert len(matches) == 0
matches = list(StringPattern("word", validator=right).matches("xxxword.xxx"))
assert len(matches) == 1
matches = list(StringPattern("word", validator=right).matches("xxxword"))
assert len(matches) == 1
def test_surrounding_chars():
matches = list(StringPattern("word", validator=surrounding).matches("xxxword xxx"))
assert len(matches) == 0
matches = list(StringPattern("word", validator=surrounding).matches("xxx.wordxxx"))
assert len(matches) == 0
matches = list(StringPattern("word", validator=surrounding).matches("xxx word_xxx"))
assert len(matches) == 1
matches = list(StringPattern("word", validator=surrounding).matches("word"))
assert len(matches) == 1
def test_chain():
matches = list(StringPattern("word", validator=validators(left, right)).matches("xxxword xxx"))
assert len(matches) == 0
matches = list(StringPattern("word", validator=validators(left, right)).matches("xxx.wordxxx"))
assert len(matches) == 0
matches = list(StringPattern("word", validator=validators(left, right)).matches("xxx word_xxx"))
assert len(matches) == 1
matches = list(StringPattern("word", validator=validators(left, right)).matches("word"))
assert len(matches) == 1
| Toilal/rebulk | rebulk/test/test_validators.py | Python | mit | 2,170 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (C) 2012 University of Dundee & Open Microscopy Environment.
All Rights Reserved.
Copyright 2013 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
pytest fixtures used as defined in conftest.py:
- gatewaywrapper
"""
import omero
from omero.rtypes import rstring
from omero.cmd import State, ERR, OK
from omero.callbacks import CmdCallbackI
PRIVATE = 'rw----'
READONLY = 'rwr---'
COLLAB = 'rwrw--'
def doChange(gateway, obj_type, obj_ids, group_id, container_id=None,
test_should_pass=True, return_complete=True):
"""
Performs the change-group action, waits on completion and checks that the
result is not an error.
"""
prx = gateway.chgrpObjects(obj_type, obj_ids, group_id, container_id)
if not return_complete:
return prx
cb = CmdCallbackI(gateway.c, prx)
try:
for i in range(10):
cb.loop(20, 500)
if prx.getResponse() is not None:
break
assert prx.getResponse() is not None
prx.getStatus()
rsp = prx.getResponse()
if test_should_pass:
assert not isinstance(rsp, ERR), \
"Found ERR when test_should_pass==true: %s (%s) params=%s" \
% (rsp.category, rsp.name, rsp.parameters)
assert State.FAILURE not in prx.getStatus().flags
else:
assert not isinstance(rsp, OK), \
"Found OK when test_should_pass==false: %s" % rsp
assert State.FAILURE in prx.getStatus().flags
return rsp
finally:
cb.close(True)
def testImageChgrp(gatewaywrapper):
"""
Create a new group with the User as member. Test move the Image to new
group.
"""
gatewaywrapper.loginAsAuthor()
image = gatewaywrapper.createTestImage()
ctx = gatewaywrapper.gateway.getAdminService().getEventContext()
uuid = ctx.sessionUuid
gatewaywrapper.loginAsAdmin()
gid = gatewaywrapper.gateway.createGroup(
"chgrp-test-%s" % uuid, member_Ids=[ctx.userId], perms=COLLAB)
gatewaywrapper.loginAsAuthor()
assert gatewaywrapper.gateway.getObject("Image", image.id) is not None
# Do the Chgrp
doChange(gatewaywrapper.gateway, "Image", [image.getId()], gid)
# Image should no-longer be available in current group
assert gatewaywrapper.gateway.getObject("Image", image.id) is None, \
"Image should not be available in original group"
# Switch to new group - confirm that image is there.
gatewaywrapper.gateway.setGroupForSession(gid)
img = gatewaywrapper.gateway.getObject("Image", image.id)
assert img is not None, "Image should be available in new group"
assert img.getDetails().getGroup().id == gid, \
"Image group.id should match new group"
def testDatasetChgrp(gatewaywrapper):
"""
Create a new group with the User as member. Test move the Dataset/Image to
new group.
"""
gatewaywrapper.loginAsAuthor()
dataset = gatewaywrapper.createPDTree(dataset="testDatasetChgrp")
image = gatewaywrapper.createTestImage(dataset=dataset)
ctx = gatewaywrapper.gateway.getAdminService().getEventContext()
uuid = ctx.sessionUuid
gatewaywrapper.loginAsAdmin()
gid = gatewaywrapper.gateway.createGroup(
"chgrp-test-%s" % uuid, member_Ids=[ctx.userId], perms=PRIVATE)
gatewaywrapper.loginAsAuthor()
assert gatewaywrapper.gateway.getObject("Image", image.id) is not None
# Do the Chgrp
doChange(gatewaywrapper.gateway, "Dataset", [dataset.id], gid)
# Dataset should no-longer be available in current group
assert gatewaywrapper.gateway.getObject("Dataset", dataset.id) is None, \
"Dataset should not be available in original group"
# Switch to new group - confirm that Dataset, Image is there.
gatewaywrapper.gateway.setGroupForSession(gid)
ds = gatewaywrapper.gateway.getObject("Dataset", dataset.id)
assert ds is not None, "Dataset should be available in new group"
img = gatewaywrapper.gateway.getObject("Image", image.id)
assert img is not None, "Image should be available in new group"
assert img.getDetails().getGroup().id == gid, \
"Image group.id should match new group"
def testPDIChgrp(gatewaywrapper):
"""
Create a new group with the User as member. Test move the
Project/Dataset/Image to new group.
"""
gatewaywrapper.loginAsAuthor()
link = gatewaywrapper.createPDTree(project="testPDIChgrp",
dataset="testPDIChgrp")
dataset = link.getChild() # DatasetWrapper
# omero.model.ProjectI - link.getParent() overwritten - returns None
project = link.parent
image = gatewaywrapper.createTestImage(dataset=dataset)
grp = project.details.group
ctx = gatewaywrapper.gateway.getAdminService().getEventContext()
uuid = ctx.sessionUuid
gatewaywrapper.loginAsAdmin()
gid = gatewaywrapper.gateway.createGroup(
"chgrp-test-%s" % uuid, member_Ids=[ctx.userId], perms=COLLAB)
gatewaywrapper.loginAsAuthor()
assert gatewaywrapper.gateway.getObject("Image", image.id) is not None
try:
# Do the Chgrp
doChange(gatewaywrapper.gateway, "Project", [project.id.val], gid)
# Image should no-longer be available in current group
assert gatewaywrapper.gateway.getObject("Image", image.id) is None, \
"Image should not be available in original group"
# Switch to new group - confirm that Project, Dataset, Image is there.
gatewaywrapper.gateway.setGroupForSession(gid)
prj = gatewaywrapper.gateway.getObject("Project", project.id.val)
assert prj is not None, "Project should be available in new group"
ds = gatewaywrapper.gateway.getObject("Dataset", dataset.id)
assert ds is not None, "Dataset should be available in new group"
img = gatewaywrapper.gateway.getObject("Image", image.id)
assert img is not None, "Image should be available in new group"
assert img.getDetails().getGroup().id == gid, \
"Image group.id should match new group"
finally:
# Change it all back
gatewaywrapper.loginAsAuthor()
# Do the Chgrp
doChange(gatewaywrapper.gateway, "Project", [project.id.val],
grp.id.val)
# Image should again be available in current group
assert gatewaywrapper.gateway.getObject("Image", image.id) \
is not None, "Image should be available in original group"
def testTwoDatasetsChgrpToProject(gatewaywrapper):
"""
Create a new group with the User as member. Image has 2 Dataset Parents.
Test move one Dataset to new group. Image does not move. Move 2nd Dataset
- Image moves.
"""
gatewaywrapper.loginAsAuthor()
dataset = gatewaywrapper.createPDTree(
dataset="testTwoDatasetsChgrpToProject")
image = gatewaywrapper.createTestImage(dataset=dataset)
orig_gid = dataset.details.group.id.val
new_ds = gatewaywrapper.createPDTree(
dataset="testTwoDatasetsChgrp-parent2")
update = gatewaywrapper.gateway.getUpdateService()
link = omero.model.DatasetImageLinkI()
link.setParent(omero.model.DatasetI(new_ds.id, False))
link.setChild(omero.model.ImageI(image.id, False))
update.saveObject(link)
ctx = gatewaywrapper.gateway.getAdminService().getEventContext()
uuid = ctx.sessionUuid
gatewaywrapper.loginAsAdmin()
gid = gatewaywrapper.gateway.createGroup("chgrp-test-%s" % uuid,
member_Ids=[ctx.userId])
gatewaywrapper.loginAsAuthor()
assert gatewaywrapper.gateway.getObject("Dataset", dataset.id) is not None
# create Project in destination group
gatewaywrapper.gateway.setGroupForSession(gid)
p = omero.model.ProjectI()
p.name = rstring("testTwoDatasetsChgrpToProject")
p = gatewaywrapper.gateway.getUpdateService().saveAndReturnObject(p)
assert p.details.group.id.val == gid, \
"Project should be created in target group"
gatewaywrapper.gateway.setGroupForSession(orig_gid) # switch back
# Do the Chgrp with one of the parents
doChange(gatewaywrapper.gateway, "Dataset", [new_ds.id], gid)
# Dataset should no-longer be available in current group
assert gatewaywrapper.gateway.getObject("Dataset", new_ds.id) is None, \
"Dataset should not be available in original group"
assert gatewaywrapper.gateway.getObject("Dataset", dataset.getId()) \
is not None, "Other Dataset should still be in original group"
# But Image should
img = gatewaywrapper.gateway.getObject("Image", image.id)
assert img is not None, \
"Image should still be available in original group"
# Do the Chgrp with the OTHER parent
# switch BEFORE doChange to allow Project link Save
gatewaywrapper.gateway.setGroupForSession(gid)
doChange(gatewaywrapper.gateway, "Dataset", [dataset.id], gid,
container_id=p.id.val)
# Confirm that Dataset AND Image is now in new group
ctx = gatewaywrapper.gateway.getAdminService().getEventContext()
ds = gatewaywrapper.gateway.getObject("Dataset", dataset.id)
projects = list(ds.listParents())
assert len(projects) == 1, \
"Dataset should have one parent Project in new group"
assert projects[0].getId() == p.id.val, \
"Check Dataset parent is Project created above"
assert ds is not None, "Dataset should now be available in new group"
assert ds.getDetails().getGroup().id == gid, \
"Dataset group.id should match new group"
img = gatewaywrapper.gateway.getObject("Image", image.id)
assert img is not None, "Image should now be available in new group"
assert img.getDetails().getGroup().id == gid, \
"Image group.id should match new group"
def testMultiDatasetDoAll(gatewaywrapper):
"""
Need to enable chgrp independently of EventContext group being the
destination group.
Other tests that do not set omero.group require this for DoAll Save to
work.
"""
gatewaywrapper.loginAsAuthor()
ctx = gatewaywrapper.gateway.getAdminService().getEventContext()
uuid = ctx.sessionUuid
update = gatewaywrapper.gateway.getUpdateService()
new_ds = omero.model.DatasetI()
new_ds.name = rstring("testMultiDatasetDoAll")
new_ds = update.saveAndReturnObject(new_ds)
new_ds2 = omero.model.DatasetI()
new_ds2.name = rstring("testMultiDatasetDoAll2")
new_ds2 = update.saveAndReturnObject(new_ds2)
# new group
gatewaywrapper.loginAsAdmin()
gid = gatewaywrapper.gateway.createGroup(
"testMultiDatasetDoAll-%s" % uuid, member_Ids=[ctx.userId])
gatewaywrapper.loginAsAuthor()
# create Project in new group
gatewaywrapper.gateway.SERVICE_OPTS.setOmeroGroup(gid)
p = omero.model.ProjectI()
p.name = rstring("testMultiChgrp")
p = gatewaywrapper.gateway.getUpdateService().saveAndReturnObject(
p, gatewaywrapper.gateway.SERVICE_OPTS)
assert p.details.group.id.val == gid, \
"Project should be created in target group"
# Test that this works whichever group you're in
gatewaywrapper.gateway.SERVICE_OPTS.setOmeroGroup(ctx.groupId)
dsIds = [new_ds.id.val, new_ds2.id.val]
# Chgrp
doChange(gatewaywrapper.gateway, "Dataset", dsIds, gid,
container_id=p.id.val)
# Check all objects in destination group
# we can get objects from either group...
gatewaywrapper.gateway.SERVICE_OPTS.setOmeroGroup(-1)
p = gatewaywrapper.gateway.getObject("Project", p.id.val)
datasets = list(p.listChildren())
assert len(datasets) == 2, "Project should have 2 new Datasets"
for d in datasets:
assert d.details.group.id.val == gid, "Dataset should be in new group"
assert d.getId() in dsIds, "Checking Datasets by ID"
| dominikl/openmicroscopy | components/tools/OmeroPy/test/integration/gatewaytest/test_chgrp.py | Python | gpl-2.0 | 12,030 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Stubouts, mocks and fixtures for the test suite
"""
import uuid
from nova.compute import task_states
from nova.compute import vm_states
from nova import db
from nova import utils
def get_fake_instance_data(name, project_id, user_id):
return {'name': name,
'id': 1,
'uuid': str(uuid.uuid4()),
'project_id': project_id,
'user_id': user_id,
'image_ref': "1",
'kernel_id': "1",
'ramdisk_id': "1",
'mac_address': "de:ad:be:ef:be:ef",
'instance_type':
{'name': 'm1.tiny',
'memory_mb': 512,
'vcpus': 1,
'root_gb': 1024,
'flavorid': 1,
'rxtx_factor': 1}
}
def get_fake_image_data(project_id, user_id):
return {'name': 'image1',
'id': 1,
'project_id': project_id,
'user_id': user_id,
'image_ref': "1",
'kernel_id': "1",
'ramdisk_id': "1",
'mac_address': "de:ad:be:ef:be:ef",
'instance_type': 'm1.tiny',
}
def get_fake_volume_info_data(target_portal, volume_id):
return {
'driver_volume_type': 'iscsi',
'data': {
'volume_id': 1,
'target_iqn': 'iqn.2010-10.org.openstack:volume-' + volume_id,
'target_portal': target_portal,
'target_lun': 1,
'auth_method': 'CHAP',
}
}
def get_fake_block_device_info(target_portal, volume_id):
return {'block_device_mapping': [{'connection_info': {
'driver_volume_type': 'iscsi',
'data': {'target_lun': 1,
'volume_id': volume_id,
'target_iqn':
'iqn.2010-10.org.openstack:volume-' +
volume_id,
'target_portal': target_portal,
'target_discovered': False}},
'mount_device': 'vda',
'delete_on_termination': False}],
'root_device_name': None,
'ephemerals': [],
'swap': None
}
def stub_out_db_instance_api(stubs):
"""Stubs out the db API for creating Instances."""
INSTANCE_TYPES = {
'm1.tiny': dict(memory_mb=512, vcpus=1, root_gb=0, flavorid=1),
'm1.small': dict(memory_mb=2048, vcpus=1, root_gb=20, flavorid=2),
'm1.medium': dict(memory_mb=4096, vcpus=2, root_gb=40, flavorid=3),
'm1.large': dict(memory_mb=8192, vcpus=4, root_gb=80, flavorid=4),
'm1.xlarge': dict(memory_mb=16384, vcpus=8, root_gb=160, flavorid=5)}
class FakeModel(object):
"""Stubs out for model."""
def __init__(self, values):
self.values = values
def get(self, key, default=None):
if key in self.values:
return self.values[key]
else:
return default
def __getattr__(self, name):
return self.values[name]
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
self.values[key] = value
def __str__(self):
return str(self.values)
def fake_instance_create(context, values):
"""Stubs out the db.instance_create method."""
if 'instance_type' not in values:
return
instance_type = values['instance_type']
base_options = {
'name': values['name'],
'id': values['id'],
'uuid': str(uuid.uuid4()),
'reservation_id': utils.generate_uid('r'),
'image_ref': values['image_ref'],
'kernel_id': values['kernel_id'],
'ramdisk_id': values['ramdisk_id'],
'vm_state': vm_states.BUILDING,
'task_state': task_states.SCHEDULING,
'user_id': values['user_id'],
'project_id': values['project_id'],
'instance_type': instance_type,
'memory_mb': instance_type['memory_mb'],
'vcpus': instance_type['vcpus'],
'mac_addresses': [{'address': values['mac_address']}],
'root_gb': instance_type['root_gb'],
}
return FakeModel(base_options)
def fake_instance_type_get_all(context, inactive=0, filters=None):
return INSTANCE_TYPES.values()
def fake_instance_type_get_by_name(context, name):
return INSTANCE_TYPES[name]
def fake_block_device_mapping_get_all_by_instance(context, instance_uuid):
return {}
stubs.Set(db, 'instance_create', fake_instance_create)
stubs.Set(db, 'flavor_get_all', fake_instance_type_get_all)
stubs.Set(db, 'flavor_get_by_name', fake_instance_type_get_by_name)
stubs.Set(db, 'block_device_mapping_get_all_by_instance',
fake_block_device_mapping_get_all_by_instance)
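# Illustrative usage sketch (not executed; assumes the stubout test dependency
# and an existing request context, both placeholders here):
#
#   stubs = stubout.StubOutForTesting()
#   stub_out_db_instance_api(stubs)
#   inst = db.instance_create(ctxt,
#                             get_fake_instance_data('vm1', 'proj', 'user'))
#   # inst is a FakeModel, so no real database access takes place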
| sacharya/nova | nova/tests/virt/hyperv/db_fakes.py | Python | apache-2.0 | 5,743 |
from django.conf.urls import url
from wagtail.documents.views import serve
urlpatterns = [
url(r'^(\d+)/(.*)$', serve.serve, name='wagtaildocs_serve'),
url(r'^authenticate_with_password/(\d+)/$', serve.authenticate_with_password,
name='wagtaildocs_authenticate_with_password'),
]
| mikedingjan/wagtail | wagtail/documents/urls.py | Python | bsd-3-clause | 298 |
#!/usr/bin/env python
# File created on 10 Nov 2011
from __future__ import division
__author__ = "Jesse Stombaugh"
__copyright__ = "Copyright 2011, The QIIME project"
__credits__ = ["Jesse Stombaugh"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Jesse Stombaugh"
__email__ = "[email protected]"
from qiime.util import parse_command_line_parameters, make_option
from cogent.parse.tree import DndParser
from cogent.core.tree import PhyloNode
from qiime.clean_raxml_parsimony_tree import decorate_numtips, decorate_depth,\
get_insert_dict, drop_duplicate_nodes
scoring_methods = ['depth', 'numtips']
script_info = {}
script_info['brief_description'] = "Remove duplicate tips from Raxml Tree"
script_info[
'script_description'] = "This script allows the user to remove specific duplicate tips from a Raxml tree."
script_info['script_usage'] = []
script_info['script_usage'].append(
("Example (depth):",
"For this case the user can pass in input Raxml tree, duplicate tips, and define an output filepath. When using the depth option, only the deepest replicate is kept. ",
" %prog -i raxml_v730_final_placement.tre -t 6 -o raxml_v730_final_placement_depth.tre"))
script_info['script_usage'].append(
("Example (numtips):",
"For this case the user can pass in input Raxml tree, duplicate tips, and define an output filepath. When using the numtips option, the replicate with the fewest siblings is kept. ",
" %prog -i raxml_v730_final_placement.tre -t 6 -o raxml_v730_final_placement_numtips.tre -s numtips"))
script_info['output_description'] = ""
script_info['required_options'] = [
make_option(
'-i',
'--input_tree',
type="existing_filepath",
help='the input raxml parsimony tree'),
make_option(
'-t',
'--tips_to_keep',
type="string",
help='the input tips to score and retain (comma-separated list)'),
make_option(
'-o',
'--output_fp',
type="new_filepath",
help='the output filepath'),
]
script_info['optional_options'] = [
make_option(
'-s',
'--scoring_method',
type="choice",
help='the scoring method either depth or numtips [default: %default]',
default='depth',
choices=scoring_methods),
]
script_info['version'] = __version__
def main():
option_parser, opts, args =\
parse_command_line_parameters(**script_info)
# get options
tree_fp = opts.input_tree
tips_to_keep = opts.tips_to_keep.split(',')
scoring_method = opts.scoring_method
# load tree
tree = DndParser(open(tree_fp, 'U'), constructor=PhyloNode)
# decorate measurements onto tree (either by depth or by number of
# children)
if scoring_method == 'depth':
tree2 = decorate_depth(tree)
elif scoring_method == 'numtips':
tree2 = decorate_numtips(tree)
# get the nodes for the inserted sequences
nodes_dict = get_insert_dict(tree2, set(tips_to_keep))
# remove nodes accordingly
final_tree = drop_duplicate_nodes(tree2, nodes_dict)
# final_tree.nameUnnamedNodes()
# write out the resulting tree
open_outpath = open(opts.output_fp, 'w')
open_outpath.write(final_tree.getNewick(with_distances=True))
open_outpath.close()
if __name__ == "__main__":
main()
| josenavas/qiime | scripts/clean_raxml_parsimony_tree.py | Python | gpl-2.0 | 3,356 |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from openstack_dashboard.api import base
from openstack_dashboard.api import cinder
from openstack_dashboard.api import keystone
from openstack_dashboard.api import neutron
from openstack_dashboard.api import nova
from openstack_dashboard.dashboards.admin.info import constants
from openstack_dashboard.dashboards.admin.info import tables
class ServicesTab(tabs.TableTab):
table_classes = (tables.ServicesTable,)
name = _("Services")
slug = "services"
template_name = constants.INFO_DETAIL_TEMPLATE_NAME
def get_services_data(self):
request = self.tab_group.request
services = []
for i, service in enumerate(request.user.service_catalog):
service['id'] = i
services.append(
keystone.Service(service, request.user.services_region))
return services
class NovaServicesTab(tabs.TableTab):
table_classes = (tables.NovaServicesTable,)
name = _("Compute Services")
slug = "nova_services"
template_name = constants.INFO_DETAIL_TEMPLATE_NAME
permissions = ('openstack.services.compute',)
def get_nova_services_data(self):
try:
services = nova.service_list(self.tab_group.request)
except Exception:
msg = _('Unable to get nova services list.')
exceptions.check_message(["Connection", "refused"], msg)
exceptions.handle(self.request, msg)
services = []
return services
class CinderServicesTab(tabs.TableTab):
table_classes = (tables.CinderServicesTable,)
name = _("Block Storage Services")
slug = "cinder_services"
template_name = constants.INFO_DETAIL_TEMPLATE_NAME
permissions = ('openstack.services.volume',)
def get_cinder_services_data(self):
try:
services = cinder.service_list(self.tab_group.request)
except Exception:
msg = _('Unable to get cinder services list.')
exceptions.check_message(["Connection", "refused"], msg)
exceptions.handle(self.request, msg)
services = []
return services
class NetworkAgentsTab(tabs.TableTab):
table_classes = (tables.NetworkAgentsTable,)
name = _("Network Agents")
slug = "network_agents"
template_name = constants.INFO_DETAIL_TEMPLATE_NAME
def allowed(self, request):
try:
return (base.is_service_enabled(request, 'network') and
neutron.is_extension_supported(request, 'agent'))
except Exception:
exceptions.handle(request, _('Unable to get network agents info.'))
return False
def get_network_agents_data(self):
try:
agents = neutron.agent_list(self.tab_group.request)
except Exception:
msg = _('Unable to get network agents list.')
exceptions.check_message(["Connection", "refused"], msg)
exceptions.handle(self.request, msg)
agents = []
return agents
class SystemInfoTabs(tabs.TabGroup):
slug = "system_info"
tabs = (ServicesTab, NovaServicesTab, CinderServicesTab,
NetworkAgentsTab)
sticky = True
| zouyapeng/horizon-newtouch | openstack_dashboard/dashboards/admin/info/tabs.py | Python | apache-2.0 | 3,879 |
# -*- coding: utf-8 -*-
import collections
import logging
import time
from mock import MagicMock, patch
from . import unittest
from kafka import KafkaClient, SimpleProducer
from kafka.common import (
AsyncProducerQueueFull, FailedPayloadsError, NotLeaderForPartitionError,
ProduceResponse, RetryOptions, TopicAndPartition
)
from kafka.producer.base import Producer, _send_upstream
from kafka.protocol import CODEC_NONE
import threading
try:
from queue import Empty, Queue
except ImportError:
from Queue import Empty, Queue
try:
xrange
except NameError:
xrange = range
class TestKafkaProducer(unittest.TestCase):
def test_producer_message_types(self):
producer = Producer(MagicMock())
topic = b"test-topic"
partition = 0
bad_data_types = (u'你怎么样?', 12, ['a', 'list'], ('a', 'tuple'), {'a': 'dict'})
for m in bad_data_types:
with self.assertRaises(TypeError):
logging.debug("attempting to send message of type %s", type(m))
producer.send_messages(topic, partition, m)
good_data_types = (b'a string!',)
for m in good_data_types:
# This should not raise an exception
producer.send_messages(topic, partition, m)
def test_topic_message_types(self):
client = MagicMock()
def partitions(topic):
return [0, 1]
client.get_partition_ids_for_topic = partitions
producer = SimpleProducer(client, random_start=False)
topic = b"test-topic"
producer.send_messages(topic, b'hi')
assert client.send_produce_request.called
@patch('kafka.producer.base._send_upstream')
def test_producer_async_queue_overfilled(self, mock):
queue_size = 2
producer = Producer(MagicMock(), async=True,
async_queue_maxsize=queue_size)
topic = b'test-topic'
partition = 0
message = b'test-message'
with self.assertRaises(AsyncProducerQueueFull):
message_list = [message] * (queue_size + 1)
producer.send_messages(topic, partition, *message_list)
self.assertEqual(producer.queue.qsize(), queue_size)
for _ in xrange(producer.queue.qsize()):
producer.queue.get()
def test_producer_sync_fail_on_error(self):
error = FailedPayloadsError('failure')
with patch.object(KafkaClient, 'load_metadata_for_topics'):
with patch.object(KafkaClient, 'get_partition_ids_for_topic', return_value=[0, 1]):
with patch.object(KafkaClient, '_send_broker_aware_request', return_value = [error]):
client = KafkaClient(MagicMock())
producer = SimpleProducer(client, async=False, sync_fail_on_error=False)
# This should not raise
(response,) = producer.send_messages('foobar', b'test message')
self.assertEqual(response, error)
producer = SimpleProducer(client, async=False, sync_fail_on_error=True)
with self.assertRaises(FailedPayloadsError):
producer.send_messages('foobar', b'test message')
class TestKafkaProducerSendUpstream(unittest.TestCase):
def setUp(self):
self.client = MagicMock()
self.queue = Queue()
def _run_process(self, retries_limit=3, sleep_timeout=1):
# run _send_upstream process with the queue
stop_event = threading.Event()
retry_options = RetryOptions(limit=retries_limit,
backoff_ms=50,
retry_on_timeouts=False)
self.thread = threading.Thread(
target=_send_upstream,
args=(self.queue, self.client, CODEC_NONE,
0.3, # batch time (seconds)
3, # batch length
Producer.ACK_AFTER_LOCAL_WRITE,
Producer.DEFAULT_ACK_TIMEOUT,
retry_options,
stop_event))
self.thread.daemon = True
self.thread.start()
time.sleep(sleep_timeout)
stop_event.set()
def test_wo_retries(self):
# lets create a queue and add 10 messages for 1 partition
for i in range(10):
self.queue.put((TopicAndPartition("test", 0), "msg %i", "key %i"))
self._run_process()
# the queue should be void at the end of the test
self.assertEqual(self.queue.empty(), True)
        # there should be 4 non-void calls:
# 3 batches of 3 msgs each + 1 batch of 1 message
self.assertEqual(self.client.send_produce_request.call_count, 4)
def test_first_send_failed(self):
# lets create a queue and add 10 messages for 10 different partitions
# to show how retries should work ideally
for i in range(10):
self.queue.put((TopicAndPartition("test", i), "msg %i", "key %i"))
# Mock offsets counter for closure
offsets = collections.defaultdict(lambda: collections.defaultdict(lambda: 0))
self.client.is_first_time = True
def send_side_effect(reqs, *args, **kwargs):
if self.client.is_first_time:
self.client.is_first_time = False
return [FailedPayloadsError(req) for req in reqs]
responses = []
for req in reqs:
offset = offsets[req.topic][req.partition]
offsets[req.topic][req.partition] += len(req.messages)
responses.append(
ProduceResponse(req.topic, req.partition, 0, offset)
)
return responses
self.client.send_produce_request.side_effect = send_side_effect
self._run_process(2)
# the queue should be void at the end of the test
self.assertEqual(self.queue.empty(), True)
# there should be 5 non-void calls: 1st failed batch of 3 msgs
# plus 3 batches of 3 msgs each + 1 batch of 1 message
self.assertEqual(self.client.send_produce_request.call_count, 5)
def test_with_limited_retries(self):
# lets create a queue and add 10 messages for 10 different partitions
# to show how retries should work ideally
for i in range(10):
self.queue.put((TopicAndPartition("test", i), "msg %i" % i, "key %i" % i))
def send_side_effect(reqs, *args, **kwargs):
return [FailedPayloadsError(req) for req in reqs]
self.client.send_produce_request.side_effect = send_side_effect
self._run_process(3, 3)
# the queue should be void at the end of the test
self.assertEqual(self.queue.empty(), True)
# there should be 16 non-void calls:
# 3 initial batches of 3 msgs each + 1 initial batch of 1 msg +
# 3 retries of the batches above = (1 + 3 retries) * 4 batches = 16
self.assertEqual(self.client.send_produce_request.call_count, 16)
def test_async_producer_not_leader(self):
for i in range(10):
self.queue.put((TopicAndPartition("test", i), "msg %i", "key %i"))
# Mock offsets counter for closure
offsets = collections.defaultdict(lambda: collections.defaultdict(lambda: 0))
self.client.is_first_time = True
def send_side_effect(reqs, *args, **kwargs):
if self.client.is_first_time:
self.client.is_first_time = False
return [ProduceResponse(req.topic, req.partition,
NotLeaderForPartitionError.errno, -1)
for req in reqs]
responses = []
for req in reqs:
offset = offsets[req.topic][req.partition]
offsets[req.topic][req.partition] += len(req.messages)
responses.append(
ProduceResponse(req.topic, req.partition, 0, offset)
)
return responses
self.client.send_produce_request.side_effect = send_side_effect
self._run_process(2)
# the queue should be void at the end of the test
self.assertEqual(self.queue.empty(), True)
# there should be 5 non-void calls: 1st failed batch of 3 msgs
# + 3 batches of 3 msgs each + 1 batch of 1 msg = 1 + 3 + 1 = 5
self.assertEqual(self.client.send_produce_request.call_count, 5)
def tearDown(self):
for _ in xrange(self.queue.qsize()):
self.queue.get()
| nmandavia/kafka-python | test/test_producer.py | Python | apache-2.0 | 8,543 |
""" Additional extras go here.
""" | kartta-labs/mapwarper | lib/tilestache/TileStache-1.51.5/TileStache/Goodies/__init__.py | Python | mit | 34 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2011 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Modules dependency graph. """
import os, sys, imp
from os.path import join as opj
import itertools
import zipimport
import openerp
import openerp.osv as osv
import openerp.tools as tools
import openerp.tools.osutil as osutil
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
import zipfile
import openerp.release as release
import re
import base64
from zipfile import PyZipFile, ZIP_DEFLATED
from cStringIO import StringIO
import logging
_logger = logging.getLogger(__name__)
class Graph(dict):
""" Modules dependency graph.
The graph is a mapping from module name to Nodes.
"""
def add_node(self, name, info):
max_depth, father = 0, None
for n in [Node(x, self, None) for x in info['depends']]:
if n.depth >= max_depth:
father = n
max_depth = n.depth
if father:
return father.add_child(name, info)
else:
return Node(name, self, info)
def update_from_db(self, cr):
if not len(self):
return
# update the graph with values from the database (if exist)
## First, we set the default values for each package in graph
additional_data = dict.fromkeys(self.keys(), {'id': 0, 'state': 'uninstalled', 'dbdemo': False, 'installed_version': None})
## Then we get the values from the database
cr.execute('SELECT name, id, state, demo AS dbdemo, latest_version AS installed_version'
' FROM ir_module_module'
' WHERE name IN %s',(tuple(additional_data),)
)
## and we update the default values with values from the database
additional_data.update(dict([(x.pop('name'), x) for x in cr.dictfetchall()]))
for package in self.values():
for k, v in additional_data[package.name].items():
setattr(package, k, v)
def add_module(self, cr, module, force=None):
self.add_modules(cr, [module], force)
def add_modules(self, cr, module_list, force=None):
if force is None:
force = []
packages = []
len_graph = len(self)
# force additional dependencies for the upgrade process if given
# in config file
forced_deps = tools.config.get_misc('openupgrade', 'force_deps', '{}')
forced_deps = tools.config.get_misc('openupgrade',
'force_deps_' + release.version,
forced_deps)
forced_deps = tools.safe_eval.safe_eval(forced_deps)
for module in module_list:
# This will raise an exception if no/unreadable descriptor file.
# NOTE The call to load_information_from_description_file is already
# done by db.initialize, so it is possible to not do it again here.
info = openerp.modules.module.load_information_from_description_file(module)
if info and info['installable']:
info['depends'].extend(forced_deps.get(module, []))
packages.append((module, info)) # TODO directly a dict, like in get_modules_with_version
else:
_logger.warning('module %s: not installable, skipped', module)
dependencies = dict([(p, info['depends']) for p, info in packages])
current, later = set([p for p, info in packages]), set()
while packages and current > later:
package, info = packages[0]
deps = info['depends']
# if all dependencies of 'package' are already in the graph, add 'package' in the graph
if reduce(lambda x, y: x and y in self, deps, True):
if not package in current:
packages.pop(0)
continue
later.clear()
current.remove(package)
node = self.add_node(package, info)
node.data = info
for kind in ('init', 'demo', 'update'):
if package in tools.config[kind] or 'all' in tools.config[kind] or kind in force:
setattr(node, kind, True)
else:
later.add(package)
packages.append((package, info))
packages.pop(0)
self.update_from_db(cr)
for package in later:
unmet_deps = filter(lambda p: p not in self, dependencies[package])
_logger.error('module %s: Unmet dependencies: %s', package, ', '.join(unmet_deps))
result = len(self) - len_graph
if result != len(module_list):
_logger.warning('Some modules were not loaded.')
return result
def __iter__(self):
level = 0
done = set(self.keys())
while done:
level_modules = sorted((name, module) for name, module in self.items() if module.depth==level)
for name, module in level_modules:
done.remove(name)
yield module
level += 1
class Singleton(object):
def __new__(cls, name, graph, info):
if name in graph:
inst = graph[name]
else:
inst = object.__new__(cls)
inst.name = name
inst.info = info
graph[name] = inst
return inst
class Node(Singleton):
""" One module in the modules dependency graph.
Node acts as a per-module singleton. A node is constructed via
Graph.add_module() or Graph.add_modules(). Some of its fields are from
    ir_module_module (set by Graph.update_from_db()).
"""
def __init__(self, name, graph, info):
self.graph = graph
if not hasattr(self, 'children'):
self.children = []
if not hasattr(self, 'depth'):
self.depth = 0
def add_child(self, name, info):
node = Node(name, self.graph, info)
node.depth = self.depth + 1
if node not in self.children:
self.children.append(node)
for attr in ('init', 'update', 'demo'):
if hasattr(self, attr):
setattr(node, attr, True)
self.children.sort(lambda x, y: cmp(x.name, y.name))
return node
def __setattr__(self, name, value):
super(Singleton, self).__setattr__(name, value)
if name in ('init', 'update', 'demo'):
tools.config[name][self.name] = 1
for child in self.children:
setattr(child, name, value)
if name == 'depth':
for child in self.children:
setattr(child, name, value + 1)
def __iter__(self):
return itertools.chain(iter(self.children), *map(iter, self.children))
def __str__(self):
return self._pprint()
def _pprint(self, depth=0):
s = '%s\n' % self.name
for c in self.children:
s += '%s`-> %s' % (' ' * depth, c._pprint(depth+1))
return s
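# Illustrative sketch (not part of the original module): Graph.__iter__()
# yields modules level by level, sorted by name inside each level.  With a
# hypothetical name -> depth mapping the same ordering logic reads:
#
#     depths = {'base': 0, 'sale': 1, 'stock': 1, 'sale_stock': 2}
#     for level in range(max(depths.values()) + 1):
#         for name in sorted(n for n, d in depths.items() if d == level):
#             print name        # base, sale, stock, sale_stock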
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| legalsylvain/OpenUpgrade | openerp/modules/graph.py | Python | agpl-3.0 | 8,097 |
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.mashups.interactive import interactive_shell
import boto
import os
import time
import shutil
import StringIO
import paramiko
import socket
import subprocess
class SSHClient(object):
def __init__(self, server,
host_key_file='~/.ssh/known_hosts',
uname='root', timeout=None, ssh_pwd=None):
self.server = server
self.host_key_file = host_key_file
self.uname = uname
self._timeout = timeout
self._pkey = paramiko.RSAKey.from_private_key_file(server.ssh_key_file,
password=ssh_pwd)
self._ssh_client = paramiko.SSHClient()
self._ssh_client.load_system_host_keys()
self._ssh_client.load_host_keys(os.path.expanduser(host_key_file))
self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.connect()
def connect(self, num_retries=5):
retry = 0
while retry < num_retries:
try:
self._ssh_client.connect(self.server.hostname,
username=self.uname,
pkey=self._pkey,
timeout=self._timeout)
return
except socket.error, (value, message):
if value in (51, 61, 111):
print 'SSH Connection refused, will retry in 5 seconds'
time.sleep(5)
retry += 1
else:
raise
except paramiko.BadHostKeyException:
print "%s has an entry in ~/.ssh/known_hosts and it doesn't match" % self.server.hostname
print 'Edit that file to remove the entry and then hit return to try again'
raw_input('Hit Enter when ready')
retry += 1
except EOFError:
print 'Unexpected Error from SSH Connection, retry in 5 seconds'
time.sleep(5)
retry += 1
print 'Could not establish SSH connection'
def open_sftp(self):
return self._ssh_client.open_sftp()
def get_file(self, src, dst):
sftp_client = self.open_sftp()
sftp_client.get(src, dst)
def put_file(self, src, dst):
sftp_client = self.open_sftp()
sftp_client.put(src, dst)
def open(self, filename, mode='r', bufsize=-1):
"""
Open a file on the remote system and return a file-like object.
"""
sftp_client = self.open_sftp()
return sftp_client.open(filename, mode, bufsize)
def listdir(self, path):
sftp_client = self.open_sftp()
return sftp_client.listdir(path)
def isdir(self, path):
status = self.run('[ -d %s ] || echo "FALSE"' % path)
if status[1].startswith('FALSE'):
return 0
return 1
def exists(self, path):
status = self.run('[ -a %s ] || echo "FALSE"' % path)
if status[1].startswith('FALSE'):
return 0
return 1
def shell(self):
"""
Start an interactive shell session on the remote host.
"""
channel = self._ssh_client.invoke_shell()
interactive_shell(channel)
def run(self, command):
"""
Execute a command on the remote host. Return a tuple containing
an integer status and two strings, the first containing stdout
and the second containing stderr from the command.
"""
boto.log.debug('running:%s on %s' % (command, self.server.instance_id))
        status = 0
        std_out = ''
        std_err = ''
        try:
            t = self._ssh_client.exec_command(command)
            std_out = t[1].read()
            std_err = t[2].read()
            t[0].close()
            t[1].close()
            t[2].close()
        except paramiko.SSHException:
            # The command could not be started; report a failure status
            # with empty stdout/stderr.
            status = 1
boto.log.debug('stdout: %s' % std_out)
boto.log.debug('stderr: %s' % std_err)
return (status, std_out, std_err)
def run_pty(self, command):
"""
Execute a command on the remote host with a pseudo-terminal.
        Returns the Paramiko channel object; the caller reads the command's
        output from that channel.
"""
boto.log.debug('running:%s on %s' % (command, self.server.instance_id))
channel = self._ssh_client.get_transport().open_session()
channel.get_pty()
channel.exec_command(command)
return channel
def close(self):
transport = self._ssh_client.get_transport()
transport.close()
self.server.reset_cmdshell()
class LocalClient(object):
def __init__(self, server, host_key_file=None, uname='root'):
self.server = server
self.host_key_file = host_key_file
self.uname = uname
def get_file(self, src, dst):
shutil.copyfile(src, dst)
def put_file(self, src, dst):
shutil.copyfile(src, dst)
def listdir(self, path):
return os.listdir(path)
def isdir(self, path):
return os.path.isdir(path)
def exists(self, path):
return os.path.exists(path)
def shell(self):
raise NotImplementedError('shell not supported with LocalClient')
    def run(self, command):
        # Run the command locally, mirroring the SSHClient.run() calling
        # convention of taking the command as an argument.
        boto.log.info('running:%s' % command)
        log_fp = StringIO.StringIO()
        process = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)
while process.poll() is None:
time.sleep(1)
t = process.communicate()
log_fp.write(t[0])
log_fp.write(t[1])
boto.log.info(log_fp.getvalue())
boto.log.info('output: %s' % log_fp.getvalue())
return (process.returncode, log_fp.getvalue())
def close(self):
pass
class FakeServer(object):
"""
A little class to fake out SSHClient (which is expecting a
    :class:`boto.manage.server.Server` instance).  This allows us
    to create an SSHClient directly from an EC2 instance object.
"""
def __init__(self, instance, ssh_key_file):
self.instance = instance
self.ssh_key_file = ssh_key_file
self.hostname = instance.dns_name
self.instance_id = self.instance.id
def start(server):
instance_id = boto.config.get('Instance', 'instance-id', None)
if instance_id == server.instance_id:
return LocalClient(server)
else:
return SSHClient(server)
def sshclient_from_instance(instance, ssh_key_file,
host_key_file='~/.ssh/known_hosts',
user_name='root', ssh_pwd=None):
"""
Create and return an SSHClient object given an
instance object.
:type instance: :class`boto.ec2.instance.Instance` object
:param instance: The instance object.
:type ssh_key_file: str
:param ssh_key_file: A path to the private key file used
to log into instance.
:type host_key_file: str
:param host_key_file: A path to the known_hosts file used
by the SSH client.
Defaults to ~/.ssh/known_hosts
:type user_name: str
:param user_name: The username to use when logging into
the instance. Defaults to root.
:type ssh_pwd: str
:param ssh_pwd: The passphrase, if any, associated with
private key.
"""
s = FakeServer(instance, ssh_key_file)
return SSHClient(s, host_key_file, user_name, ssh_pwd)
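# Illustrative usage (a hedged sketch, not part of the original module): given
# a boto EC2 connection, a shell to a running instance could be obtained
# roughly as below.  The region, instance id, key path and user name are all
# hypothetical placeholders.
#
#     conn = boto.ec2.connect_to_region('us-east-1')
#     instance = conn.get_only_instances(instance_ids=['i-0123456789abcdef0'])[0]
#     ssh = sshclient_from_instance(instance, '/path/to/key.pem', user_name='ec2-user')
#     status, stdout, stderr = ssh.run('uptime')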
| harshilasu/LinkurApp | y/google-cloud-sdk/platform/gsutil/third_party/boto/boto/manage/cmdshell.py | Python | gpl-3.0 | 8,585 |
import time
from ajenti.com import *
from ajenti.ui import *
from ajenti.utils import shell, str_fsize
class BSDIfconfig(Plugin):
platform = ['FreeBSD']
def get_info(self, iface):
ui = UI.Container(
UI.Formline(
UI.HContainer(
UI.Image(file='/dl/network/%s.png'%('up' if iface.up else 'down')),
UI.Label(text=iface.name, bold=True)
),
text='Interface',
),
UI.Formline(
UI.Label(text=self.get_ip(iface)),
text='Address',
),
UI.Formline(
UI.Label(text='Up %s, down %s' % (
str_fsize(self.get_tx(iface)),
str_fsize(self.get_rx(iface)),
)),
text='Traffic',
),
)
return ui
def get_tx(self, iface):
s = shell('netstat -bI %s | grep -v Link | grep -v pkts'%iface.name)
try:
s = s.split()[10]
except:
s = '0'
return int(s)
def get_rx(self, iface):
s = shell('netstat -bI %s | grep -v Link | grep -v pkts'%iface.name)
try:
s = s.split()[7]
except:
s = '0'
return int(s)
def get_ip(self, iface):
s = shell('ifconfig %s | grep \'inet \''%iface.name)
try:
s = s.split()[1]
except:
s = '0.0.0.0'
return s
def detect_dev_class(self, iface):
if iface.name[:-1] == 'gif':
return 'tunnel'
if iface.name == 'lo':
return 'loopback'
return 'ethernet'
def detect_iface_bits(self, iface):
r = ['bsd-basic']
cls = self.detect_dev_class(iface)
if iface.addressing == 'static':
r.append('bsd-ipv4')
if cls == 'tunnel':
r.append('bsd-tunnel')
return r
def up(self, iface):
shell('ifconfig %s up' % iface.name)
time.sleep(1)
def down(self, iface):
shell('ifconfig %s down' % iface.name)
time.sleep(1)
| DVSBA/ajenti | plugins/network/nctp_bsd.py | Python | lgpl-3.0 | 2,275 |
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the available providers."""
__author__ = '[email protected] (Jason Stredwick)'
class Provider(object):
"""Define available providers."""
DATASTORE = 'datastore'
ISSUETRACKER = 'issuetracker'
| liefdiy/bite-project | tools/bugs/server/appengine/providers/provider.py | Python | apache-2.0 | 816 |
# Copyright (c) 2014 Thales Services SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""correct Vxlan Endpoint primary key
Revision ID: 4eba2f05c2f4
Revises: 884573acbf1c
Create Date: 2014-07-07 22:48:38.544323
"""
# revision identifiers, used by Alembic.
revision = '4eba2f05c2f4'
down_revision = '884573acbf1c'
from alembic import op
TABLE_NAME = 'ml2_vxlan_endpoints'
PK_NAME = 'ml2_vxlan_endpoints_pkey'
def upgrade():
op.drop_constraint(PK_NAME, TABLE_NAME, type_='primary')
op.create_primary_key(PK_NAME, TABLE_NAME, cols=['ip_address'])
def downgrade():
op.drop_constraint(PK_NAME, TABLE_NAME, type_='primary')
op.create_primary_key(PK_NAME, TABLE_NAME, cols=['ip_address', 'udp_port'])
| cernops/neutron | neutron/db/migration/alembic_migrations/versions/4eba2f05c2f4_correct_vxlan_endpoint_primary_key.py | Python | apache-2.0 | 1,254 |
#!/usr/bin/env python3
from storer import Storer
import sys
s = Storer()
if s.get_value() != 0:
print('Initial value incorrect.')
sys.exit(1)
s.set_value(42)
if s.get_value() != 42:
print('Setting value failed.')
sys.exit(1)
try:
s.set_value('not a number')
print('Using wrong argument type did not fail.')
sys.exit(1)
except TypeError:
pass
| aaronp24/meson | test cases/python3/3 cython/cytest.py | Python | apache-2.0 | 380 |
#
# The Python Imaging Library.
# $Id$
#
# MSP file handling
#
# This is the format used by the Paint program in Windows 1 and 2.
#
# History:
# 95-09-05 fl Created
# 97-01-03 fl Read/write MSP images
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1995-97.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.1"
from PIL import Image, ImageFile, _binary
#
# read MSP files
i16 = _binary.i16le
def _accept(prefix):
return prefix[:4] in [b"DanM", b"LinS"]
##
# Image plugin for Windows MSP images.  This plugin recognizes both the
# uncompressed (Windows 1.0) and the compressed (Windows 2.0) variants;
# saving always produces the uncompressed form.
class MspImageFile(ImageFile.ImageFile):
format = "MSP"
format_description = "Windows Paint"
def _open(self):
# Header
s = self.fp.read(32)
if s[:4] not in [b"DanM", b"LinS"]:
raise SyntaxError("not an MSP file")
# Header checksum
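        # The 32-byte header carries a checksum word chosen so that XOR-ing
        # all sixteen little-endian 16-bit words yields zero; any non-zero
        # result therefore indicates a corrupt header.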
sum = 0
for i in range(0, 32, 2):
sum = sum ^ i16(s[i:i+2])
if sum != 0:
raise SyntaxError("bad MSP checksum")
self.mode = "1"
self.size = i16(s[4:]), i16(s[6:])
if s[:4] == b"DanM":
self.tile = [("raw", (0,0)+self.size, 32, ("1", 0, 1))]
else:
self.tile = [("msp", (0,0)+self.size, 32+2*self.size[1], None)]
#
# write MSP files (uncompressed only)
o16 = _binary.o16le
def _save(im, fp, filename):
if im.mode != "1":
raise IOError("cannot write mode %s as MSP" % im.mode)
# create MSP header
header = [0] * 16
header[0], header[1] = i16(b"Da"), i16(b"nM") # version 1
header[2], header[3] = im.size
header[4], header[5] = 1, 1
header[6], header[7] = 1, 1
header[8], header[9] = im.size
sum = 0
for h in header:
sum = sum ^ h
header[12] = sum # FIXME: is this the right field?
# header
for h in header:
fp.write(o16(h))
# image body
ImageFile._save(im, fp, [("raw", (0,0)+im.size, 32, ("1", 0, 1))])
#
# registry
Image.register_open("MSP", MspImageFile, _accept)
Image.register_save("MSP", _save)
Image.register_extension("MSP", ".msp")
| havard024/prego | venv/lib/python2.7/site-packages/PIL/MspImagePlugin.py | Python | mit | 2,173 |
from .wiki import *
| procamora/Wiki-Personal | pelican-plugins/github-wiki/__init__.py | Python | gpl-3.0 | 20 |
"""SymPy is a Python library for symbolic mathematics. It aims to become a
full-featured computer algebra system (CAS) while keeping the code as
simple as possible in order to be comprehensible and easily extensible.
SymPy is written entirely in Python and does not require any external
libraries, except optionally for plotting support.
See the webpage for more information and documentation:
http://sympy.org
"""
from __future__ import absolute_import, print_function
from sympy.release import __version__
import sys
if sys.version_info[0] == 2 and sys.version_info[1] < 6:
raise ImportError("Python Version 2.6 or above is required for SymPy.")
else:  # Python 2.6+ or Python 3
pass
# Here we can also check for specific Python 3 versions, if needed
del sys
def __sympy_debug():
# helper function so we don't import os globally
import os
debug_str = os.getenv('SYMPY_DEBUG', 'False')
if debug_str in ('True', 'False'):
return eval(debug_str)
else:
raise RuntimeError("unrecognized value for SYMPY_DEBUG: %s" %
debug_str)
SYMPY_DEBUG = __sympy_debug()
from .core import *
from .logic import *
from .assumptions import *
from .polys import *
from .series import *
from .functions import *
from .ntheory import *
from .concrete import *
from .simplify import *
from .sets import *
from .solvers import *
from .matrices import *
from .geometry import *
from .utilities import *
from .integrals import *
from .tensor import *
from .parsing import *
from .calculus import *
# Adds about .04-.05 seconds of import time
# from combinatorics import *
# This module is slow to import:
#from physics import units
from .plotting import plot, textplot, plot_backends, plot_implicit
from .printing import pretty, pretty_print, pprint, pprint_use_unicode, \
pprint_try_use_unicode, print_gtk, print_tree, pager_print, TableForm
from .printing import ccode, fcode, jscode, mathematica_code, octave_code, \
latex, preview
from .printing import python, print_python, srepr, sstr, sstrrepr
from .interactive import init_session, init_printing
evalf._create_evalf_table()
# This is slow to import:
#import abc
| wolfram74/numerical_methods_iserles_notes | venv/lib/python2.7/site-packages/sympy/__init__.py | Python | mit | 2,176 |
# test construction of bytearray from different objects
try:
from uarray import array
except ImportError:
try:
from array import array
except ImportError:
print("SKIP")
raise SystemExit
# arrays
print(bytearray(array('b', [1, 2])))
print(bytearray(array('h', [0x101, 0x202])))
| kerneltask/micropython | tests/basics/bytearray_construct_array.py | Python | mit | 314 |
# Copyright (C) 2014, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import db
from nova import objects
from nova.objects import base
from nova.objects import fields
# TODO(berrange): Remove NovaObjectDictCompat
class DNSDomain(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'domain': fields.StringField(),
'scope': fields.StringField(nullable=True),
'availability_zone': fields.StringField(nullable=True),
'project_id': fields.StringField(nullable=True),
}
@staticmethod
def _from_db_object(context, vif, db_vif):
for field in vif.fields:
vif[field] = db_vif[field]
vif._context = context
vif.obj_reset_changes()
return vif
@base.remotable_classmethod
def get_by_domain(cls, context, domain):
db_dnsd = db.dnsdomain_get(context, domain)
if db_dnsd:
return cls._from_db_object(context, cls(), db_dnsd)
@base.remotable_classmethod
def register_for_zone(cls, context, domain, zone):
db.dnsdomain_register_for_zone(context, domain, zone)
@base.remotable_classmethod
def register_for_project(cls, context, domain, project):
db.dnsdomain_register_for_project(context, domain, project)
@base.remotable_classmethod
def delete_by_domain(cls, context, domain):
db.dnsdomain_unregister(context, domain)
class DNSDomainList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'objects': fields.ListOfObjectsField('DNSDomain'),
}
child_versions = {
'1.0': '1.0',
}
@base.remotable_classmethod
def get_all(cls, context):
db_domains = db.dnsdomain_get_all(context)
return base.obj_make_list(context, cls(context), objects.DNSDomain,
db_domains)
| petrutlucian94/nova | nova/objects/dns_domain.py | Python | apache-2.0 | 2,520 |
#!/usr/bin/env python
try:
from collections import OrderedDict
import json
except ImportError:
from ordereddict import OrderedDict
import simplejson as json
import itertools
import six
from csvkit import CSVKitWriter
def parse_object(obj, path=''):
"""
Recursively parse JSON objects and a dictionary of paths/keys and values.
Inspired by JSONPipe (https://github.com/dvxhouse/jsonpipe).
"""
if isinstance(obj, dict):
iterator = obj.items()
elif isinstance(obj, (list, tuple)):
iterator = enumerate(obj)
else:
return { path.strip('/'): obj }
d = {}
for key, value in iterator:
key = six.text_type(key)
d.update(parse_object(value, path + key + '/'))
return d
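# Illustrative example (not part of the original module) of the flattening
# performed by parse_object(): nested keys are joined with '/' and list
# indices become path components.
#
#     >>> parse_object({'a': {'b': 1}, 'c': [2, 3]})
#     {'a/b': 1, 'c/0': 2, 'c/1': 3}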
def ndjson2csv(f, key=None, **kwargs):
"""
    Convert a newline-delimited JSON document into CSV format.
    Each line of the input must be a JSON object; the union of the keys seen
    across all lines becomes the set of CSV columns.
"""
first_line = f.readline()
first_row = json.loads(first_line, object_pairs_hook=OrderedDict)
js = itertools.chain((first_row, ), (json.loads(l, object_pairs_hook=OrderedDict) for l in f))
fields = []
flat = []
for obj in js:
flat.append(parse_object(obj))
for key in obj.keys():
if key not in fields:
fields.append(key)
o = six.StringIO()
writer = CSVKitWriter(o)
writer.writerow(fields)
for i in flat:
row = []
for field in fields:
row.append(i.get(field, None))
writer.writerow(row)
output = o.getvalue()
o.close()
return output
| unpingco/csvkit | csvkit/convert/ndjs.py | Python | mit | 1,769 |
'''
This is a one-off command aimed at fixing a temporary problem encountered where input_state was added to
the same dict object in capa problems, so was accumulating. The fix is simply to remove input_state entry
from state for all problems in the affected date range.
'''
import json
import logging
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from courseware.models import StudentModule, StudentModuleHistory
LOG = logging.getLogger(__name__)
class Command(BaseCommand):
'''
The fix here is to remove the "input_state" entry in the StudentModule objects of any problems that
contain them. No problem is yet making use of this, and the code should do the right thing if it's
missing (by recreating an empty dict for its value).
To narrow down the set of problems that might need fixing, the StudentModule
objects to be checked is filtered down to those:
created < '2013-03-29 16:30:00' (the problem must have been answered before the buggy code was reverted,
on Prod and Edge)
modified > '2013-03-28 22:00:00' (the problem must have been visited after the bug was introduced
on Prod and Edge)
state like '%input_state%' (the problem must have "input_state" set).
This filtering is done on the production database replica, so that the larger select queries don't lock
the real production database. The list of id values for Student Modules is written to a file, and the
file is passed into this command. The sql file passed to mysql contains:
select sm.id from courseware_studentmodule sm
where sm.modified > "2013-03-28 22:00:00"
and sm.created < "2013-03-29 16:30:00"
and sm.state like "%input_state%"
and sm.module_type = 'problem';
'''
num_visited = 0
num_changed = 0
num_hist_visited = 0
num_hist_changed = 0
option_list = BaseCommand.option_list + (
make_option('--save',
action='store_true',
dest='save_changes',
default=False,
help='Persist the changes that were encountered. If not set, no changes are saved.'),
)
def fix_studentmodules_in_list(self, save_changes, idlist_path):
'''Read in the list of StudentModule objects that might need fixing, and then fix each one'''
# open file and read id values from it:
for line in open(idlist_path, 'r'):
student_module_id = line.strip()
# skip the header, if present:
if student_module_id == 'id':
continue
try:
module = StudentModule.objects.get(id=student_module_id)
except StudentModule.DoesNotExist:
LOG.error(u"Unable to find student module with id = %s: skipping... ", student_module_id)
continue
self.remove_studentmodule_input_state(module, save_changes)
hist_modules = StudentModuleHistory.objects.filter(student_module_id=student_module_id)
for hist_module in hist_modules:
self.remove_studentmodulehistory_input_state(hist_module, save_changes)
if self.num_visited % 1000 == 0:
LOG.info(" Progress: updated {0} of {1} student modules".format(self.num_changed, self.num_visited))
LOG.info(" Progress: updated {0} of {1} student history modules".format(self.num_hist_changed,
self.num_hist_visited))
@transaction.autocommit
def remove_studentmodule_input_state(self, module, save_changes):
        ''' Remove the input_state entry from a StudentModule's saved state, if present'''
module_state = module.state
if module_state is None:
# not likely, since we filter on it. But in general...
LOG.info("No state found for {type} module {id} for student {student} in course {course_id}"
.format(type=module.module_type, id=module.module_state_key,
student=module.student.username, course_id=module.course_id))
return
state_dict = json.loads(module_state)
self.num_visited += 1
if 'input_state' not in state_dict:
pass
elif save_changes:
# make the change and persist
del state_dict['input_state']
module.state = json.dumps(state_dict)
module.save()
self.num_changed += 1
else:
# don't make the change, but increment the count indicating the change would be made
self.num_changed += 1
@transaction.autocommit
def remove_studentmodulehistory_input_state(self, module, save_changes):
        ''' Remove the input_state entry from a StudentModuleHistory record's saved state, if present'''
module_state = module.state
if module_state is None:
# not likely, since we filter on it. But in general...
LOG.info("No state found for {type} module {id} for student {student} in course {course_id}"
.format(type=module.module_type, id=module.module_state_key,
student=module.student.username, course_id=module.course_id))
return
state_dict = json.loads(module_state)
self.num_hist_visited += 1
if 'input_state' not in state_dict:
pass
elif save_changes:
# make the change and persist
del state_dict['input_state']
module.state = json.dumps(state_dict)
module.save()
self.num_hist_changed += 1
else:
# don't make the change, but increment the count indicating the change would be made
self.num_hist_changed += 1
def handle(self, *args, **options):
'''Handle management command request'''
if len(args) != 1:
raise CommandError("missing idlist file")
idlist_path = args[0]
save_changes = options['save_changes']
LOG.info("Starting run: reading from idlist file {0}; save_changes = {1}".format(idlist_path, save_changes))
self.fix_studentmodules_in_list(save_changes, idlist_path)
LOG.info("Finished run: updating {0} of {1} student modules".format(self.num_changed, self.num_visited))
LOG.info("Finished run: updating {0} of {1} student history modules".format(self.num_hist_changed,
self.num_hist_visited))
| beni55/edx-platform | lms/djangoapps/courseware/management/commands/remove_input_state.py | Python | agpl-3.0 | 6,715 |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <[email protected]>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
import traceback
from ansible.module_utils.ansible_release import __version__
from ansible.module_utils.basic import missing_required_lib, env_fallback
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.cloud import CloudRetry
from ansible.module_utils.six import string_types, binary_type, text_type
from ansible.module_utils.common.dict_transformations import (
camel_dict_to_snake_dict, snake_dict_to_camel_dict,
_camel_to_snake, _snake_to_camel,
)
BOTO_IMP_ERR = None
try:
import boto
import boto.ec2 # boto does weird import stuff
HAS_BOTO = True
except ImportError:
BOTO_IMP_ERR = traceback.format_exc()
HAS_BOTO = False
BOTO3_IMP_ERR = None
try:
import boto3
import botocore
HAS_BOTO3 = True
except Exception:
BOTO3_IMP_ERR = traceback.format_exc()
HAS_BOTO3 = False
try:
# Although this is to allow Python 3 the ability to use the custom comparison as a key, Python 2.7 also
# uses this (and it works as expected). Python 2.6 will trigger the ImportError.
from functools import cmp_to_key
PY3_COMPARISON = True
except ImportError:
PY3_COMPARISON = False
class AnsibleAWSError(Exception):
pass
def _botocore_exception_maybe():
"""
Allow for boto3 not being installed when using these utils by wrapping
botocore.exceptions instead of assigning from it directly.
"""
if HAS_BOTO3:
return botocore.exceptions.ClientError
return type(None)
class AWSRetry(CloudRetry):
base_class = _botocore_exception_maybe()
@staticmethod
def status_code_from_exception(error):
return error.response['Error']['Code']
@staticmethod
def found(response_code, catch_extra_error_codes=None):
# This list of failures is based on this API Reference
# http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html
#
# TooManyRequestsException comes from inside botocore when it
        # does retries, but unfortunately it does not retry for long
# enough to allow some services such as API Gateway to
# complete configuration. At the moment of writing there is a
# botocore/boto3 bug open to fix this.
#
# https://github.com/boto/boto3/issues/876 (and linked PRs etc)
retry_on = [
'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable',
'InternalFailure', 'InternalError', 'TooManyRequestsException',
'Throttling'
]
if catch_extra_error_codes:
retry_on.extend(catch_extra_error_codes)
not_found = re.compile(r'^\w+.NotFound')
return response_code in retry_on or not_found.search(response_code)
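# Illustrative usage (a hedged sketch): AWSRetry is normally applied as a
# decorator around throttled API calls.  The decorator name used below comes
# from the CloudRetry base class in module_utils.cloud and is assumed here,
# not defined in this file:
#
#     @AWSRetry.exponential_backoff(retries=5, delay=2)
#     def describe_instances(client, **kwargs):
#         return client.describe_instances(**kwargs)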
def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params):
try:
return _boto3_conn(conn_type=conn_type, resource=resource, region=region, endpoint=endpoint, **params)
except ValueError as e:
module.fail_json(msg="Couldn't connect to AWS: %s" % to_native(e))
except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError,
botocore.exceptions.NoCredentialsError, botocore.exceptions.ConfigParseError) as e:
module.fail_json(msg=to_native(e))
except botocore.exceptions.NoRegionError as e:
module.fail_json(msg="The %s module requires a region and none was found in configuration, "
"environment variables or module parameters" % module._name)
def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **params):
profile = params.pop('profile_name', None)
if conn_type not in ['both', 'resource', 'client']:
raise ValueError('There is an issue in the calling code. You '
'must specify either both, resource, or client to '
'the conn_type parameter in the boto3_conn function '
'call')
if params.get('config'):
config = params.pop('config')
config.user_agent_extra = 'Ansible/{0}'.format(__version__)
else:
config = botocore.config.Config(
user_agent_extra='Ansible/{0}'.format(__version__),
)
session = boto3.session.Session(
profile_name=profile,
)
if conn_type == 'resource':
return session.resource(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
elif conn_type == 'client':
return session.client(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
else:
client = session.client(resource, region_name=region, endpoint_url=endpoint, **params)
resource = session.resource(resource, region_name=region, endpoint_url=endpoint, **params)
return client, resource
boto3_inventory_conn = _boto3_conn
def boto_exception(err):
"""
Extracts the error message from a boto exception.
:param err: Exception from boto
:return: Error message
"""
if hasattr(err, 'error_message'):
error = err.error_message
elif hasattr(err, 'message'):
error = str(err.message) + ' ' + str(err) + ' - ' + str(type(err))
else:
error = '%s: %s' % (Exception, err)
return error
def aws_common_argument_spec():
return dict(
debug_botocore_endpoint_logs=dict(fallback=(env_fallback, ['ANSIBLE_DEBUG_BOTOCORE_LOGS']), default=False, type='bool'),
ec2_url=dict(),
aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True),
aws_access_key=dict(aliases=['ec2_access_key', 'access_key']),
validate_certs=dict(default=True, type='bool'),
security_token=dict(aliases=['access_token'], no_log=True),
profile=dict(),
)
def ec2_argument_spec():
spec = aws_common_argument_spec()
spec.update(
dict(
region=dict(aliases=['aws_region', 'ec2_region']),
)
)
return spec
def get_aws_connection_info(module, boto3=False):
# Check module args for credentials, then check environment vars
# access_key
ec2_url = module.params.get('ec2_url')
access_key = module.params.get('aws_access_key')
secret_key = module.params.get('aws_secret_key')
security_token = module.params.get('security_token')
region = module.params.get('region')
profile_name = module.params.get('profile')
validate_certs = module.params.get('validate_certs')
if not ec2_url:
if 'AWS_URL' in os.environ:
ec2_url = os.environ['AWS_URL']
elif 'EC2_URL' in os.environ:
ec2_url = os.environ['EC2_URL']
if not access_key:
if os.environ.get('AWS_ACCESS_KEY_ID'):
access_key = os.environ['AWS_ACCESS_KEY_ID']
elif os.environ.get('AWS_ACCESS_KEY'):
access_key = os.environ['AWS_ACCESS_KEY']
elif os.environ.get('EC2_ACCESS_KEY'):
access_key = os.environ['EC2_ACCESS_KEY']
elif HAS_BOTO and boto.config.get('Credentials', 'aws_access_key_id'):
access_key = boto.config.get('Credentials', 'aws_access_key_id')
elif HAS_BOTO and boto.config.get('default', 'aws_access_key_id'):
access_key = boto.config.get('default', 'aws_access_key_id')
else:
# in case access_key came in as empty string
access_key = None
if not secret_key:
if os.environ.get('AWS_SECRET_ACCESS_KEY'):
secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
elif os.environ.get('AWS_SECRET_KEY'):
secret_key = os.environ['AWS_SECRET_KEY']
elif os.environ.get('EC2_SECRET_KEY'):
secret_key = os.environ['EC2_SECRET_KEY']
elif HAS_BOTO and boto.config.get('Credentials', 'aws_secret_access_key'):
secret_key = boto.config.get('Credentials', 'aws_secret_access_key')
elif HAS_BOTO and boto.config.get('default', 'aws_secret_access_key'):
secret_key = boto.config.get('default', 'aws_secret_access_key')
else:
# in case secret_key came in as empty string
secret_key = None
if not region:
if 'AWS_REGION' in os.environ:
region = os.environ['AWS_REGION']
elif 'AWS_DEFAULT_REGION' in os.environ:
region = os.environ['AWS_DEFAULT_REGION']
elif 'EC2_REGION' in os.environ:
region = os.environ['EC2_REGION']
else:
if not boto3:
if HAS_BOTO:
# boto.config.get returns None if config not found
region = boto.config.get('Boto', 'aws_region')
if not region:
region = boto.config.get('Boto', 'ec2_region')
else:
module.fail_json(msg=missing_required_lib('boto'), exception=BOTO_IMP_ERR)
elif HAS_BOTO3:
# here we don't need to make an additional call, will default to 'us-east-1' if the below evaluates to None.
try:
region = botocore.session.Session(profile=profile_name).get_config_variable('region')
except botocore.exceptions.ProfileNotFound as e:
pass
else:
module.fail_json(msg=missing_required_lib('boto3'), exception=BOTO3_IMP_ERR)
if not security_token:
if os.environ.get('AWS_SECURITY_TOKEN'):
security_token = os.environ['AWS_SECURITY_TOKEN']
elif os.environ.get('AWS_SESSION_TOKEN'):
security_token = os.environ['AWS_SESSION_TOKEN']
elif os.environ.get('EC2_SECURITY_TOKEN'):
security_token = os.environ['EC2_SECURITY_TOKEN']
elif HAS_BOTO and boto.config.get('Credentials', 'aws_security_token'):
security_token = boto.config.get('Credentials', 'aws_security_token')
elif HAS_BOTO and boto.config.get('default', 'aws_security_token'):
security_token = boto.config.get('default', 'aws_security_token')
else:
# in case secret_token came in as empty string
security_token = None
if HAS_BOTO3 and boto3:
boto_params = dict(aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=security_token)
boto_params['verify'] = validate_certs
if profile_name:
boto_params = dict(aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None)
boto_params['profile_name'] = profile_name
else:
boto_params = dict(aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
security_token=security_token)
# only set profile_name if passed as an argument
if profile_name:
boto_params['profile_name'] = profile_name
boto_params['validate_certs'] = validate_certs
for param, value in boto_params.items():
if isinstance(value, binary_type):
boto_params[param] = text_type(value, 'utf-8', 'strict')
return region, ec2_url, boto_params
def get_ec2_creds(module):
''' for compatibility mode with old modules that don't/can't yet
use ec2_connect method '''
region, ec2_url, boto_params = get_aws_connection_info(module)
return ec2_url, boto_params['aws_access_key_id'], boto_params['aws_secret_access_key'], region
def boto_fix_security_token_in_profile(conn, profile_name):
''' monkey patch for boto issue boto/boto#2100 '''
profile = 'profile ' + profile_name
if boto.config.has_option(profile, 'aws_security_token'):
conn.provider.set_security_token(boto.config.get(profile, 'aws_security_token'))
return conn
def connect_to_aws(aws_module, region, **params):
try:
conn = aws_module.connect_to_region(region, **params)
except(boto.provider.ProfileNotFoundError):
raise AnsibleAWSError("Profile given for AWS was not found. Please fix and retry.")
if not conn:
if region not in [aws_module_region.name for aws_module_region in aws_module.regions()]:
raise AnsibleAWSError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade "
"boto or extend with endpoints_path" % (region, aws_module.__name__))
else:
raise AnsibleAWSError("Unknown problem connecting to region %s for aws module %s." % (region, aws_module.__name__))
if params.get('profile_name'):
conn = boto_fix_security_token_in_profile(conn, params['profile_name'])
return conn
def ec2_connect(module):
""" Return an ec2 connection"""
region, ec2_url, boto_params = get_aws_connection_info(module)
# If we have a region specified, connect to its endpoint.
if region:
try:
ec2 = connect_to_aws(boto.ec2, region, **boto_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError, boto.provider.ProfileNotFoundError) as e:
module.fail_json(msg=str(e))
# Otherwise, no region so we fallback to the old connection method
elif ec2_url:
try:
ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError, boto.provider.ProfileNotFoundError) as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="Either region or ec2_url must be specified")
return ec2
def ansible_dict_to_boto3_filter_list(filters_dict):
""" Convert an Ansible dict of filters to list of dicts that boto3 can use
Args:
filters_dict (dict): Dict of AWS filters.
Basic Usage:
>>> filters = {'some-aws-id': 'i-01234567'}
>>> ansible_dict_to_boto3_filter_list(filters)
Returns:
List: List of AWS filters and their values
[
{
'Name': 'some-aws-id',
'Values': [
'i-01234567',
]
}
]
"""
filters_list = []
for k, v in filters_dict.items():
filter_dict = {'Name': k}
if isinstance(v, string_types):
filter_dict['Values'] = [v]
else:
filter_dict['Values'] = v
filters_list.append(filter_dict)
return filters_list
def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_key_name=None):
""" Convert a boto3 list of resource tags to a flat dict of key:value pairs
Args:
tags_list (list): List of dicts representing AWS tags.
tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
Basic Usage:
>>> tags_list = [{'Key': 'MyTagKey', 'Value': 'MyTagValue'}]
>>> boto3_tag_list_to_ansible_dict(tags_list)
Returns:
Dict: Dict of key:value pairs representing AWS tags
{
'MyTagKey': 'MyTagValue',
}
"""
if tag_name_key_name and tag_value_key_name:
tag_candidates = {tag_name_key_name: tag_value_key_name}
else:
tag_candidates = {'key': 'value', 'Key': 'Value'}
if not tags_list:
return {}
for k, v in tag_candidates.items():
if k in tags_list[0] and v in tags_list[0]:
return dict((tag[k], tag[v]) for tag in tags_list)
raise ValueError("Couldn't find tag key (candidates %s) in tag list %s" % (str(tag_candidates), str(tags_list)))
def ansible_dict_to_boto3_tag_list(tags_dict, tag_name_key_name='Key', tag_value_key_name='Value'):
""" Convert a flat dict of key:value pairs representing AWS resource tags to a boto3 list of dicts
Args:
tags_dict (dict): Dict representing AWS resource tags.
tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
Basic Usage:
>>> tags_dict = {'MyTagKey': 'MyTagValue'}
>>> ansible_dict_to_boto3_tag_list(tags_dict)
Returns:
List: List of dicts containing tag keys and values
[
{
'Key': 'MyTagKey',
'Value': 'MyTagValue'
}
]
"""
tags_list = []
for k, v in tags_dict.items():
tags_list.append({tag_name_key_name: k, tag_value_key_name: to_native(v)})
return tags_list
def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id=None, boto3=True):
""" Return list of security group IDs from security group names. Note that security group names are not unique
across VPCs. If a name exists across multiple VPCs and no VPC ID is supplied, all matching IDs will be returned. This
will probably lead to a boto exception if you attempt to assign both IDs to a resource so ensure you wrap the call in
a try block
"""
def get_sg_name(sg, boto3):
if boto3:
return sg['GroupName']
else:
return sg.name
def get_sg_id(sg, boto3):
if boto3:
return sg['GroupId']
else:
return sg.id
sec_group_id_list = []
if isinstance(sec_group_list, string_types):
sec_group_list = [sec_group_list]
# Get all security groups
if boto3:
if vpc_id:
filters = [
{
'Name': 'vpc-id',
'Values': [
vpc_id,
]
}
]
all_sec_groups = ec2_connection.describe_security_groups(Filters=filters)['SecurityGroups']
else:
all_sec_groups = ec2_connection.describe_security_groups()['SecurityGroups']
else:
if vpc_id:
filters = {'vpc-id': vpc_id}
all_sec_groups = ec2_connection.get_all_security_groups(filters=filters)
else:
all_sec_groups = ec2_connection.get_all_security_groups()
unmatched = set(sec_group_list).difference(str(get_sg_name(all_sg, boto3)) for all_sg in all_sec_groups)
sec_group_name_list = list(set(sec_group_list) - set(unmatched))
if len(unmatched) > 0:
# If we have unmatched names that look like an ID, assume they are
import re
sec_group_id_list[:] = [sg for sg in unmatched if re.match('sg-[a-fA-F0-9]+$', sg)]
still_unmatched = [sg for sg in unmatched if not re.match('sg-[a-fA-F0-9]+$', sg)]
if len(still_unmatched) > 0:
raise ValueError("The following group names are not valid: %s" % ', '.join(still_unmatched))
sec_group_id_list += [str(get_sg_id(all_sg, boto3)) for all_sg in all_sec_groups if str(get_sg_name(all_sg, boto3)) in sec_group_name_list]
return sec_group_id_list
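# Illustrative example (hypothetical names, IDs and connection object): group
# names and literal sg- IDs can be mixed; names are resolved against the VPC's
# security groups while anything already shaped like an ID is passed through:
#
#     ids = get_ec2_security_group_ids_from_names(
#         ['web-sg', 'sg-0123456789abcdef0'], ec2_client, vpc_id='vpc-12345678')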
def _hashable_policy(policy, policy_list):
"""
Takes a policy and returns a list, the contents of which are all hashable and sorted.
Example input policy:
{'Version': '2012-10-17',
'Statement': [{'Action': 's3:PutObjectAcl',
'Sid': 'AddCannedAcl2',
'Resource': 'arn:aws:s3:::test_policy/*',
'Effect': 'Allow',
'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']}
}]}
Returned value:
[('Statement', ((('Action', (u's3:PutObjectAcl',)),
('Effect', (u'Allow',)),
('Principal', ('AWS', ((u'arn:aws:iam::XXXXXXXXXXXX:user/username1',), (u'arn:aws:iam::XXXXXXXXXXXX:user/username2',)))),
('Resource', (u'arn:aws:s3:::test_policy/*',)), ('Sid', (u'AddCannedAcl2',)))),
('Version', (u'2012-10-17',)))]
"""
if isinstance(policy, list):
for each in policy:
tupleified = _hashable_policy(each, [])
if isinstance(tupleified, list):
tupleified = tuple(tupleified)
policy_list.append(tupleified)
elif isinstance(policy, string_types) or isinstance(policy, binary_type):
policy = to_text(policy)
# convert root account ARNs to just account IDs
if policy.startswith('arn:aws:iam::') and policy.endswith(':root'):
policy = policy.split(':')[4]
return [policy]
elif isinstance(policy, dict):
sorted_keys = list(policy.keys())
sorted_keys.sort()
for key in sorted_keys:
tupleified = _hashable_policy(policy[key], [])
if isinstance(tupleified, list):
tupleified = tuple(tupleified)
policy_list.append((key, tupleified))
# ensure we aren't returning deeply nested structures of length 1
if len(policy_list) == 1 and isinstance(policy_list[0], tuple):
policy_list = policy_list[0]
if isinstance(policy_list, list):
if PY3_COMPARISON:
policy_list.sort(key=cmp_to_key(py3cmp))
else:
policy_list.sort()
return policy_list
def py3cmp(a, b):
""" Python 2 can sort lists of mixed types. Strings < tuples. Without this function this fails on Python 3."""
try:
if a > b:
return 1
elif a < b:
return -1
else:
return 0
except TypeError as e:
# check to see if they're tuple-string
# always say strings are less than tuples (to maintain compatibility with python2)
str_ind = to_text(e).find('str')
tup_ind = to_text(e).find('tuple')
if -1 not in (str_ind, tup_ind):
if str_ind < tup_ind:
return -1
elif tup_ind < str_ind:
return 1
raise
def compare_policies(current_policy, new_policy):
""" Compares the existing policy and the updated policy
Returns True if there is a difference between policies.
"""
return set(_hashable_policy(new_policy, [])) != set(_hashable_policy(current_policy, []))
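# Illustrative example (not from the original module): compare_policies()
# reports no difference when two policies differ only in list ordering,
# because _hashable_policy() sorts nested lists before comparing:
#
#     p1 = {'Statement': [{'Effect': 'Allow',
#                          'Principal': {'AWS': ['arn:a', 'arn:b']}}]}
#     p2 = {'Statement': [{'Effect': 'Allow',
#                          'Principal': {'AWS': ['arn:b', 'arn:a']}}]}
#     compare_policies(p1, p2)    # -> False (no change needed)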
def sort_json_policy_dict(policy_dict):
""" Sort any lists in an IAM JSON policy so that comparison of two policies with identical values but
different orders will return true
Args:
policy_dict (dict): Dict representing IAM JSON policy.
Basic Usage:
        >>> my_iam_policy = {'Principle': {'AWS':["31","7","14","101"]}}
>>> sort_json_policy_dict(my_iam_policy)
Returns:
Dict: Will return a copy of the policy as a Dict but any List will be sorted
{
'Principle': {
'AWS': [ '7', '14', '31', '101' ]
}
}
"""
def value_is_list(my_list):
checked_list = []
for item in my_list:
if isinstance(item, dict):
checked_list.append(sort_json_policy_dict(item))
elif isinstance(item, list):
checked_list.append(value_is_list(item))
else:
checked_list.append(item)
# Sort list. If it's a list of dictionaries, sort by tuple of key-value
# pairs, since Python 3 doesn't allow comparisons such as `<` between dictionaries.
checked_list.sort(key=lambda x: sorted(x.items()) if isinstance(x, dict) else x)
return checked_list
ordered_policy_dict = {}
for key, value in policy_dict.items():
if isinstance(value, dict):
ordered_policy_dict[key] = sort_json_policy_dict(value)
elif isinstance(value, list):
ordered_policy_dict[key] = value_is_list(value)
else:
ordered_policy_dict[key] = value
return ordered_policy_dict
def map_complex_type(complex_type, type_map):
"""
Allows to cast elements within a dictionary to a specific type
Example of usage:
DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
'maximum_percent': 'int',
'minimum_healthy_percent': 'int'
}
deployment_configuration = map_complex_type(module.params['deployment_configuration'],
DEPLOYMENT_CONFIGURATION_TYPE_MAP)
    This ensures all mapped keys within the root element are cast to valid integers
"""
if complex_type is None:
return
new_type = type(complex_type)()
if isinstance(complex_type, dict):
for key in complex_type:
if key in type_map:
if isinstance(type_map[key], list):
new_type[key] = map_complex_type(
complex_type[key],
type_map[key][0])
else:
new_type[key] = map_complex_type(
complex_type[key],
type_map[key])
else:
return complex_type
elif isinstance(complex_type, list):
for i in range(len(complex_type)):
new_type.append(map_complex_type(
complex_type[i],
type_map))
elif type_map:
return globals()['__builtins__'][type_map](complex_type)
return new_type
def compare_aws_tags(current_tags_dict, new_tags_dict, purge_tags=True):
"""
Compare two dicts of AWS tags. Dicts are expected to of been created using 'boto3_tag_list_to_ansible_dict' helper function.
Two dicts are returned - the first is tags to be set, the second is any tags to remove. Since the AWS APIs differ
these may not be able to be used out of the box.
:param current_tags_dict:
:param new_tags_dict:
:param purge_tags:
:return: tag_key_value_pairs_to_set: a dict of key value pairs that need to be set in AWS. If all tags are identical this dict will be empty
:return: tag_keys_to_unset: a list of key names (type str) that need to be unset in AWS. If no tags need to be unset this list will be empty
"""
tag_key_value_pairs_to_set = {}
tag_keys_to_unset = []
for key in current_tags_dict.keys():
if key not in new_tags_dict and purge_tags:
tag_keys_to_unset.append(key)
for key in set(new_tags_dict.keys()) - set(tag_keys_to_unset):
if to_text(new_tags_dict[key]) != current_tags_dict.get(key):
tag_key_value_pairs_to_set[key] = new_tags_dict[key]
return tag_key_value_pairs_to_set, tag_keys_to_unset
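# Worked example (illustrative values): with current tags
# {'Name': 'web', 'env': 'dev'}, desired tags {'Name': 'web', 'owner': 'ops'}
# and purge_tags=True, compare_aws_tags() returns ({'owner': 'ops'}, ['env']):
# 'owner' must be set, 'env' must be removed and the unchanged 'Name' tag is
# left alone.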
| alxgu/ansible | lib/ansible/module_utils/ec2.py | Python | gpl-3.0 | 28,571 |
from robot.api.deco import keyword
def defined_twice():
1/0
@keyword('Defined twice')
def this_time_using_custom_name():
2/0
def defined_thrice():
1/0
def definedThrice():
2/0
def Defined_Thrice():
3/0
@keyword('Embedded ${arguments} twice')
def embedded1(arg):
1/0
@keyword('Embedded ${arguments match} TWICE')
def embedded2(arg):
2/0
| ChrisHirsch/robotframework | atest/testdata/keywords/DupeKeywords.py | Python | apache-2.0 | 372 |
# Copyright (c) 2001, Stanford University
# All rights reserved.
#
# See the file LICENSE.txt for information on redistributing this software.
import sys
import apiutil
apiutil.CopyrightDef()
print """DESCRIPTION ""
EXPORTS
"""
keys = apiutil.GetDispatchedFunctions(sys.argv[1]+"/APIspec.txt")
for func_name in apiutil.AllSpecials( 'state' ):
print "crState%s" % func_name
for func_name in apiutil.AllSpecials( 'state_feedback' ):
print "crStateFeedback%s" % func_name
for func_name in apiutil.AllSpecials( 'state_select' ):
print "crStateSelect%s" % func_name
print """crStateInit
crStateReadPixels
crStateGetChromiumParametervCR
crStateCreateContext
crStateCreateContextEx
crStateDestroyContext
crStateDiffContext
crStateSwitchContext
crStateMakeCurrent
crStateSetCurrent
crStateFlushFunc
crStateFlushArg
crStateDiffAPI
crStateSetCurrentPointers
crStateResetCurrentPointers
crStateCurrentRecover
crStateTransformUpdateTransform
crStateColorMaterialRecover
crStateError
crStateUpdateColorBits
crStateClientInit
crStateGetCurrent
crStateLimitsInit
crStateMergeExtensions
crStateRasterPosUpdate
crStateTextureCheckDirtyImages
crStateExtensionsInit
crStateSetExtensionString
crStateUseServerArrays
crStateUseServerArrayElements
crStateComputeVersion
crStateTransformXformPointMatrixf
crStateTransformXformPointMatrixd
crStateInitMatrixStack
crStateLoadMatrix
__currentBits
"""
| yuyuyu101/VirtualBox-NetBSD | src/VBox/GuestHost/OpenGL/state_tracker/state_defs.py | Python | gpl-2.0 | 1,397 |
# -*- coding: utf-8 -*-
from typing import Text
from zerver.lib.test_classes import WebhookTestCase
class HelloSignHookTests(WebhookTestCase):
STREAM_NAME = 'hellosign'
URL_TEMPLATE = "/api/v1/external/hellosign?stream={stream}&api_key={api_key}"
FIXTURE_DIR_NAME = 'hellosign'
def test_signatures_message(self):
# type: () -> None
expected_subject = "NDA with Acme Co."
expected_message = ("The NDA with Acme Co. is awaiting the signature of "
"Jack and was just signed by Jill.")
self.send_and_test_stream_message('signatures', expected_subject, expected_message,
content_type="application/x-www-form-urlencoded")
def get_body(self, fixture_name):
# type: (Text) -> Text
return self.fixture_data("hellosign", fixture_name, file_type="json")
| sonali0901/zulip | zerver/webhooks/hellosign/tests.py | Python | apache-2.0 | 884 |
from tornado.concurrent import Future
from tornado import gen
from tornado.httpclient import HTTPError
from tornado.log import gen_log
from tornado.testing import AsyncHTTPTestCase, gen_test, bind_unused_port, ExpectLog
from tornado.web import Application, RequestHandler
from tornado.websocket import WebSocketHandler, websocket_connect, WebSocketError
class EchoHandler(WebSocketHandler):
def initialize(self, close_future):
self.close_future = close_future
def on_message(self, message):
self.write_message(message, isinstance(message, bytes))
def on_close(self):
self.close_future.set_result(None)
class NonWebSocketHandler(RequestHandler):
def get(self):
self.write('ok')
class WebSocketTest(AsyncHTTPTestCase):
def get_app(self):
self.close_future = Future()
return Application([
('/echo', EchoHandler, dict(close_future=self.close_future)),
('/non_ws', NonWebSocketHandler),
])
@gen_test
def test_websocket_gen(self):
ws = yield websocket_connect(
'ws://localhost:%d/echo' % self.get_http_port(),
io_loop=self.io_loop)
ws.write_message('hello')
response = yield ws.read_message()
self.assertEqual(response, 'hello')
def test_websocket_callbacks(self):
websocket_connect(
'ws://localhost:%d/echo' % self.get_http_port(),
io_loop=self.io_loop, callback=self.stop)
ws = self.wait().result()
ws.write_message('hello')
ws.read_message(self.stop)
response = self.wait().result()
self.assertEqual(response, 'hello')
@gen_test
def test_websocket_http_fail(self):
with self.assertRaises(HTTPError) as cm:
yield websocket_connect(
'ws://localhost:%d/notfound' % self.get_http_port(),
io_loop=self.io_loop)
self.assertEqual(cm.exception.code, 404)
@gen_test
def test_websocket_http_success(self):
with self.assertRaises(WebSocketError):
yield websocket_connect(
'ws://localhost:%d/non_ws' % self.get_http_port(),
io_loop=self.io_loop)
@gen_test
def test_websocket_network_fail(self):
sock, port = bind_unused_port()
sock.close()
with self.assertRaises(HTTPError) as cm:
with ExpectLog(gen_log, ".*"):
yield websocket_connect(
'ws://localhost:%d/' % port,
io_loop=self.io_loop,
connect_timeout=0.01)
self.assertEqual(cm.exception.code, 599)
@gen_test
def test_websocket_close_buffered_data(self):
ws = yield websocket_connect(
'ws://localhost:%d/echo' % self.get_http_port())
ws.write_message('hello')
ws.write_message('world')
ws.stream.close()
yield self.close_future
| Drvanon/Game | venv/lib/python3.3/site-packages/tornado/test/websocket_test.py | Python | apache-2.0 | 2,935 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import sys
import unittest
from telemetry.core import browser_options
from telemetry.core import discover
from telemetry.unittest import gtest_testrunner
from telemetry.unittest import options_for_unittests
def Discover(start_dir, top_level_dir=None, pattern='test*.py'):
loader = unittest.defaultTestLoader
loader.suiteClass = gtest_testrunner.GTestTestSuite
subsuites = []
modules = discover.DiscoverModules(start_dir, top_level_dir, pattern)
for module in modules:
if hasattr(module, 'suite'):
new_suite = module.suite()
else:
new_suite = loader.loadTestsFromModule(module)
if new_suite.countTestCases():
subsuites.append(new_suite)
return gtest_testrunner.GTestTestSuite(subsuites)
def FilterSuite(suite, predicate):
new_suite = suite.__class__()
for x in suite:
if isinstance(x, unittest.TestSuite):
subsuite = FilterSuite(x, predicate)
if subsuite.countTestCases() == 0:
continue
new_suite.addTest(subsuite)
continue
assert isinstance(x, unittest.TestCase)
if predicate(x):
new_suite.addTest(x)
return new_suite
def DiscoverAndRunTests(
dir_name, args, top_level_dir,
runner=None, run_disabled_tests=False):
if not runner:
runner = gtest_testrunner.GTestTestRunner(inner=True)
suite = Discover(dir_name, top_level_dir, '*_unittest.py')
def IsTestSelected(test):
if len(args) != 0:
found = False
for name in args:
if name in test.id():
found = True
if not found:
return False
if hasattr(test, '_testMethodName'):
method = getattr(test, test._testMethodName) # pylint: disable=W0212
if hasattr(method, '_requires_browser_types'):
types = method._requires_browser_types # pylint: disable=W0212
if options_for_unittests.GetBrowserType() not in types:
logging.debug('Skipping test %s because it requires %s' %
(test.id(), types))
return False
if hasattr(method, '_disabled_test'):
if not run_disabled_tests:
return False
return True
filtered_suite = FilterSuite(suite, IsTestSelected)
test_result = runner.run(filtered_suite)
return test_result
def Main(args, start_dir, top_level_dir, runner=None):
"""Unit test suite that collects all test cases for telemetry."""
# Add unittest_data to the path so we can import packages from it.
unittest_data_dir = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..', 'unittest_data'))
sys.path.append(unittest_data_dir)
default_options = browser_options.BrowserOptions()
default_options.browser_type = 'any'
parser = default_options.CreateParser('run_tests [options] [test names]')
parser.add_option('--repeat-count', dest='run_test_repeat_count',
type='int', default=1,
                    help='Repeats each test the provided number of times.')
parser.add_option('-d', '--also-run-disabled-tests',
dest='run_disabled_tests',
action='store_true', default=False,
help='Also run tests decorated with @DisabledTest.')
_, args = parser.parse_args(args)
logging_level = logging.getLogger().getEffectiveLevel()
if default_options.verbosity == 0:
logging.getLogger().setLevel(logging.WARN)
from telemetry.core import browser_finder
try:
browser_to_create = browser_finder.FindBrowser(default_options)
  except browser_finder.BrowserFinderException as ex:
logging.error(str(ex))
return 1
  if browser_to_create is None:
logging.error('No browser found of type %s. Cannot run tests.',
default_options.browser_type)
logging.error('Re-run with --browser=list to see available browser types.')
return 1
options_for_unittests.Set(default_options,
browser_to_create.browser_type)
olddir = os.getcwd()
try:
os.chdir(top_level_dir)
success = True
for _ in range(
default_options.run_test_repeat_count): # pylint: disable=E1101
success = success and DiscoverAndRunTests(
start_dir, args, top_level_dir,
runner, default_options.run_disabled_tests)
if success:
return 0
finally:
os.chdir(olddir)
options_for_unittests.Set(None, None)
if default_options.verbosity == 0:
# Restore logging level.
logging.getLogger().setLevel(logging_level)
return 1
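# Hedged usage sketch (editor addition, not part of the original module): a
# self-contained illustration of FilterSuite defined above. The test names are
# made up, and the __main__ guard keeps this inert when the module is imported;
# running it still requires the telemetry imports at the top to resolve.
if __name__ == '__main__':
  class _FilterSuiteDemo(unittest.TestCase):
    def test_kept(self):
      pass
    def test_dropped(self):
      pass
  _demo_suite = unittest.TestLoader().loadTestsFromTestCase(_FilterSuiteDemo)
  _filtered = FilterSuite(_demo_suite, lambda t: 'kept' in t.id())
  print('%d of %d cases kept' % (_filtered.countTestCases(),
                                 _demo_suite.countTestCases()))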
| gfreed/android_external_chromium-org | tools/telemetry/telemetry/unittest/run_tests.py | Python | bsd-3-clause | 4,642 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Transform that parses serialized tensorflow.Example protos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.python.ops import parsing_ops
class ExampleParser(transform.TensorFlowTransform):
"""A Transform that parses serialized `tensorflow.Example` protos."""
def __init__(self, features):
"""Initialize `ExampleParser`.
The `features` argument must be an object that can be converted to an
`OrderedDict`. The keys should be strings and will be used to name the
output. Values should be either `VarLenFeature` or `FixedLenFeature`. If
`features` is a dict, it will be sorted by key.
Args:
features: An object that can be converted to an `OrderedDict` mapping
column names to feature definitions.
"""
super(ExampleParser, self).__init__()
if isinstance(features, dict):
      self._ordered_features = collections.OrderedDict(
          sorted(features.items(), key=lambda f: f[0]))
else:
self._ordered_features = collections.OrderedDict(features)
@property
def name(self):
return "ExampleParser"
@property
def input_valency(self):
return 1
@property
def _output_names(self):
return list(self._ordered_features.keys())
@transform._parameter # pylint: disable=protected-access
def feature_definitions(self):
return self._ordered_features
def _apply_transform(self, input_tensors, **kwargs):
parsed_values = parsing_ops.parse_example(input_tensors[0],
features=self._ordered_features)
# pylint: disable=not-callable
return self.return_type(**parsed_values)
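# Hedged usage sketch (editor addition, not part of the original module): shows
# one way `ExampleParser` might be constructed from a plain dict, as described
# in its docstring above. The feature names are hypothetical, and the __main__
# guard keeps this from running when the module is imported.
if __name__ == "__main__":
  from tensorflow.python.framework import dtypes
  _demo_features = {
      "label": parsing_ops.FixedLenFeature([], dtypes.int64),
      "tokens": parsing_ops.VarLenFeature(dtypes.string),
  }
  _demo_parser = ExampleParser(_demo_features)  # dict input is sorted by key
  print(_demo_parser._output_names)  # ['label', 'tokens']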
| npuichigo/ttsflow | third_party/tensorflow/tensorflow/contrib/learn/python/learn/dataframe/transforms/example_parser.py | Python | apache-2.0 | 2,407 |
# -*- coding: utf-8 -*-
"""
Basic CLexer Test
~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import unittest
import os
from pygments.token import Text, Number
from pygments.lexers import CLexer
class CLexerTest(unittest.TestCase):
def setUp(self):
self.lexer = CLexer()
def testNumbers(self):
code = '42 23.42 23. .42 023 0xdeadbeef 23e+42 42e-23'
wanted = []
for item in zip([Number.Integer, Number.Float, Number.Float,
Number.Float, Number.Oct, Number.Hex,
Number.Float, Number.Float], code.split()):
wanted.append(item)
wanted.append((Text, ' '))
wanted = [(Text, '')] + wanted[:-1] + [(Text, '\n')]
self.assertEqual(list(self.lexer.get_tokens(code)), wanted)
| djanowski/pygmentize | vendor/pygments/tests/test_clexer.py | Python | mit | 898 |
from __future__ import unicode_literals
import errno
import os
import re
import socket
import sys
from datetime import datetime
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError
from django.core.servers.basehttp import get_internal_wsgi_application, run
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.migrations.executor import MigrationExecutor
from django.utils import autoreload, six
from django.utils.encoding import force_text, get_system_encoding
naiveip_re = re.compile(r"""^(?:
(?P<addr>
(?P<ipv4>\d{1,3}(?:\.\d{1,3}){3}) | # IPv4 address
(?P<ipv6>\[[a-fA-F0-9:]+\]) | # IPv6 address
(?P<fqdn>[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*) # FQDN
):)?(?P<port>\d+)$""", re.X)
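# Hedged illustration (editor note): addrport strings this pattern is intended
# to accept include "8000" (port only), "0.0.0.0:8000", "[::1]:8000" and
# "example.com:8000"; the port group is the only mandatory part.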
DEFAULT_PORT = "8000"
class Command(BaseCommand):
help = "Starts a lightweight Web server for development."
# Validation is called explicitly each time the server is reloaded.
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument('addrport', nargs='?',
help='Optional port number, or ipaddr:port')
parser.add_argument('--ipv6', '-6', action='store_true', dest='use_ipv6', default=False,
help='Tells Django to use an IPv6 address.')
parser.add_argument('--nothreading', action='store_false', dest='use_threading', default=True,
help='Tells Django to NOT use threading.')
parser.add_argument('--noreload', action='store_false', dest='use_reloader', default=True,
help='Tells Django to NOT use the auto-reloader.')
def execute(self, *args, **options):
if options.get('no_color'):
# We rely on the environment because it's currently the only
# way to reach WSGIRequestHandler. This seems an acceptable
# compromise considering `runserver` runs indefinitely.
os.environ[str("DJANGO_COLORS")] = str("nocolor")
super(Command, self).execute(*args, **options)
def get_handler(self, *args, **options):
"""
Returns the default WSGI handler for the runner.
"""
return get_internal_wsgi_application()
def handle(self, *args, **options):
from django.conf import settings
if not settings.DEBUG and not settings.ALLOWED_HOSTS:
raise CommandError('You must set settings.ALLOWED_HOSTS if DEBUG is False.')
self.use_ipv6 = options.get('use_ipv6')
if self.use_ipv6 and not socket.has_ipv6:
raise CommandError('Your Python does not support IPv6.')
self._raw_ipv6 = False
if not options.get('addrport'):
self.addr = ''
self.port = DEFAULT_PORT
else:
m = re.match(naiveip_re, options['addrport'])
if m is None:
raise CommandError('"%s" is not a valid port number '
'or address:port pair.' % options['addrport'])
self.addr, _ipv4, _ipv6, _fqdn, self.port = m.groups()
if not self.port.isdigit():
raise CommandError("%r is not a valid port number." % self.port)
if self.addr:
if _ipv6:
self.addr = self.addr[1:-1]
self.use_ipv6 = True
self._raw_ipv6 = True
elif self.use_ipv6 and not _fqdn:
raise CommandError('"%s" is not a valid IPv6 address.' % self.addr)
if not self.addr:
self.addr = '::1' if self.use_ipv6 else '127.0.0.1'
self._raw_ipv6 = bool(self.use_ipv6)
self.run(**options)
def run(self, **options):
"""
Runs the server, using the autoreloader if needed
"""
use_reloader = options.get('use_reloader')
if use_reloader:
autoreload.main(self.inner_run, None, options)
else:
self.inner_run(None, **options)
def inner_run(self, *args, **options):
from django.conf import settings
from django.utils import translation
# If an exception was silenced in ManagementUtility.execute in order
# to be raised in the child process, raise it now.
autoreload.raise_last_exception()
threading = options.get('use_threading')
shutdown_message = options.get('shutdown_message', '')
quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C'
self.stdout.write("Performing system checks...\n\n")
self.validate(display_num_errors=True)
try:
self.check_migrations()
except ImproperlyConfigured:
pass
now = datetime.now().strftime('%B %d, %Y - %X')
if six.PY2:
now = now.decode(get_system_encoding())
self.stdout.write((
"%(started_at)s\n"
"Django version %(version)s, using settings %(settings)r\n"
"Starting development server at http://%(addr)s:%(port)s/\n"
"Quit the server with %(quit_command)s.\n"
) % {
"started_at": now,
"version": self.get_version(),
"settings": settings.SETTINGS_MODULE,
"addr": '[%s]' % self.addr if self._raw_ipv6 else self.addr,
"port": self.port,
"quit_command": quit_command,
})
# django.core.management.base forces the locale to en-us. We should
# set it up correctly for the first request (particularly important
# in the "--noreload" case).
translation.activate(settings.LANGUAGE_CODE)
try:
handler = self.get_handler(*args, **options)
run(self.addr, int(self.port), handler,
ipv6=self.use_ipv6, threading=threading)
except socket.error as e:
# Use helpful error messages instead of ugly tracebacks.
ERRORS = {
errno.EACCES: "You don't have permission to access that port.",
errno.EADDRINUSE: "That port is already in use.",
                errno.EADDRNOTAVAIL: "That IP address can't be assigned to.",
}
try:
error_text = ERRORS[e.errno]
except KeyError:
error_text = force_text(e)
self.stderr.write("Error: %s" % error_text)
# Need to use an OS exit because sys.exit doesn't work in a thread
os._exit(1)
except KeyboardInterrupt:
if shutdown_message:
self.stdout.write(shutdown_message)
sys.exit(0)
def check_migrations(self):
"""
Checks to see if the set of migrations on disk matches the
migrations in the database. Prints a warning if they don't match.
"""
executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])
plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
if plan:
self.stdout.write(self.style.NOTICE(
"\nYou have unapplied migrations; your app may not work properly until they are applied."
))
self.stdout.write(self.style.NOTICE("Run 'python manage.py migrate' to apply them.\n"))
# Kept for backward compatibility
BaseRunserverCommand = Command
| devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/django/core/management/commands/runserver.py | Python | agpl-3.0 | 7,307 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-04-03 20:50
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0005_merge'),
]
operations = [
migrations.RunSQL(
[
"""
CREATE INDEX fileversion_metadata_sha_arch_vault_index
ON osf_fileversion ((osf_fileversion.metadata -> 'sha256'), (osf_fileversion.metadata -> 'archive'), (
osf_fileversion.metadata -> 'vault'));
"""
],
[
"""
DROP INDEX fileversion_metadata_sha_arch_vault_index;
"""
]
)
]
| aaxelb/osf.io | osf/migrations/0006_add_jsonb_index_for_fileversions.py | Python | apache-2.0 | 765 |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
from collections import namedtuple
from cryptography import utils
from cryptography.exceptions import InternalError
from cryptography.hazmat.backends.commoncrypto.ciphers import (
_CipherContext, _GCMCipherContext
)
from cryptography.hazmat.backends.commoncrypto.hashes import _HashContext
from cryptography.hazmat.backends.commoncrypto.hmac import _HMACContext
from cryptography.hazmat.backends.interfaces import (
CipherBackend, HMACBackend, HashBackend, PBKDF2HMACBackend
)
from cryptography.hazmat.bindings.commoncrypto.binding import Binding
from cryptography.hazmat.primitives.ciphers.algorithms import (
AES, ARC4, Blowfish, CAST5, TripleDES
)
from cryptography.hazmat.primitives.ciphers.modes import (
CBC, CFB, CFB8, CTR, ECB, GCM, OFB
)
HashMethods = namedtuple(
"HashMethods", ["ctx", "hash_init", "hash_update", "hash_final"]
)
@utils.register_interface(CipherBackend)
@utils.register_interface(HashBackend)
@utils.register_interface(HMACBackend)
@utils.register_interface(PBKDF2HMACBackend)
class Backend(object):
"""
CommonCrypto API wrapper.
"""
name = "commoncrypto"
def __init__(self):
self._binding = Binding()
self._ffi = self._binding.ffi
self._lib = self._binding.lib
self._cipher_registry = {}
self._register_default_ciphers()
self._hash_mapping = {
"md5": HashMethods(
"CC_MD5_CTX *", self._lib.CC_MD5_Init,
self._lib.CC_MD5_Update, self._lib.CC_MD5_Final
),
"sha1": HashMethods(
"CC_SHA1_CTX *", self._lib.CC_SHA1_Init,
self._lib.CC_SHA1_Update, self._lib.CC_SHA1_Final
),
"sha224": HashMethods(
"CC_SHA256_CTX *", self._lib.CC_SHA224_Init,
self._lib.CC_SHA224_Update, self._lib.CC_SHA224_Final
),
"sha256": HashMethods(
"CC_SHA256_CTX *", self._lib.CC_SHA256_Init,
self._lib.CC_SHA256_Update, self._lib.CC_SHA256_Final
),
"sha384": HashMethods(
"CC_SHA512_CTX *", self._lib.CC_SHA384_Init,
self._lib.CC_SHA384_Update, self._lib.CC_SHA384_Final
),
"sha512": HashMethods(
"CC_SHA512_CTX *", self._lib.CC_SHA512_Init,
self._lib.CC_SHA512_Update, self._lib.CC_SHA512_Final
),
}
self._supported_hmac_algorithms = {
"md5": self._lib.kCCHmacAlgMD5,
"sha1": self._lib.kCCHmacAlgSHA1,
"sha224": self._lib.kCCHmacAlgSHA224,
"sha256": self._lib.kCCHmacAlgSHA256,
"sha384": self._lib.kCCHmacAlgSHA384,
"sha512": self._lib.kCCHmacAlgSHA512,
}
self._supported_pbkdf2_hmac_algorithms = {
"sha1": self._lib.kCCPRFHmacAlgSHA1,
"sha224": self._lib.kCCPRFHmacAlgSHA224,
"sha256": self._lib.kCCPRFHmacAlgSHA256,
"sha384": self._lib.kCCPRFHmacAlgSHA384,
"sha512": self._lib.kCCPRFHmacAlgSHA512,
}
def hash_supported(self, algorithm):
return algorithm.name in self._hash_mapping
def hmac_supported(self, algorithm):
return algorithm.name in self._supported_hmac_algorithms
def create_hash_ctx(self, algorithm):
return _HashContext(self, algorithm)
def create_hmac_ctx(self, key, algorithm):
return _HMACContext(self, key, algorithm)
def cipher_supported(self, cipher, mode):
return (type(cipher), type(mode)) in self._cipher_registry
def create_symmetric_encryption_ctx(self, cipher, mode):
if isinstance(mode, GCM):
return _GCMCipherContext(
self, cipher, mode, self._lib.kCCEncrypt
)
else:
return _CipherContext(self, cipher, mode, self._lib.kCCEncrypt)
def create_symmetric_decryption_ctx(self, cipher, mode):
if isinstance(mode, GCM):
return _GCMCipherContext(
self, cipher, mode, self._lib.kCCDecrypt
)
else:
return _CipherContext(self, cipher, mode, self._lib.kCCDecrypt)
def pbkdf2_hmac_supported(self, algorithm):
return algorithm.name in self._supported_pbkdf2_hmac_algorithms
def derive_pbkdf2_hmac(self, algorithm, length, salt, iterations,
key_material):
alg_enum = self._supported_pbkdf2_hmac_algorithms[algorithm.name]
buf = self._ffi.new("char[]", length)
res = self._lib.CCKeyDerivationPBKDF(
self._lib.kCCPBKDF2,
key_material,
len(key_material),
salt,
len(salt),
alg_enum,
iterations,
buf,
length
)
self._check_cipher_response(res)
return self._ffi.buffer(buf)[:]
def _register_cipher_adapter(self, cipher_cls, cipher_const, mode_cls,
mode_const):
if (cipher_cls, mode_cls) in self._cipher_registry:
raise ValueError("Duplicate registration for: {0} {1}.".format(
cipher_cls, mode_cls)
)
self._cipher_registry[cipher_cls, mode_cls] = (cipher_const,
mode_const)
def _register_default_ciphers(self):
for mode_cls, mode_const in [
(CBC, self._lib.kCCModeCBC),
(ECB, self._lib.kCCModeECB),
(CFB, self._lib.kCCModeCFB),
(CFB8, self._lib.kCCModeCFB8),
(OFB, self._lib.kCCModeOFB),
(CTR, self._lib.kCCModeCTR),
(GCM, self._lib.kCCModeGCM),
]:
self._register_cipher_adapter(
AES,
self._lib.kCCAlgorithmAES128,
mode_cls,
mode_const
)
for mode_cls, mode_const in [
(CBC, self._lib.kCCModeCBC),
(ECB, self._lib.kCCModeECB),
(CFB, self._lib.kCCModeCFB),
(CFB8, self._lib.kCCModeCFB8),
(OFB, self._lib.kCCModeOFB),
]:
self._register_cipher_adapter(
TripleDES,
self._lib.kCCAlgorithm3DES,
mode_cls,
mode_const
)
for mode_cls, mode_const in [
(CBC, self._lib.kCCModeCBC),
(ECB, self._lib.kCCModeECB),
(CFB, self._lib.kCCModeCFB),
(OFB, self._lib.kCCModeOFB)
]:
self._register_cipher_adapter(
Blowfish,
self._lib.kCCAlgorithmBlowfish,
mode_cls,
mode_const
)
for mode_cls, mode_const in [
(CBC, self._lib.kCCModeCBC),
(ECB, self._lib.kCCModeECB),
(CFB, self._lib.kCCModeCFB),
(OFB, self._lib.kCCModeOFB),
(CTR, self._lib.kCCModeCTR)
]:
self._register_cipher_adapter(
CAST5,
self._lib.kCCAlgorithmCAST,
mode_cls,
mode_const
)
self._register_cipher_adapter(
ARC4,
self._lib.kCCAlgorithmRC4,
type(None),
self._lib.kCCModeRC4
)
def _check_cipher_response(self, response):
if response == self._lib.kCCSuccess:
return
elif response == self._lib.kCCAlignmentError:
# This error is not currently triggered due to a bug filed as
# rdar://15589470
raise ValueError(
"The length of the provided data is not a multiple of "
"the block length."
)
else:
raise InternalError(
"The backend returned an unknown error, consider filing a bug."
" Code: {0}.".format(response),
response
)
def _release_cipher_ctx(self, ctx):
"""
Called by the garbage collector and used to safely dereference and
release the context.
"""
if ctx[0] != self._ffi.NULL:
res = self._lib.CCCryptorRelease(ctx[0])
self._check_cipher_response(res)
ctx[0] = self._ffi.NULL
backend = Backend()
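# Hedged usage sketch (editor addition, not part of the original module): one
# way the backend above can be exercised through its PBKDF2 entry point. The
# salt and password values are made up, and this only works on OS X systems
# where the CommonCrypto bindings load; the __main__ guard keeps it inert on
# import.
if __name__ == "__main__":
    from cryptography.hazmat.primitives import hashes
    _derived = backend.derive_pbkdf2_hmac(
        hashes.SHA256(), 32, b"\x00" * 16, 100000, b"password"
    )
    print(len(_derived))  # 32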
| ARMmbed/yotta_osx_installer | workspace/lib/python2.7/site-packages/cryptography/hazmat/backends/commoncrypto/backend.py | Python | apache-2.0 | 8,577 |