repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.3.0/Lib/test/test_macpath.py | 62 | 4985 |
import macpath
from test import support, test_genericpath
import unittest
class MacPathTestCase(unittest.TestCase):
def test_abspath(self):
self.assertEqual(macpath.abspath("xx:yy"), "xx:yy")
def test_isabs(self):
isabs = macpath.isabs
self.assertTrue(isabs("xx:yy"))
self.assertTrue(isabs("xx:yy:"))
self.assertTrue(isabs("xx:"))
self.assertFalse(isabs("foo"))
self.assertFalse(isabs(":foo"))
self.assertFalse(isabs(":foo:bar"))
self.assertFalse(isabs(":foo:bar:"))
self.assertTrue(isabs(b"xx:yy"))
self.assertTrue(isabs(b"xx:yy:"))
self.assertTrue(isabs(b"xx:"))
self.assertFalse(isabs(b"foo"))
self.assertFalse(isabs(b":foo"))
self.assertFalse(isabs(b":foo:bar"))
self.assertFalse(isabs(b":foo:bar:"))
def test_split(self):
split = macpath.split
self.assertEqual(split("foo:bar"),
('foo:', 'bar'))
self.assertEqual(split("conky:mountpoint:foo:bar"),
('conky:mountpoint:foo', 'bar'))
self.assertEqual(split(":"), ('', ''))
self.assertEqual(split(":conky:mountpoint:"),
(':conky:mountpoint', ''))
self.assertEqual(split(b"foo:bar"),
(b'foo:', b'bar'))
self.assertEqual(split(b"conky:mountpoint:foo:bar"),
(b'conky:mountpoint:foo', b'bar'))
self.assertEqual(split(b":"), (b'', b''))
self.assertEqual(split(b":conky:mountpoint:"),
(b':conky:mountpoint', b''))
def test_join(self):
join = macpath.join
self.assertEqual(join('a', 'b'), ':a:b')
self.assertEqual(join('', 'a:b'), 'a:b')
self.assertEqual(join('a:b', 'c'), 'a:b:c')
self.assertEqual(join('a:b', ':c'), 'a:b:c')
self.assertEqual(join('a', ':b', ':c'), ':a:b:c')
self.assertEqual(join(b'a', b'b'), b':a:b')
self.assertEqual(join(b'', b'a:b'), b'a:b')
self.assertEqual(join(b'a:b', b'c'), b'a:b:c')
self.assertEqual(join(b'a:b', b':c'), b'a:b:c')
self.assertEqual(join(b'a', b':b', b':c'), b':a:b:c')
def test_splitext(self):
splitext = macpath.splitext
self.assertEqual(splitext(":foo.ext"), (':foo', '.ext'))
self.assertEqual(splitext("foo:foo.ext"), ('foo:foo', '.ext'))
self.assertEqual(splitext(".ext"), ('.ext', ''))
self.assertEqual(splitext("foo.ext:foo"), ('foo.ext:foo', ''))
self.assertEqual(splitext(":foo.ext:"), (':foo.ext:', ''))
self.assertEqual(splitext(""), ('', ''))
self.assertEqual(splitext("foo.bar.ext"), ('foo.bar', '.ext'))
self.assertEqual(splitext(b":foo.ext"), (b':foo', b'.ext'))
self.assertEqual(splitext(b"foo:foo.ext"), (b'foo:foo', b'.ext'))
self.assertEqual(splitext(b".ext"), (b'.ext', b''))
self.assertEqual(splitext(b"foo.ext:foo"), (b'foo.ext:foo', b''))
self.assertEqual(splitext(b":foo.ext:"), (b':foo.ext:', b''))
self.assertEqual(splitext(b""), (b'', b''))
self.assertEqual(splitext(b"foo.bar.ext"), (b'foo.bar', b'.ext'))
def test_ismount(self):
ismount = macpath.ismount
self.assertEqual(ismount("a:"), True)
self.assertEqual(ismount("a:b"), False)
self.assertEqual(ismount("a:b:"), True)
self.assertEqual(ismount(""), False)
self.assertEqual(ismount(":"), False)
self.assertEqual(ismount(b"a:"), True)
self.assertEqual(ismount(b"a:b"), False)
self.assertEqual(ismount(b"a:b:"), True)
self.assertEqual(ismount(b""), False)
self.assertEqual(ismount(b":"), False)
def test_normpath(self):
normpath = macpath.normpath
self.assertEqual(normpath("a:b"), "a:b")
self.assertEqual(normpath("a"), ":a")
self.assertEqual(normpath("a:b::c"), "a:c")
self.assertEqual(normpath("a:b:c:::d"), "a:d")
self.assertRaises(macpath.norm_error, normpath, "a::b")
self.assertRaises(macpath.norm_error, normpath, "a:b:::c")
self.assertEqual(normpath(":"), ":")
self.assertEqual(normpath("a:"), "a:")
self.assertEqual(normpath("a:b:"), "a:b")
self.assertEqual(normpath(b"a:b"), b"a:b")
self.assertEqual(normpath(b"a"), b":a")
self.assertEqual(normpath(b"a:b::c"), b"a:c")
self.assertEqual(normpath(b"a:b:c:::d"), b"a:d")
self.assertRaises(macpath.norm_error, normpath, b"a::b")
self.assertRaises(macpath.norm_error, normpath, b"a:b:::c")
self.assertEqual(normpath(b":"), b":")
self.assertEqual(normpath(b"a:"), b"a:")
self.assertEqual(normpath(b"a:b:"), b"a:b")
class MacCommonTest(test_genericpath.CommonTest):
pathmodule = macpath
def test_main():
support.run_unittest(MacPathTestCase, MacCommonTest)
if __name__ == "__main__":
test_main()
| mit |
figment/falloutsnip | Vendor/IronPython/Lib/lib2to3/fixes/fix_renames.py | 326 | 2218 |
"""Fix incompatible renames
Fixes:
* sys.maxint -> sys.maxsize
"""
# Author: Christian Heimes
# based on Collin Winter's fix_import
# Local imports
from .. import fixer_base
from ..fixer_util import Name, attr_chain
MAPPING = {"sys": {"maxint" : "maxsize"},
}
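# MAPPING maps module name -> {old attribute: new attribute}. As an illustration
# (hypothetical entry, not shipped here), adding {"os": {"getcwdu": "getcwd"}}
# would make this fixer rewrite os.getcwdu references to os.getcwd; the only
# rename actually registered is sys.maxint -> sys.maxsize.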
LOOKUP = {}
def alternates(members):
return "(" + "|".join(map(repr, members)) + ")"
def build_pattern():
#bare = set()
for module, replace in MAPPING.items():
for old_attr, new_attr in replace.items():
LOOKUP[(module, old_attr)] = new_attr
#bare.add(module)
#bare.add(old_attr)
#yield """
# import_name< 'import' (module=%r
# | dotted_as_names< any* module=%r any* >) >
# """ % (module, module)
yield """
import_from< 'from' module_name=%r 'import'
( attr_name=%r | import_as_name< attr_name=%r 'as' any >) >
""" % (module, old_attr, old_attr)
yield """
power< module_name=%r trailer< '.' attr_name=%r > any* >
""" % (module, old_attr)
#yield """bare_name=%s""" % alternates(bare)
class FixRenames(fixer_base.BaseFix):
BM_compatible = True
PATTERN = "|".join(build_pattern())
order = "pre" # Pre-order tree traversal
# Don't match the node if it's within another match
def match(self, node):
match = super(FixRenames, self).match
results = match(node)
if results:
if any(match(obj) for obj in attr_chain(node, "parent")):
return False
return results
return False
#def start_tree(self, tree, filename):
# super(FixRenames, self).start_tree(tree, filename)
# self.replace = {}
def transform(self, node, results):
mod_name = results.get("module_name")
attr_name = results.get("attr_name")
#bare_name = results.get("bare_name")
#import_mod = results.get("module")
if mod_name and attr_name:
new_attr = unicode(LOOKUP[(mod_name.value, attr_name.value)])
attr_name.replace(Name(new_attr, prefix=attr_name.prefix))
| gpl-3.0 |
apache/allura | ForgeSVN/forgesvn/model/svn.py | 2 | 31204 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from __future__ import absolute_import
import re
import os
import shutil
import string
import logging
import subprocess
import time
import operator as op
from subprocess import Popen, PIPE
from hashlib import sha1
from io import BytesIO
from datetime import datetime
import tempfile
from shutil import rmtree
import six
import tg
import pysvn
from paste.deploy.converters import asbool, asint
from pymongo.errors import DuplicateKeyError
from tg import tmpl_context as c, app_globals as g
from ming.base import Object
from ming.orm import Mapper, FieldProperty
from ming.utils import LazyProperty
from allura import model as M
from allura.lib import helpers as h
from allura.model.auth import User
from allura.model.repository import zipdir
from allura.model import repository as RM
from io import open
from six.moves import range
from six.moves import map
log = logging.getLogger(__name__)
class Repository(M.Repository):
tool_name = 'SVN'
repo_id = 'svn'
type_s = 'SVN Repository'
class __mongometa__:
name = str('svn-repository')
branches = FieldProperty([dict(name=str, object_id=str)])
_refresh_precompute = False
@LazyProperty
def _impl(self):
return SVNImplementation(self)
def latest(self, branch=None):
if self._impl is None:
return None
return self._impl.commit('HEAD')
def tarball_filename(self, revision, path=None):
fn = super(Repository, self).tarball_filename('r'+revision, path)
path = self._impl._tarball_path_clean(path, revision)
fn += ('-' + '-'.join(path.split('/'))) if path else ''
return fn
def rev_to_commit_id(self, rev):
return self._impl.rev_parse(rev)
class SVNCalledProcessError(Exception):
def __init__(self, cmd, returncode, stdout, stderr):
self.cmd = cmd
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
def __str__(self):
return "Command: '%s' returned non-zero exit status %s\nSTDOUT: %s\nSTDERR: %s" % \
(self.cmd, self.returncode, self.stdout, self.stderr)
def svn_path_exists(path, rev=None):
svn = SVNLibWrapper(pysvn.Client())
if rev:
rev = pysvn.Revision(pysvn.opt_revision_kind.number, rev)
else:
rev = pysvn.Revision(pysvn.opt_revision_kind.head)
try:
svn.info2(path, revision=rev, recurse=False)
return True
except pysvn.ClientError:
return False
class SVNLibWrapper(object):
"""Wrapper around pysvn, used for instrumentation."""
def __init__(self, client):
self.client = client
def checkout(self, *args, **kw):
return self.client.checkout(*args, **kw)
def add(self, *args, **kw):
return self.client.add(*args, **kw)
def checkin(self, *args, **kw):
return self.client.checkin(*args, **kw)
def info2(self, *args, **kw):
return self.client.info2(*args, **kw)
def log(self, *args, **kw):
return self.client.log(*args, **kw)
def cat(self, *args, **kw):
return self.client.cat(*args, **kw)
def list(self, *args, **kw):
return self.client.list(*args, **kw)
def __getattr__(self, name):
return getattr(self.client, name)
class SVNImplementation(M.RepositoryImplementation):
post_receive_template = string.Template(
'#!/bin/bash\n'
'# The following is required for site integration, do not remove/modify.\n'
'# Place user hook code in post-commit-user and it will be called from here.\n'
'curl -s $url\n'
'\n'
'DIR="$$(dirname "$${BASH_SOURCE[0]}")"\n'
'if [ -x $$DIR/post-commit-user ]; then'
' exec $$DIR/post-commit-user "$$@"\n'
'fi')
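# A sketch of how the template is rendered (the URL below is an assumption):
# in _setup_hooks(), post_receive_template.substitute(url='https://example.com/refresh')
# replaces $url with that address, while the doubled dollar signs ($$) collapse
# to literal '$' so the bash constructs $DIR, ${BASH_SOURCE[0]} and "$@" survive
# the string.Template substitution.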
def __init__(self, repo):
self._repo = repo
@LazyProperty
def _svn(self):
return SVNLibWrapper(pysvn.Client())
@LazyProperty
def _url(self):
return 'file://%s%s' % (self._repo.fs_path, self._repo.name)
def shorthand_for_commit(self, oid):
return '[r%d]' % self._revno(self.rev_parse(oid))
def url_for_commit(self, commit, url_type=None):
if hasattr(commit, '_id'):
object_id = commit._id
elif commit == self._repo.app.default_branch_name:
object_id = commit
else:
object_id = self.rev_parse(commit)
if ':' in object_id:
object_id = str(self._revno(object_id))
return os.path.join(self._repo.url(), object_id) + '/'
def init(self, default_dirs=False, skip_special_files=False):
fullname = self._setup_paths()
log.info('svn init %s', fullname)
if os.path.exists(fullname):
shutil.rmtree(fullname)
subprocess.call(['svnadmin', 'create', self._repo.name],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self._repo.fs_path)
if not skip_special_files:
self._setup_special_files()
self._repo.set_status('ready')
# make first commit with dir structure
if default_dirs:
tmp_working_dir = tempfile.mkdtemp(prefix='allura-svn-r1-',
dir=tg.config.get('scm.svn.tmpdir', g.tmpdir))
log.info('tmp dir = %s', tmp_working_dir)
self._repo._impl._svn.checkout(
'file://' + fullname, tmp_working_dir)
os.mkdir(tmp_working_dir + '/trunk')
os.mkdir(tmp_working_dir + '/tags')
os.mkdir(tmp_working_dir + '/branches')
self._repo._impl._svn.add(tmp_working_dir + '/trunk')
self._repo._impl._svn.add(tmp_working_dir + '/tags')
self._repo._impl._svn.add(tmp_working_dir + '/branches')
self._repo._impl._svn.checkin([tmp_working_dir + '/trunk',
tmp_working_dir + '/tags',
tmp_working_dir + '/branches'],
'Initial commit')
shutil.rmtree(tmp_working_dir)
log.info('deleted %s', tmp_working_dir)
def can_hotcopy(self, source_url):
if not (asbool(tg.config.get('scm.svn.hotcopy', True)) and
source_url.startswith('file://')):
return False
# check for svn version 1.7 or later
stdout, stderr, returncode = self.check_call(['svn', '--version'])
pattern = r'version (?P<maj>\d+)\.(?P<min>\d+)'
m = re.search(pattern, six.ensure_text(stdout))
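# e.g. an stdout of 'svn, version 1.8.10 ...' yields maj=1, min=8, so the check
# below computes 18 >= 17 and hotcopy is considered available.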
return m and (int(m.group('maj')) * 10 + int(m.group('min'))) >= 17
def check_call(self, cmd, fail_on_error=True):
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate(input=b'p\n')
if p.returncode != 0 and fail_on_error:
self._repo.set_status('ready')
raise SVNCalledProcessError(cmd, p.returncode, stdout, stderr)
return stdout, stderr, p.returncode
def clone_from(self, source_url):
'''Initialize a repo as a clone of another using svnsync'''
self.init(skip_special_files=True)
def set_hook(hook_name):
fn = os.path.join(self._repo.fs_path, self._repo.name,
'hooks', hook_name)
with open(fn, 'w') as fp:
fp.write('#!/bin/sh\n')
os.chmod(fn, 0o755)
def clear_hook(hook_name):
fn = os.path.join(self._repo.fs_path, self._repo.name,
'hooks', hook_name)
os.remove(fn)
self._repo.set_status('importing')
log.info('Initialize %r as a clone of %s',
self._repo, source_url)
if self.can_hotcopy(source_url):
log.info('... cloning %s via hotcopy', source_url)
# src repo is on the local filesystem - use hotcopy (faster)
source_path, dest_path = source_url[7:], self._url[7:]
fullname = os.path.join(self._repo.fs_path, self._repo.name)
# hotcopy expects dest dir to not exist yet
if os.path.exists(fullname):
shutil.rmtree(fullname)
self.check_call(['svnadmin', 'hotcopy', source_path, dest_path])
# make sure new repo has a pre-revprop-change hook,
# otherwise the sync will fail
set_hook('pre-revprop-change')
self.check_call(
['svnsync', '--non-interactive', '--allow-non-empty',
'initialize', self._url, source_url])
clear_hook('pre-revprop-change')
else:
def retry_cmd(cmd, fail_count=0):
max_fail = asint(tg.config.get('scm.import.retry_count', 50))
returncode = -1
while returncode != 0 and fail_count < max_fail:
stdout, stderr, returncode = self.check_call(cmd, fail_on_error=False)
if returncode != 0:
fail_count += 1
log.info('Attempt %s. Error running %s Details:\n%s', fail_count, cmd, stderr)
time.sleep(asint(tg.config.get('scm.import.retry_sleep_secs', 5)))
if fail_count == max_fail:
raise SVNCalledProcessError(cmd, returncode, stdout, stderr)
return fail_count
set_hook('pre-revprop-change')
fail_count = retry_cmd(['svnsync', 'init', self._url, source_url])
fail_count = retry_cmd(['svnsync', '--non-interactive', 'sync', self._url], fail_count=fail_count)
clear_hook('pre-revprop-change')
log.info('... %r cloned', self._repo)
self.update_checkout_url()
self._setup_special_files(source_url)
def update_checkout_url(self):
"""Validate the current ``checkout_url`` against the on-disk repo,
and change it if necessary.
If ``checkout_url`` is valid and not '', no changes are made.
If ``checkout_url`` is invalid or '':
- Set it to 'trunk' if repo has a top-level trunk directory
- Else, set it to ''
"""
opts = self._repo.app.config.options
if not svn_path_exists('file://{0}{1}/{2}'.format(self._repo.fs_path,
self._repo.name, opts['checkout_url'])):
opts['checkout_url'] = ''
if (not opts['checkout_url'] and
svn_path_exists(
'file://{0}{1}/trunk'.format(self._repo.fs_path,
self._repo.name))):
opts['checkout_url'] = 'trunk'
def commit(self, rev):
oid = self.rev_parse(rev)
result = M.repository.Commit.query.get(_id=oid)
if result:
result.set_context(self._repo)
return result
def rev_parse(self, rev):
if rev in ('HEAD', None):
return self._oid(self.head)
elif isinstance(rev, int) or rev.isdigit():
return self._oid(rev)
else:
return rev
def all_commit_ids(self):
"""Return a list of commit ids, starting with the head (most recent
commit) and ending with the root (first commit).
"""
head_revno = self.head
return list(map(self._oid, list(range(head_revno, 0, -1))))
def new_commits(self, all_commits=False):
head_revno = self.head
oids = [self._oid(revno) for revno in range(1, head_revno + 1)]
if all_commits:
return oids
# Find max commit id -- everything greater than that will be "unknown"
prefix = self._oid('')
q = M.repository.Commit.query.find(
dict(
type='commit',
_id={'$gt': prefix},
),
dict(_id=True)
)
seen_oids = set()
for d in q.ming_cursor.cursor:
oid = d['_id']
if not oid.startswith(prefix):
break
seen_oids.add(oid)
return [o for o in oids if o not in seen_oids]
def refresh_commit_info(self, oid, seen_object_ids, lazy=True):
from allura.model.repository import CommitDoc
ci_doc = CommitDoc.m.get(_id=oid)
if ci_doc and lazy:
return False
revno = self._revno(oid)
rev = self._revision(oid)
try:
log_entry = self._svn.log(
self._url,
revision_start=rev,
limit=1,
discover_changed_paths=True)[0]
except pysvn.ClientError:
log.info('ClientError processing %r %r, treating as empty',
oid, self._repo, exc_info=True)
log_entry = Object(date='', message='', changed_paths=[])
log_date = None
if hasattr(log_entry, 'date'):
log_date = datetime.utcfromtimestamp(log_entry.date)
user = Object(
name=h.really_unicode(log_entry.get('author', '--none--')),
email='',
date=log_date)
args = dict(
tree_id=None,
committed=user,
authored=user,
message=h.really_unicode(log_entry.get("message", "--none--")),
parent_ids=[],
child_ids=[])
if revno > 1:
args['parent_ids'] = [self._oid(revno - 1)]
if ci_doc:
ci_doc.update(**args)
ci_doc.m.save()
else:
ci_doc = CommitDoc(dict(args, _id=oid))
try:
ci_doc.m.insert()
except DuplicateKeyError:
if lazy:
return False
return True
def compute_tree_new(self, commit, tree_path='/'):
# always leading slash, never trailing
tree_path = '/' + tree_path.strip('/')
tree_id = self._tree_oid(commit._id, tree_path)
tree = RM.Tree.query.get(_id=tree_id)
if tree:
return tree_id
log.debug('Computing tree for %s: %s',
self._revno(commit._id), tree_path)
rev = self._revision(commit._id)
try:
infos = self._svn.info2(
self._url + tree_path,
revision=rev,
depth=pysvn.depth.immediates)
except pysvn.ClientError:
log.exception('Error computing tree for: %s: %s(%s)',
self._repo, commit, tree_path)
return None
log.debug('Compute tree for %d paths', len(infos))
tree_ids = []
blob_ids = []
lcd_entries = []
for path, info in infos[1:]:
if info.kind == pysvn.node_kind.dir:
tree_ids.append(Object(
id=self._tree_oid(commit._id, path),
name=path))
elif info.kind == pysvn.node_kind.file:
blob_ids.append(Object(
id=self._tree_oid(commit._id, path),
name=path))
else:
raise AssertionError()
lcd_entries.append(dict(
name=path,
commit_id=self._oid(info.last_changed_rev.number),
))
tree, is_new = RM.Tree.upsert(tree_id,
tree_ids=tree_ids,
blob_ids=blob_ids,
other_ids=[],
)
if is_new:
commit_id = self._oid(infos[0][1].last_changed_rev.number)
path = tree_path.strip('/')
RM.LastCommitDoc.m.update_partial(
{'commit_id': commit_id, 'path': path},
{'commit_id': commit_id, 'path':
path, 'entries': lcd_entries},
upsert=True)
return tree_id
def _tree_oid(self, commit_id, path):
data = 'tree\n%s\n%s' % (commit_id, h.really_unicode(path))
return sha1(data.encode('utf-8')).hexdigest()
def _blob_oid(self, commit_id, path):
data = 'blob\n%s\n%s' % (commit_id, h.really_unicode(path))
return sha1(data.encode('utf-8')).hexdigest()
def _obj_oid(self, commit_id, info):
path = info.URL[len(info.repos_root_URL):]
if info.kind == pysvn.node_kind.dir:
return self._tree_oid(commit_id, path)
else:
return self._blob_oid(commit_id, path)
def log(self, revs=None, path=None, exclude=None, id_only=True, limit=25, **kw):
"""
Returns a generator that returns information about commits reachable
by revs.
revs can be None or a list or tuple of identifiers, each of which
can be anything parsable by self.commit(). If revs is None, the
default head will be used.
If path is not None, only commits which modify files under path
will be included.
Exclude can be None or a list or tuple of identifiers, each of which
can be anything parsable by self.commit(). If not None, then any
revisions reachable by any of the revisions in exclude will not be
included.
If id_only is True, returns only the commit ID, otherwise it returns
detailed information about each commit.
Since pysvn doesn't have a generator version of log, this tries to
balance pulling too much data from SVN with calling SVN too many
times by pulling in pages of page_size at a time.
"""
if revs is None:
revno = self.head
else:
revno = max([self._revno(self.rev_parse(r)) for r in revs])
if exclude is None:
exclude = 0
else:
exclude = max([self._revno(self.rev_parse(r)) for r in exclude])
if path is None:
url = self._url
else:
url = '/'.join([self._url, path.strip('/')])
while revno > exclude:
rev = pysvn.Revision(pysvn.opt_revision_kind.number, revno)
try:
logs = self._svn.log(
url, revision_start=rev, peg_revision=rev, limit=limit,
discover_changed_paths=True)
except pysvn.ClientError as e:
if 'Unable to connect' in e.args[0]:
raise # repo error
return # no (more) history for this path
for ci in logs:
if ci.revision.number <= exclude:
return
if id_only:
yield ci.revision.number
else:
yield self._map_log(ci, url, path)
if len(logs) < limit:
# we didn't get a full page, don't bother calling SVN again
return
revno = ci.revision.number - 1
def _check_changed_path(self, changed_path, path):
if (changed_path['copyfrom_path'] and
changed_path['path'] and
path and
(len(changed_path['path']) < len(path)) and
path.startswith(changed_path['path'])):
changed_path['copyfrom_path'] = changed_path['copyfrom_path'] + \
path[len(changed_path['path']):]
changed_path['path'] = path
return changed_path
def _map_log(self, ci, url, path=None):
revno = ci.revision.number
rev = pysvn.Revision(pysvn.opt_revision_kind.number, revno)
size = None
if path:
try:
size = self._svn.list(url, revision=rev, peg_revision=rev)[0][0].size
except pysvn.ClientError:
pass
rename_details = {}
changed_paths = ci.get('changed_paths', [])
for changed_path in changed_paths:
changed_path = self._check_changed_path(changed_path, path)
if changed_path['copyfrom_path'] and changed_path['path'] == path and changed_path['action'] == 'A':
rename_details['path'] = changed_path['copyfrom_path']
rename_details['commit_url'] = self._repo.url_for_commit(
changed_path['copyfrom_revision'].number
)
break
return {
'id': revno,
'message': h.really_unicode(ci.get('message', '--none--')),
'authored': {
'name': h.really_unicode(ci.get('author', '--none--')),
'email': '',
'date': datetime.utcfromtimestamp(ci.date),
},
'committed': {
'name': h.really_unicode(ci.get('author', '--none--')),
'email': '',
'date': datetime.utcfromtimestamp(ci.date),
},
'refs': ['HEAD'] if revno == self.head else [],
'parents': [revno - 1] if revno > 1 else [],
'size': size,
'rename_details': rename_details,
}
def open_blob(self, blob):
data = self._svn.cat(
self._url + h.urlquote(blob.path()),
revision=self._revision(blob.commit._id))
return BytesIO(data)
def blob_size(self, blob):
try:
rev = self._revision(blob.commit._id)
data = self._svn.list(
self._url + blob.path(),
revision=rev,
peg_revision=rev,
dirent_fields=pysvn.SVN_DIRENT_SIZE)
except pysvn.ClientError:
log.info('ClientError getting filesize %r %r, returning 0',
blob.path(), self._repo, exc_info=True)
return 0
try:
size = data[0][0]['size']
except (IndexError, KeyError):
log.info(
'Error getting filesize: bad data from svn client %r %r, returning 0',
blob.path(), self._repo, exc_info=True)
size = 0
return size
def _setup_hooks(self, source_path=None):
'Set up the post-commit and pre-revprop-change hooks'
# setup a post-commit hook to notify Allura of changes to the repo
# the hook should also call the user-defined post-commit-user hook
text = self.post_receive_template.substitute(
url=self._repo.refresh_url())
fn = os.path.join(self._repo.fs_path, self._repo.name,
'hooks', 'post-commit')
with open(fn, 'w') as fp:
fp.write(text)
os.chmod(fn, 0o755)
def _revno(self, oid):
return int(oid.split(':')[1])
def _revision(self, oid):
return pysvn.Revision(
pysvn.opt_revision_kind.number,
self._revno(oid))
def _oid(self, revno):
return '%s:%s' % (self._repo._id, revno)
def last_commit_ids(self, commit, paths):
'''
Return a mapping {path: commit_id} of the _id of the last
commit to touch each path, starting from the given commit.
Since SVN Diffs are computed on-demand, we can't walk the
commit tree to find these. However, we can ask SVN for it
with a single call, so it shouldn't be too expensive.
NB: This assumes that all paths are direct children of a
single common parent path (i.e., you are only asking for
a subset of the nodes of a single tree, one level deep).
'''
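# Illustrative call (paths assumed): last_commit_ids(commit, ['trunk/a.txt',
# 'trunk/b.txt']) answers with a single info2() lookup on the common parent and
# returns something like {'trunk/a.txt': '<repo _id>:12', 'trunk/b.txt': '<repo _id>:9'}.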
if len(paths) == 1:
tree_path = '/' + os.path.dirname(paths[0].strip('/'))
else:
# always leading slash, never trailing
tree_path = '/' + os.path.commonprefix(paths).strip('/')
paths = [path.strip('/') for path in paths]
rev = self._revision(commit._id)
try:
infos = self._svn.info2(
self._url + tree_path,
revision=rev,
depth=pysvn.depth.immediates)
except pysvn.ClientError:
log.exception('Error computing tree for: %s: %s(%s)',
self._repo, commit, tree_path)
return None
entries = {}
for path, info in infos[1:]:
path = os.path.join(tree_path, path).strip('/')
if path in paths:
entries[path] = self._oid(info.last_changed_rev.number)
return entries
def get_changes(self, oid):
rev = self._revision(oid)
try:
log_entry = self._svn.log(
self._url,
revision_start=rev,
limit=1,
discover_changed_paths=True)[0]
except pysvn.ClientError:
log.info('ClientError processing %r %r, treating as empty',
oid, self._repo, exc_info=True)
log_entry = Object(date='', message='', changed_paths=[])
return [p.path for p in log_entry.changed_paths]
def _tarball_path_clean(self, path, rev=None):
if path:
return path.strip('/')
else:
trunk_exists = svn_path_exists('file://%s%s/%s' % (self._repo.fs_path, self._repo.name, 'trunk'), rev)
if trunk_exists:
return 'trunk'
return ''
def tarball(self, commit, path=None):
"""
Makes a svn export at `tmpdest`
then zips that into `dest/tmpfilename`
then renames that to `dest/filename`
"""
path = self._tarball_path_clean(path, commit)
if not os.path.exists(self._repo.tarball_path):
os.makedirs(self._repo.tarball_path)
if not os.path.exists(self._repo.tarball_tmpdir):
os.makedirs(self._repo.tarball_tmpdir)
archive_name = self._repo.tarball_filename(commit, path)
dest = os.path.join(self._repo.tarball_path, archive_name)
tmpdest = os.path.join(self._repo.tarball_tmpdir, archive_name)
filename = os.path.join(self._repo.tarball_path, '%s%s' % (archive_name, '.zip')).encode('utf-8')
tmpfilename = os.path.join(self._repo.tarball_path, '%s%s' % (archive_name, '.tmp')).encode('utf-8')
rmtree(dest.encode('utf8'), ignore_errors=True) # must encode into bytes or it'll fail on non-ascii filenames
rmtree(tmpdest.encode('utf8'), ignore_errors=True)
path = os.path.join(self._url, path)
try:
# need to set system locale to handle all symbols in filename
import locale
locale.setlocale(locale.LC_ALL, str('en_US.UTF-8'))
self._svn.export(path,
tmpdest,
revision=pysvn.Revision(
pysvn.opt_revision_kind.number, commit),
ignore_externals=True)
zipdir(tmpdest, tmpfilename)
os.rename(tmpfilename, filename)
finally:
rmtree(dest.encode('utf8'), ignore_errors=True)
rmtree(tmpdest.encode('utf8'), ignore_errors=True)
if os.path.exists(tmpfilename):
os.remove(tmpfilename)
def is_empty(self):
return self.head == 0
def is_file(self, path, rev=None):
url = '/'.join([self._url, path.strip('/')])
rev = pysvn.Revision(pysvn.opt_revision_kind.number,
self._revno(self.rev_parse(rev)))
try:
info = self._svn.list(
url, revision=rev, peg_revision=rev, dirent_fields=pysvn.SVN_DIRENT_KIND)[0][0]
return info.kind == pysvn.node_kind.file
except pysvn.ClientError:
return False
def symbolics_for_commit(self, commit):
return [], []
@LazyProperty
def head(self):
try:
return int(self._svn.revpropget('revision', url=self._url)[0].number)
except pysvn.ClientError as e:
error_lines = str(e).splitlines()
if all(errline.startswith(("Unable to connect", "Unable to open")) for errline in error_lines):
# simple common error e.g. empty repo directory
return 0
else:
raise
@LazyProperty
def heads(self):
return [Object(name=None, object_id=self._oid(self.head))]
@LazyProperty
def branches(self):
return []
@LazyProperty
def tags(self):
return []
def paged_diffs(self, commit_id, start=0, end=None, onlyChangedFiles=False):
result = {'added': [], 'removed': [], 'changed': [], 'copied': [], 'renamed': [], 'total': 0}
rev = self._revision(commit_id)
try:
log_info = self._svn.log(
self._url,
revision_start=rev,
revision_end=rev,
discover_changed_paths=True)
except pysvn.ClientError:
log.info('Error getting paged_diffs log of %s on %s',
commit_id, self._url, exc_info=True)
return result
if len(log_info) == 0:
return result
paths = sorted(log_info[0].changed_paths, key=op.itemgetter('path'))
result['total'] = len(paths)
for p in paths[start:end]:
if p['copyfrom_path'] is not None:
result['copied'].append({
'new': h.really_unicode(p.path),
'old': h.really_unicode(p.copyfrom_path),
'ratio': 1,
})
elif p['action'] == 'A':
result['added'].append(h.really_unicode(p.path))
elif p['action'] == 'D':
result['removed'].append(h.really_unicode(p.path))
elif p['action'] in ['M', 'R']:
# 'R' means 'Replaced', i.e.
# svn rm aaa.txt
# echo "Completely new aaa!" > aaa.txt
# svn add aaa.txt
# svn commit -m "Replace aaa.txt"
result['changed'].append(h.really_unicode(p.path))
for r in result['copied'][:]:
if r['old'] in result['removed']:
result['removed'].remove(r['old'])
result['copied'].remove(r)
result['renamed'].append(r)
if r['new'] in result['added']:
result['added'].remove(r['new'])
return result
Mapper.compile_all()
| apache-2.0 |
TheMOOCAgency/edx-platform | lms/djangoapps/grades/config/forms.py | 16 | 1207 |
"""
Defines a form for providing validation of subsection grade templates.
"""
import logging
from django import forms
from lms.djangoapps.grades.config.models import CoursePersistentGradesFlag
from opaque_keys import InvalidKeyError
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.locator import CourseLocator
log = logging.getLogger(__name__)
class CoursePersistentGradesAdminForm(forms.ModelForm):
"""Input form for subsection grade enabling, allowing us to verify data."""
class Meta(object):
model = CoursePersistentGradesFlag
fields = '__all__'
def clean_course_id(self):
"""Validate the course id"""
cleaned_id = self.cleaned_data["course_id"]
try:
course_key = CourseLocator.from_string(cleaned_id)
except InvalidKeyError:
msg = u'Course id invalid. Entered course id was: "{0}."'.format(cleaned_id)
raise forms.ValidationError(msg)
if not modulestore().has_course(course_key):
msg = u'Course not found. Entered course id was: "{0}". '.format(course_key.to_deprecated_string())
raise forms.ValidationError(msg)
return course_key
| agpl-3.0 |
mayblue9/scikit-learn | benchmarks/bench_multilabel_metrics.py | 276 | 7138 |
#!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
METRICS = {
'f1': partial(f1_score, average='micro'),
'f1-by-sample': partial(f1_score, average='samples'),
'accuracy': accuracy_score,
'hamming': hamming_loss,
'jaccard': jaccard_similarity_score,
}
FORMATS = {
'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
'dense': lambda y: y,
'csr': lambda y: sp.csr_matrix(y),
'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
formats=tuple(v for k, v in sorted(FORMATS.items())),
samples=1000, classes=4, density=.2,
n_times=5):
"""Times metric calculations for a number of inputs
Parameters
----------
metrics : array-like of callables (1d or 0d)
The metric functions to time.
formats : array-like of callables (1d or 0d)
These may transform a dense indicator matrix into multilabel
representation.
samples : array-like of ints (1d or 0d)
The number of samples to generate as input.
classes : array-like of ints (1d or 0d)
The number of classes in the input.
density : array-like of ints (1d or 0d)
The density of positive labels in the input.
n_times : int
Time calling the metric n_times times.
Returns
-------
array of floats shaped like (metrics, formats, samples, classes, density)
Time in seconds.
"""
metrics = np.atleast_1d(metrics)
samples = np.atleast_1d(samples)
classes = np.atleast_1d(classes)
density = np.atleast_1d(density)
formats = np.atleast_1d(formats)
out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
len(density)), dtype=float)
it = itertools.product(samples, classes, density)
for i, (s, c, d) in enumerate(it):
_, y_true = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
random_state=42)
_, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
random_state=84)
for j, f in enumerate(formats):
f_true = f(y_true)
f_pred = f(y_pred)
for k, metric in enumerate(metrics):
t = timeit(partial(metric, f_true, f_pred), number=n_times)
out[k, j].flat[i] = t
return out
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
format_markers=('x', '|', 'o', '+'),
metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
"""
Plot the results by metric, format and some other variable given by
x_label
"""
fig = plt.figure('scikit-learn multilabel metrics benchmarks')
plt.title(title)
ax = fig.add_subplot(111)
for i, metric in enumerate(metrics):
for j, format in enumerate(formats):
ax.plot(x_ticks, results[i, j].flat,
label='{}, {}'.format(metric, format),
marker=format_markers[j],
color=metric_colors[i % len(metric_colors)])
ax.set_xlabel(x_label)
ax.set_ylabel('Time (s)')
ax.legend()
plt.show()
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
help='Specifies metrics to benchmark, defaults to all. '
'Choices are: {}'.format(sorted(METRICS)))
ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
help='Specifies multilabel formats to benchmark '
'(defaults to all).')
ap.add_argument('--samples', type=int, default=1000,
help='The number of samples to generate')
ap.add_argument('--classes', type=int, default=10,
help='The number of classes')
ap.add_argument('--density', type=float, default=.2,
help='The average density of labels per sample')
ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
default=None,
help='Plot time with respect to this parameter varying '
'up to the specified value')
ap.add_argument('--n-steps', default=10, type=int,
help='Plot this many points for each metric')
ap.add_argument('--n-times',
default=5, type=int,
help="Time performance over n_times trials")
args = ap.parse_args()
if args.plot is not None:
max_val = getattr(args, args.plot)
if args.plot in ('classes', 'samples'):
min_val = 2
else:
min_val = 0
steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
if args.plot in ('classes', 'samples'):
steps = np.unique(np.round(steps).astype(int))
setattr(args, args.plot, steps)
if args.metrics is None:
args.metrics = sorted(METRICS)
if args.formats is None:
args.formats = sorted(FORMATS)
results = benchmark([METRICS[k] for k in args.metrics],
[FORMATS[k] for k in args.formats],
args.samples, args.classes, args.density,
args.n_times)
_tabulate(results, args.metrics, args.formats)
if args.plot is not None:
print('Displaying plot', file=sys.stderr)
title = ('Multilabel metrics with %s' %
', '.join('{0}={1}'.format(field, getattr(args, field))
for field in ['samples', 'classes', 'density']
if args.plot != field))
_plot(results, args.metrics, args.formats, title, steps, args.plot)
| bsd-3-clause |
nearlyfreeapps/python-googleadwords | examples/adspygoogle/adwords/v201109/basic_operations/get_campaigns.py | 1 | 2192 |
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all campaigns. To add a campaign, run add_campaign.py.
Tags: CampaignService.get
"""
__author__ = '[email protected] (Kevin Winter)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import AdWordsClient
PAGE_SIZE = 100
def main(client):
# Initialize appropriate service.
campaign_service = client.GetCampaignService(
'https://adwords-sandbox.google.com', 'v201109')
# Construct selector and get all campaigns.
offset = 0
selector = {
'fields': ['Id', 'Name', 'Status'],
'paging': {
'startIndex': str(offset),
'numberResults': str(PAGE_SIZE)
}
}
more_pages = True
while more_pages:
page = campaign_service.Get(selector)[0]
# Display results.
if 'entries' in page:
for campaign in page['entries']:
print ('Campaign with id \'%s\', name \'%s\', and status \'%s\' was '
'found.' % (campaign['id'], campaign['name'],
campaign['status']))
else:
print 'No campaigns were found.'
offset += PAGE_SIZE
selector['paging']['startIndex'] = str(offset)
more_pages = offset < int(page['totalNumEntries'])
print
print ('Usage: %s units, %s operations' % (client.GetUnits(),
client.GetOperations()))
if __name__ == '__main__':
# Initialize client object.
client = AdWordsClient(path=os.path.join('..', '..', '..', '..', '..'))
main(client)
| apache-2.0 |
klenks/jobsportal | venv/bin/activate_this.py | 1076 | 1137 |
"""By using execfile(this_file, dict(__file__=this_file)) you will
activate this virtualenv environment.
This can be used when you must use an existing Python interpreter, not
the virtualenv bin/python
"""
try:
__file__
except NameError:
raise AssertionError(
"You must run this like execfile('path/to/activate_this.py', dict(__file__='path/to/activate_this.py'))")
import sys
import os
old_os_path = os.environ.get('PATH', '')
os.environ['PATH'] = os.path.dirname(os.path.abspath(__file__)) + os.pathsep + old_os_path
base = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if sys.platform == 'win32':
site_packages = os.path.join(base, 'Lib', 'site-packages')
else:
site_packages = os.path.join(base, 'lib', 'python%s' % sys.version[:3], 'site-packages')
prev_sys_path = list(sys.path)
import site
site.addsitedir(site_packages)
sys.real_prefix = sys.prefix
sys.prefix = base
# Move the added items to the front of the path:
new_sys_path = []
for item in list(sys.path):
if item not in prev_sys_path:
new_sys_path.append(item)
sys.path.remove(item)
sys.path[:0] = new_sys_path
| mit |
40223133/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/xml/dom/minidom.py | 727 | 66854 |
"""Simple implementation of the Level 1 DOM.
Namespaces and other minor Level 2 features are also supported.
parse("foo.xml")
parseString("<foo><bar/></foo>")
Todo:
=====
* convenience methods for getting elements and text.
* more testing
* bring some of the writer and linearizer code into conformance with this
interface
* SAX 2 namespaces
"""
import io
import xml.dom
from xml.dom import EMPTY_NAMESPACE, EMPTY_PREFIX, XMLNS_NAMESPACE, domreg
from xml.dom.minicompat import *
from xml.dom.xmlbuilder import DOMImplementationLS, DocumentLS
# This is used by the ID-cache invalidation checks; the list isn't
# actually complete, since the nodes being checked will never be the
# DOCUMENT_NODE or DOCUMENT_FRAGMENT_NODE. (The node being checked is
# the node being added or removed, not the node being modified.)
#
_nodeTypes_with_children = (xml.dom.Node.ELEMENT_NODE,
xml.dom.Node.ENTITY_REFERENCE_NODE)
class Node(xml.dom.Node):
namespaceURI = None # this is non-null only for elements and attributes
parentNode = None
ownerDocument = None
nextSibling = None
previousSibling = None
prefix = EMPTY_PREFIX # non-null only for NS elements and attributes
def __bool__(self):
return True
def toxml(self, encoding=None):
return self.toprettyxml("", "", encoding)
def toprettyxml(self, indent="\t", newl="\n", encoding=None):
if encoding is None:
writer = io.StringIO()
else:
writer = io.TextIOWrapper(io.BytesIO(),
encoding=encoding,
errors="xmlcharrefreplace",
newline='\n')
if self.nodeType == Node.DOCUMENT_NODE:
# Can pass encoding only to document, to put it into XML header
self.writexml(writer, "", indent, newl, encoding)
else:
self.writexml(writer, "", indent, newl)
if encoding is None:
return writer.getvalue()
else:
return writer.detach().getvalue()
def hasChildNodes(self):
return bool(self.childNodes)
def _get_childNodes(self):
return self.childNodes
def _get_firstChild(self):
if self.childNodes:
return self.childNodes[0]
def _get_lastChild(self):
if self.childNodes:
return self.childNodes[-1]
def insertBefore(self, newChild, refChild):
if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE:
for c in tuple(newChild.childNodes):
self.insertBefore(c, refChild)
### The DOM does not clearly specify what to return in this case
return newChild
if newChild.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(newChild), repr(self)))
if newChild.parentNode is not None:
newChild.parentNode.removeChild(newChild)
if refChild is None:
self.appendChild(newChild)
else:
try:
index = self.childNodes.index(refChild)
except ValueError:
raise xml.dom.NotFoundErr()
if newChild.nodeType in _nodeTypes_with_children:
_clear_id_cache(self)
self.childNodes.insert(index, newChild)
newChild.nextSibling = refChild
refChild.previousSibling = newChild
if index:
node = self.childNodes[index-1]
node.nextSibling = newChild
newChild.previousSibling = node
else:
newChild.previousSibling = None
newChild.parentNode = self
return newChild
def appendChild(self, node):
if node.nodeType == self.DOCUMENT_FRAGMENT_NODE:
for c in tuple(node.childNodes):
self.appendChild(c)
### The DOM does not clearly specify what to return in this case
return node
if node.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(node), repr(self)))
elif node.nodeType in _nodeTypes_with_children:
_clear_id_cache(self)
if node.parentNode is not None:
node.parentNode.removeChild(node)
_append_child(self, node)
node.nextSibling = None
return node
def replaceChild(self, newChild, oldChild):
if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE:
refChild = oldChild.nextSibling
self.removeChild(oldChild)
return self.insertBefore(newChild, refChild)
if newChild.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(newChild), repr(self)))
if newChild is oldChild:
return
if newChild.parentNode is not None:
newChild.parentNode.removeChild(newChild)
try:
index = self.childNodes.index(oldChild)
except ValueError:
raise xml.dom.NotFoundErr()
self.childNodes[index] = newChild
newChild.parentNode = self
oldChild.parentNode = None
if (newChild.nodeType in _nodeTypes_with_children
or oldChild.nodeType in _nodeTypes_with_children):
_clear_id_cache(self)
newChild.nextSibling = oldChild.nextSibling
newChild.previousSibling = oldChild.previousSibling
oldChild.nextSibling = None
oldChild.previousSibling = None
if newChild.previousSibling:
newChild.previousSibling.nextSibling = newChild
if newChild.nextSibling:
newChild.nextSibling.previousSibling = newChild
return oldChild
def removeChild(self, oldChild):
try:
self.childNodes.remove(oldChild)
except ValueError:
raise xml.dom.NotFoundErr()
if oldChild.nextSibling is not None:
oldChild.nextSibling.previousSibling = oldChild.previousSibling
if oldChild.previousSibling is not None:
oldChild.previousSibling.nextSibling = oldChild.nextSibling
oldChild.nextSibling = oldChild.previousSibling = None
if oldChild.nodeType in _nodeTypes_with_children:
_clear_id_cache(self)
oldChild.parentNode = None
return oldChild
def normalize(self):
L = []
for child in self.childNodes:
if child.nodeType == Node.TEXT_NODE:
if not child.data:
# empty text node; discard
if L:
L[-1].nextSibling = child.nextSibling
if child.nextSibling:
child.nextSibling.previousSibling = child.previousSibling
child.unlink()
elif L and L[-1].nodeType == child.nodeType:
# collapse text node
node = L[-1]
node.data = node.data + child.data
node.nextSibling = child.nextSibling
if child.nextSibling:
child.nextSibling.previousSibling = node
child.unlink()
else:
L.append(child)
else:
L.append(child)
if child.nodeType == Node.ELEMENT_NODE:
child.normalize()
self.childNodes[:] = L
def cloneNode(self, deep):
return _clone_node(self, deep, self.ownerDocument or self)
def isSupported(self, feature, version):
return self.ownerDocument.implementation.hasFeature(feature, version)
def _get_localName(self):
# Overridden in Element and Attr where localName can be Non-Null
return None
# Node interfaces from Level 3 (WD 9 April 2002)
def isSameNode(self, other):
return self is other
def getInterface(self, feature):
if self.isSupported(feature, None):
return self
else:
return None
# The "user data" functions use a dictionary that is only present
# if some user data has been set, so be careful not to assume it
# exists.
def getUserData(self, key):
try:
return self._user_data[key][0]
except (AttributeError, KeyError):
return None
def setUserData(self, key, data, handler):
old = None
try:
d = self._user_data
except AttributeError:
d = {}
self._user_data = d
if key in d:
old = d[key][0]
if data is None:
# ignore handlers passed for None
handler = None
if old is not None:
del d[key]
else:
d[key] = (data, handler)
return old
def _call_user_data_handler(self, operation, src, dst):
if hasattr(self, "_user_data"):
for key, (data, handler) in list(self._user_data.items()):
if handler is not None:
handler.handle(operation, key, data, src, dst)
# minidom-specific API:
def unlink(self):
self.parentNode = self.ownerDocument = None
if self.childNodes:
for child in self.childNodes:
child.unlink()
self.childNodes = NodeList()
self.previousSibling = None
self.nextSibling = None
# A Node is its own context manager, to ensure that an unlink() call occurs.
# This is similar to how a file object works.
def __enter__(self):
return self
def __exit__(self, et, ev, tb):
self.unlink()
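# Illustrative use of the context-manager behaviour: with a parsed document,
# e.g. "with xml.dom.minidom.parseString('<a/>') as doc: ...", unlink() is
# invoked automatically when the block exits.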
defproperty(Node, "firstChild", doc="First child node, or None.")
defproperty(Node, "lastChild", doc="Last child node, or None.")
defproperty(Node, "localName", doc="Namespace-local name of this node.")
def _append_child(self, node):
# fast path with less checks; usable by DOM builders if careful
childNodes = self.childNodes
if childNodes:
last = childNodes[-1]
node.previousSibling = last
last.nextSibling = node
childNodes.append(node)
node.parentNode = self
def _in_document(node):
# return True iff node is part of a document tree
while node is not None:
if node.nodeType == Node.DOCUMENT_NODE:
return True
node = node.parentNode
return False
def _write_data(writer, data):
"Writes datachars to writer."
if data:
data = data.replace("&", "&amp;").replace("<", "&lt;"). \
replace("\"", "&quot;").replace(">", "&gt;")
writer.write(data)
def _get_elements_by_tagName_helper(parent, name, rc):
for node in parent.childNodes:
if node.nodeType == Node.ELEMENT_NODE and \
(name == "*" or node.tagName == name):
rc.append(node)
_get_elements_by_tagName_helper(node, name, rc)
return rc
def _get_elements_by_tagName_ns_helper(parent, nsURI, localName, rc):
for node in parent.childNodes:
if node.nodeType == Node.ELEMENT_NODE:
if ((localName == "*" or node.localName == localName) and
(nsURI == "*" or node.namespaceURI == nsURI)):
rc.append(node)
_get_elements_by_tagName_ns_helper(node, nsURI, localName, rc)
return rc
class DocumentFragment(Node):
nodeType = Node.DOCUMENT_FRAGMENT_NODE
nodeName = "#document-fragment"
nodeValue = None
attributes = None
parentNode = None
_child_node_types = (Node.ELEMENT_NODE,
Node.TEXT_NODE,
Node.CDATA_SECTION_NODE,
Node.ENTITY_REFERENCE_NODE,
Node.PROCESSING_INSTRUCTION_NODE,
Node.COMMENT_NODE,
Node.NOTATION_NODE)
def __init__(self):
self.childNodes = NodeList()
class Attr(Node):
__slots__=('_name', '_value', 'namespaceURI',
'_prefix', 'childNodes', '_localName', 'ownerDocument', 'ownerElement')
nodeType = Node.ATTRIBUTE_NODE
attributes = None
specified = False
_is_id = False
_child_node_types = (Node.TEXT_NODE, Node.ENTITY_REFERENCE_NODE)
def __init__(self, qName, namespaceURI=EMPTY_NAMESPACE, localName=None,
prefix=None):
self.ownerElement = None
self._name = qName
self.namespaceURI = namespaceURI
self._prefix = prefix
self.childNodes = NodeList()
# Add the single child node that represents the value of the attr
self.childNodes.append(Text())
# nodeValue and value are set elsewhere
def _get_localName(self):
try:
return self._localName
except AttributeError:
return self.nodeName.split(":", 1)[-1]
def _get_name(self):
return self.name
def _get_specified(self):
return self.specified
def _get_name(self):
return self._name
def _set_name(self, value):
self._name = value
if self.ownerElement is not None:
_clear_id_cache(self.ownerElement)
nodeName = name = property(_get_name, _set_name)
def _get_value(self):
return self._value
def _set_value(self, value):
self._value = value
self.childNodes[0].data = value
if self.ownerElement is not None:
_clear_id_cache(self.ownerElement)
self.childNodes[0].data = value
nodeValue = value = property(_get_value, _set_value)
def _get_prefix(self):
return self._prefix
def _set_prefix(self, prefix):
nsuri = self.namespaceURI
if prefix == "xmlns":
if nsuri and nsuri != XMLNS_NAMESPACE:
raise xml.dom.NamespaceErr(
"illegal use of 'xmlns' prefix for the wrong namespace")
self._prefix = prefix
if prefix is None:
newName = self.localName
else:
newName = "%s:%s" % (prefix, self.localName)
if self.ownerElement:
_clear_id_cache(self.ownerElement)
self.name = newName
prefix = property(_get_prefix, _set_prefix)
def unlink(self):
# This implementation does not call the base implementation
# since most of that is not needed, and the expense of the
# method call is not warranted. We duplicate the removal of
# children, but that's all we needed from the base class.
elem = self.ownerElement
if elem is not None:
del elem._attrs[self.nodeName]
del elem._attrsNS[(self.namespaceURI, self.localName)]
if self._is_id:
self._is_id = False
elem._magic_id_nodes -= 1
self.ownerDocument._magic_id_count -= 1
for child in self.childNodes:
child.unlink()
del self.childNodes[:]
def _get_isId(self):
if self._is_id:
return True
doc = self.ownerDocument
elem = self.ownerElement
if doc is None or elem is None:
return False
info = doc._get_elem_info(elem)
if info is None:
return False
if self.namespaceURI:
return info.isIdNS(self.namespaceURI, self.localName)
else:
return info.isId(self.nodeName)
def _get_schemaType(self):
doc = self.ownerDocument
elem = self.ownerElement
if doc is None or elem is None:
return _no_type
info = doc._get_elem_info(elem)
if info is None:
return _no_type
if self.namespaceURI:
return info.getAttributeTypeNS(self.namespaceURI, self.localName)
else:
return info.getAttributeType(self.nodeName)
defproperty(Attr, "isId", doc="True if this attribute is an ID.")
defproperty(Attr, "localName", doc="Namespace-local name of this attribute.")
defproperty(Attr, "schemaType", doc="Schema type for this attribute.")
class NamedNodeMap(object):
"""The attribute list is a transient interface to the underlying
dictionaries. Mutations here will change the underlying element's
dictionary.
Ordering is imposed artificially and does not reflect the order of
attributes as found in an input document.
"""
__slots__ = ('_attrs', '_attrsNS', '_ownerElement')
def __init__(self, attrs, attrsNS, ownerElement):
self._attrs = attrs
self._attrsNS = attrsNS
self._ownerElement = ownerElement
def _get_length(self):
return len(self._attrs)
def item(self, index):
try:
return self[list(self._attrs.keys())[index]]
except IndexError:
return None
def items(self):
L = []
for node in self._attrs.values():
L.append((node.nodeName, node.value))
return L
def itemsNS(self):
L = []
for node in self._attrs.values():
L.append(((node.namespaceURI, node.localName), node.value))
return L
def __contains__(self, key):
if isinstance(key, str):
return key in self._attrs
else:
return key in self._attrsNS
def keys(self):
return self._attrs.keys()
def keysNS(self):
return self._attrsNS.keys()
def values(self):
return self._attrs.values()
def get(self, name, value=None):
return self._attrs.get(name, value)
__len__ = _get_length
def _cmp(self, other):
if self._attrs is getattr(other, "_attrs", None):
return 0
else:
return (id(self) > id(other)) - (id(self) < id(other))
def __eq__(self, other):
return self._cmp(other) == 0
def __ge__(self, other):
return self._cmp(other) >= 0
def __gt__(self, other):
return self._cmp(other) > 0
def __le__(self, other):
return self._cmp(other) <= 0
def __lt__(self, other):
return self._cmp(other) < 0
def __ne__(self, other):
return self._cmp(other) != 0
def __getitem__(self, attname_or_tuple):
if isinstance(attname_or_tuple, tuple):
return self._attrsNS[attname_or_tuple]
else:
return self._attrs[attname_or_tuple]
# same as set
def __setitem__(self, attname, value):
if isinstance(value, str):
try:
node = self._attrs[attname]
except KeyError:
node = Attr(attname)
node.ownerDocument = self._ownerElement.ownerDocument
self.setNamedItem(node)
node.value = value
else:
if not isinstance(value, Attr):
raise TypeError("value must be a string or Attr object")
node = value
self.setNamedItem(node)
def getNamedItem(self, name):
try:
return self._attrs[name]
except KeyError:
return None
def getNamedItemNS(self, namespaceURI, localName):
try:
return self._attrsNS[(namespaceURI, localName)]
except KeyError:
return None
def removeNamedItem(self, name):
n = self.getNamedItem(name)
if n is not None:
_clear_id_cache(self._ownerElement)
del self._attrs[n.nodeName]
del self._attrsNS[(n.namespaceURI, n.localName)]
if hasattr(n, 'ownerElement'):
n.ownerElement = None
return n
else:
raise xml.dom.NotFoundErr()
def removeNamedItemNS(self, namespaceURI, localName):
n = self.getNamedItemNS(namespaceURI, localName)
if n is not None:
_clear_id_cache(self._ownerElement)
del self._attrsNS[(n.namespaceURI, n.localName)]
del self._attrs[n.nodeName]
if hasattr(n, 'ownerElement'):
n.ownerElement = None
return n
else:
raise xml.dom.NotFoundErr()
def setNamedItem(self, node):
if not isinstance(node, Attr):
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(node), repr(self)))
old = self._attrs.get(node.name)
if old:
old.unlink()
self._attrs[node.name] = node
self._attrsNS[(node.namespaceURI, node.localName)] = node
node.ownerElement = self._ownerElement
_clear_id_cache(node.ownerElement)
return old
def setNamedItemNS(self, node):
return self.setNamedItem(node)
def __delitem__(self, attname_or_tuple):
node = self[attname_or_tuple]
_clear_id_cache(node.ownerElement)
node.unlink()
def __getstate__(self):
return self._attrs, self._attrsNS, self._ownerElement
def __setstate__(self, state):
self._attrs, self._attrsNS, self._ownerElement = state
defproperty(NamedNodeMap, "length",
doc="Number of nodes in the NamedNodeMap.")
AttributeList = NamedNodeMap
class TypeInfo(object):
__slots__ = 'namespace', 'name'
def __init__(self, namespace, name):
self.namespace = namespace
self.name = name
def __repr__(self):
if self.namespace:
return "<TypeInfo %r (from %r)>" % (self.name, self.namespace)
else:
return "<TypeInfo %r>" % self.name
def _get_name(self):
return self.name
def _get_namespace(self):
return self.namespace
_no_type = TypeInfo(None, None)
class Element(Node):
__slots__=('ownerDocument', 'parentNode', 'tagName', 'nodeName', 'prefix',
'namespaceURI', '_localName', 'childNodes', '_attrs', '_attrsNS',
'nextSibling', 'previousSibling')
nodeType = Node.ELEMENT_NODE
nodeValue = None
schemaType = _no_type
_magic_id_nodes = 0
_child_node_types = (Node.ELEMENT_NODE,
Node.PROCESSING_INSTRUCTION_NODE,
Node.COMMENT_NODE,
Node.TEXT_NODE,
Node.CDATA_SECTION_NODE,
Node.ENTITY_REFERENCE_NODE)
def __init__(self, tagName, namespaceURI=EMPTY_NAMESPACE, prefix=None,
localName=None):
self.parentNode = None
self.tagName = self.nodeName = tagName
self.prefix = prefix
self.namespaceURI = namespaceURI
self.childNodes = NodeList()
self.nextSibling = self.previousSibling = None
        # Attribute dictionaries are lazily created;
        # attributes are double-indexed:
        #    tagName -> Attribute
        #    URI,localName -> Attribute
        # In the future, consider lazy generation of attribute
        # objects; this is too tricky for now because of
        # headaches with namespaces.
self._attrs = None
self._attrsNS = None
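    # Illustrative sketch (not part of the DOM API; the namespace URI and
    # attribute names below are placeholders).  Because attributes are
    # double-indexed, the same Attr node is reachable both by qualified name
    # and by its (namespaceURI, localName) pair:
    #
    #     elem.setAttributeNS("http://example.org/ns", "ex:id", "n1")
    #     elem.getAttribute("ex:id")                           # -> "n1"
    #     elem.getAttributeNS("http://example.org/ns", "id")   # -> "n1"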
def _ensure_attributes(self):
if self._attrs is None:
self._attrs = {}
self._attrsNS = {}
def _get_localName(self):
try:
return self._localName
except AttributeError:
return self.tagName.split(":", 1)[-1]
def _get_tagName(self):
return self.tagName
def unlink(self):
if self._attrs is not None:
for attr in list(self._attrs.values()):
attr.unlink()
self._attrs = None
self._attrsNS = None
Node.unlink(self)
def getAttribute(self, attname):
if self._attrs is None:
return ""
try:
return self._attrs[attname].value
except KeyError:
return ""
def getAttributeNS(self, namespaceURI, localName):
if self._attrsNS is None:
return ""
try:
return self._attrsNS[(namespaceURI, localName)].value
except KeyError:
return ""
def setAttribute(self, attname, value):
attr = self.getAttributeNode(attname)
if attr is None:
attr = Attr(attname)
attr.value = value # also sets nodeValue
attr.ownerDocument = self.ownerDocument
self.setAttributeNode(attr)
elif value != attr.value:
attr.value = value
if attr.isId:
_clear_id_cache(self)
def setAttributeNS(self, namespaceURI, qualifiedName, value):
prefix, localname = _nssplit(qualifiedName)
attr = self.getAttributeNodeNS(namespaceURI, localname)
if attr is None:
attr = Attr(qualifiedName, namespaceURI, localname, prefix)
attr.value = value
attr.ownerDocument = self.ownerDocument
self.setAttributeNode(attr)
else:
if value != attr.value:
attr.value = value
if attr.isId:
_clear_id_cache(self)
if attr.prefix != prefix:
attr.prefix = prefix
attr.nodeName = qualifiedName
def getAttributeNode(self, attrname):
if self._attrs is None:
return None
return self._attrs.get(attrname)
def getAttributeNodeNS(self, namespaceURI, localName):
if self._attrsNS is None:
return None
return self._attrsNS.get((namespaceURI, localName))
def setAttributeNode(self, attr):
if attr.ownerElement not in (None, self):
raise xml.dom.InuseAttributeErr("attribute node already owned")
self._ensure_attributes()
old1 = self._attrs.get(attr.name, None)
if old1 is not None:
self.removeAttributeNode(old1)
old2 = self._attrsNS.get((attr.namespaceURI, attr.localName), None)
if old2 is not None and old2 is not old1:
self.removeAttributeNode(old2)
_set_attribute_node(self, attr)
if old1 is not attr:
# It might have already been part of this node, in which case
# it doesn't represent a change, and should not be returned.
return old1
if old2 is not attr:
return old2
setAttributeNodeNS = setAttributeNode
def removeAttribute(self, name):
if self._attrsNS is None:
raise xml.dom.NotFoundErr()
try:
attr = self._attrs[name]
except KeyError:
raise xml.dom.NotFoundErr()
self.removeAttributeNode(attr)
def removeAttributeNS(self, namespaceURI, localName):
if self._attrsNS is None:
raise xml.dom.NotFoundErr()
try:
attr = self._attrsNS[(namespaceURI, localName)]
except KeyError:
raise xml.dom.NotFoundErr()
self.removeAttributeNode(attr)
def removeAttributeNode(self, node):
if node is None:
raise xml.dom.NotFoundErr()
try:
self._attrs[node.name]
except KeyError:
raise xml.dom.NotFoundErr()
_clear_id_cache(self)
node.unlink()
# Restore this since the node is still useful and otherwise
# unlinked
node.ownerDocument = self.ownerDocument
removeAttributeNodeNS = removeAttributeNode
def hasAttribute(self, name):
if self._attrs is None:
return False
return name in self._attrs
def hasAttributeNS(self, namespaceURI, localName):
if self._attrsNS is None:
return False
return (namespaceURI, localName) in self._attrsNS
def getElementsByTagName(self, name):
return _get_elements_by_tagName_helper(self, name, NodeList())
def getElementsByTagNameNS(self, namespaceURI, localName):
return _get_elements_by_tagName_ns_helper(
self, namespaceURI, localName, NodeList())
def __repr__(self):
return "<DOM Element: %s at %#x>" % (self.tagName, id(self))
def writexml(self, writer, indent="", addindent="", newl=""):
# indent = current indentation
# addindent = indentation to add to higher levels
# newl = newline string
writer.write(indent+"<" + self.tagName)
attrs = self._get_attributes()
a_names = sorted(attrs.keys())
for a_name in a_names:
writer.write(" %s=\"" % a_name)
_write_data(writer, attrs[a_name].value)
writer.write("\"")
if self.childNodes:
writer.write(">")
if (len(self.childNodes) == 1 and
self.childNodes[0].nodeType == Node.TEXT_NODE):
self.childNodes[0].writexml(writer, '', '', '')
else:
writer.write(newl)
for node in self.childNodes:
node.writexml(writer, indent+addindent, addindent, newl)
writer.write(indent)
writer.write("</%s>%s" % (self.tagName, newl))
else:
writer.write("/>%s"%(newl))
def _get_attributes(self):
self._ensure_attributes()
return NamedNodeMap(self._attrs, self._attrsNS, self)
def hasAttributes(self):
if self._attrs:
return True
else:
return False
# DOM Level 3 attributes, based on the 22 Oct 2002 draft
def setIdAttribute(self, name):
idAttr = self.getAttributeNode(name)
self.setIdAttributeNode(idAttr)
def setIdAttributeNS(self, namespaceURI, localName):
idAttr = self.getAttributeNodeNS(namespaceURI, localName)
self.setIdAttributeNode(idAttr)
def setIdAttributeNode(self, idAttr):
if idAttr is None or not self.isSameNode(idAttr.ownerElement):
raise xml.dom.NotFoundErr()
if _get_containing_entref(self) is not None:
raise xml.dom.NoModificationAllowedErr()
if not idAttr._is_id:
idAttr._is_id = True
self._magic_id_nodes += 1
self.ownerDocument._magic_id_count += 1
_clear_id_cache(self)
defproperty(Element, "attributes",
doc="NamedNodeMap of attributes on the element.")
defproperty(Element, "localName",
doc="Namespace-local name of this element.")
def _set_attribute_node(element, attr):
_clear_id_cache(element)
element._ensure_attributes()
element._attrs[attr.name] = attr
element._attrsNS[(attr.namespaceURI, attr.localName)] = attr
# This creates a circular reference, but Element.unlink()
# breaks the cycle since the references to the attribute
# dictionaries are tossed.
attr.ownerElement = element
class Childless:
"""Mixin that makes childless-ness easy to implement and avoids
the complexity of the Node methods that deal with children.
"""
__slots__ = ()
attributes = None
childNodes = EmptyNodeList()
firstChild = None
lastChild = None
def _get_firstChild(self):
return None
def _get_lastChild(self):
return None
def appendChild(self, node):
raise xml.dom.HierarchyRequestErr(
self.nodeName + " nodes cannot have children")
def hasChildNodes(self):
return False
def insertBefore(self, newChild, refChild):
raise xml.dom.HierarchyRequestErr(
self.nodeName + " nodes do not have children")
def removeChild(self, oldChild):
raise xml.dom.NotFoundErr(
self.nodeName + " nodes do not have children")
def normalize(self):
# For childless nodes, normalize() has nothing to do.
pass
def replaceChild(self, newChild, oldChild):
raise xml.dom.HierarchyRequestErr(
self.nodeName + " nodes do not have children")
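# Illustrative sketch (values are placeholders, not part of this module): any
# Childless subclass rejects child manipulation, e.g. a freshly created
# Comment node:
#
#     c = Document().createComment("note")
#     c.hasChildNodes()        # -> False
#     c.appendChild(c)         # raises xml.dom.HierarchyRequestErr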
class ProcessingInstruction(Childless, Node):
nodeType = Node.PROCESSING_INSTRUCTION_NODE
__slots__ = ('target', 'data')
def __init__(self, target, data):
self.target = target
self.data = data
# nodeValue is an alias for data
def _get_nodeValue(self):
return self.data
def _set_nodeValue(self, value):
        self.data = value
nodeValue = property(_get_nodeValue, _set_nodeValue)
# nodeName is an alias for target
def _get_nodeName(self):
return self.target
def _set_nodeName(self, value):
self.target = value
nodeName = property(_get_nodeName, _set_nodeName)
def writexml(self, writer, indent="", addindent="", newl=""):
writer.write("%s<?%s %s?>%s" % (indent,self.target, self.data, newl))
class CharacterData(Childless, Node):
__slots__=('_data', 'ownerDocument','parentNode', 'previousSibling', 'nextSibling')
def __init__(self):
self.ownerDocument = self.parentNode = None
self.previousSibling = self.nextSibling = None
self._data = ''
Node.__init__(self)
def _get_length(self):
return len(self.data)
__len__ = _get_length
def _get_data(self):
return self._data
def _set_data(self, data):
self._data = data
data = nodeValue = property(_get_data, _set_data)
def __repr__(self):
data = self.data
if len(data) > 10:
dotdotdot = "..."
else:
dotdotdot = ""
return '<DOM %s node "%r%s">' % (
self.__class__.__name__, data[0:10], dotdotdot)
def substringData(self, offset, count):
if offset < 0:
raise xml.dom.IndexSizeErr("offset cannot be negative")
if offset >= len(self.data):
raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
if count < 0:
raise xml.dom.IndexSizeErr("count cannot be negative")
return self.data[offset:offset+count]
def appendData(self, arg):
self.data = self.data + arg
def insertData(self, offset, arg):
if offset < 0:
raise xml.dom.IndexSizeErr("offset cannot be negative")
if offset >= len(self.data):
raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
if arg:
self.data = "%s%s%s" % (
self.data[:offset], arg, self.data[offset:])
def deleteData(self, offset, count):
if offset < 0:
raise xml.dom.IndexSizeErr("offset cannot be negative")
if offset >= len(self.data):
raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
if count < 0:
raise xml.dom.IndexSizeErr("count cannot be negative")
if count:
self.data = self.data[:offset] + self.data[offset+count:]
def replaceData(self, offset, count, arg):
if offset < 0:
raise xml.dom.IndexSizeErr("offset cannot be negative")
if offset >= len(self.data):
raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
if count < 0:
raise xml.dom.IndexSizeErr("count cannot be negative")
if count:
self.data = "%s%s%s" % (
self.data[:offset], arg, self.data[offset+count:])
defproperty(CharacterData, "length", doc="Length of the string data.")
class Text(CharacterData):
__slots__ = ()
nodeType = Node.TEXT_NODE
nodeName = "#text"
attributes = None
def splitText(self, offset):
if offset < 0 or offset > len(self.data):
raise xml.dom.IndexSizeErr("illegal offset value")
newText = self.__class__()
newText.data = self.data[offset:]
newText.ownerDocument = self.ownerDocument
next = self.nextSibling
if self.parentNode and self in self.parentNode.childNodes:
if next is None:
self.parentNode.appendChild(newText)
else:
self.parentNode.insertBefore(newText, next)
self.data = self.data[:offset]
return newText
def writexml(self, writer, indent="", addindent="", newl=""):
_write_data(writer, "%s%s%s" % (indent, self.data, newl))
# DOM Level 3 (WD 9 April 2002)
def _get_wholeText(self):
L = [self.data]
n = self.previousSibling
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
L.insert(0, n.data)
n = n.previousSibling
else:
break
n = self.nextSibling
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
L.append(n.data)
n = n.nextSibling
else:
break
return ''.join(L)
def replaceWholeText(self, content):
# XXX This needs to be seriously changed if minidom ever
# supports EntityReference nodes.
parent = self.parentNode
n = self.previousSibling
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
next = n.previousSibling
parent.removeChild(n)
n = next
else:
break
n = self.nextSibling
if not content:
parent.removeChild(self)
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
next = n.nextSibling
parent.removeChild(n)
n = next
else:
break
if content:
self.data = content
return self
else:
return None
def _get_isWhitespaceInElementContent(self):
if self.data.strip():
return False
elem = _get_containing_element(self)
if elem is None:
return False
info = self.ownerDocument._get_elem_info(elem)
if info is None:
return False
else:
return info.isElementContent()
defproperty(Text, "isWhitespaceInElementContent",
doc="True iff this text node contains only whitespace"
" and is in element content.")
defproperty(Text, "wholeText",
doc="The text of all logically-adjacent text nodes.")
def _get_containing_element(node):
c = node.parentNode
while c is not None:
if c.nodeType == Node.ELEMENT_NODE:
return c
c = c.parentNode
return None
def _get_containing_entref(node):
c = node.parentNode
while c is not None:
if c.nodeType == Node.ENTITY_REFERENCE_NODE:
return c
c = c.parentNode
return None
class Comment(CharacterData):
nodeType = Node.COMMENT_NODE
nodeName = "#comment"
def __init__(self, data):
CharacterData.__init__(self)
self._data = data
def writexml(self, writer, indent="", addindent="", newl=""):
if "--" in self.data:
raise ValueError("'--' is not allowed in a comment node")
writer.write("%s<!--%s-->%s" % (indent, self.data, newl))
class CDATASection(Text):
__slots__ = ()
nodeType = Node.CDATA_SECTION_NODE
nodeName = "#cdata-section"
def writexml(self, writer, indent="", addindent="", newl=""):
if self.data.find("]]>") >= 0:
raise ValueError("']]>' not allowed in a CDATA section")
writer.write("<![CDATA[%s]]>" % self.data)
class ReadOnlySequentialNamedNodeMap(object):
__slots__ = '_seq',
def __init__(self, seq=()):
# seq should be a list or tuple
self._seq = seq
def __len__(self):
return len(self._seq)
def _get_length(self):
return len(self._seq)
def getNamedItem(self, name):
for n in self._seq:
if n.nodeName == name:
return n
def getNamedItemNS(self, namespaceURI, localName):
for n in self._seq:
if n.namespaceURI == namespaceURI and n.localName == localName:
return n
def __getitem__(self, name_or_tuple):
if isinstance(name_or_tuple, tuple):
node = self.getNamedItemNS(*name_or_tuple)
else:
node = self.getNamedItem(name_or_tuple)
if node is None:
raise KeyError(name_or_tuple)
return node
def item(self, index):
if index < 0:
return None
try:
return self._seq[index]
except IndexError:
return None
def removeNamedItem(self, name):
raise xml.dom.NoModificationAllowedErr(
"NamedNodeMap instance is read-only")
def removeNamedItemNS(self, namespaceURI, localName):
raise xml.dom.NoModificationAllowedErr(
"NamedNodeMap instance is read-only")
def setNamedItem(self, node):
raise xml.dom.NoModificationAllowedErr(
"NamedNodeMap instance is read-only")
def setNamedItemNS(self, node):
raise xml.dom.NoModificationAllowedErr(
"NamedNodeMap instance is read-only")
def __getstate__(self):
return [self._seq]
def __setstate__(self, state):
self._seq = state[0]
defproperty(ReadOnlySequentialNamedNodeMap, "length",
doc="Number of entries in the NamedNodeMap.")
class Identified:
"""Mix-in class that supports the publicId and systemId attributes."""
__slots__ = 'publicId', 'systemId'
def _identified_mixin_init(self, publicId, systemId):
self.publicId = publicId
self.systemId = systemId
def _get_publicId(self):
return self.publicId
def _get_systemId(self):
return self.systemId
class DocumentType(Identified, Childless, Node):
nodeType = Node.DOCUMENT_TYPE_NODE
nodeValue = None
name = None
publicId = None
systemId = None
internalSubset = None
def __init__(self, qualifiedName):
self.entities = ReadOnlySequentialNamedNodeMap()
self.notations = ReadOnlySequentialNamedNodeMap()
if qualifiedName:
prefix, localname = _nssplit(qualifiedName)
self.name = localname
self.nodeName = self.name
def _get_internalSubset(self):
return self.internalSubset
def cloneNode(self, deep):
if self.ownerDocument is None:
# it's ok
clone = DocumentType(None)
clone.name = self.name
clone.nodeName = self.name
operation = xml.dom.UserDataHandler.NODE_CLONED
if deep:
clone.entities._seq = []
clone.notations._seq = []
for n in self.notations._seq:
notation = Notation(n.nodeName, n.publicId, n.systemId)
clone.notations._seq.append(notation)
n._call_user_data_handler(operation, n, notation)
for e in self.entities._seq:
entity = Entity(e.nodeName, e.publicId, e.systemId,
e.notationName)
entity.actualEncoding = e.actualEncoding
entity.encoding = e.encoding
entity.version = e.version
clone.entities._seq.append(entity)
                    e._call_user_data_handler(operation, e, entity)
self._call_user_data_handler(operation, self, clone)
return clone
else:
return None
def writexml(self, writer, indent="", addindent="", newl=""):
writer.write("<!DOCTYPE ")
writer.write(self.name)
if self.publicId:
writer.write("%s PUBLIC '%s'%s '%s'"
% (newl, self.publicId, newl, self.systemId))
elif self.systemId:
writer.write("%s SYSTEM '%s'" % (newl, self.systemId))
if self.internalSubset is not None:
writer.write(" [")
writer.write(self.internalSubset)
writer.write("]")
writer.write(">"+newl)
class Entity(Identified, Node):
attributes = None
nodeType = Node.ENTITY_NODE
nodeValue = None
actualEncoding = None
encoding = None
version = None
def __init__(self, name, publicId, systemId, notation):
self.nodeName = name
self.notationName = notation
self.childNodes = NodeList()
self._identified_mixin_init(publicId, systemId)
def _get_actualEncoding(self):
return self.actualEncoding
def _get_encoding(self):
return self.encoding
def _get_version(self):
return self.version
def appendChild(self, newChild):
raise xml.dom.HierarchyRequestErr(
"cannot append children to an entity node")
def insertBefore(self, newChild, refChild):
raise xml.dom.HierarchyRequestErr(
"cannot insert children below an entity node")
def removeChild(self, oldChild):
raise xml.dom.HierarchyRequestErr(
"cannot remove children from an entity node")
def replaceChild(self, newChild, oldChild):
raise xml.dom.HierarchyRequestErr(
"cannot replace children of an entity node")
class Notation(Identified, Childless, Node):
nodeType = Node.NOTATION_NODE
nodeValue = None
def __init__(self, name, publicId, systemId):
self.nodeName = name
self._identified_mixin_init(publicId, systemId)
class DOMImplementation(DOMImplementationLS):
_features = [("core", "1.0"),
("core", "2.0"),
("core", None),
("xml", "1.0"),
("xml", "2.0"),
("xml", None),
("ls-load", "3.0"),
("ls-load", None),
]
def hasFeature(self, feature, version):
if version == "":
version = None
return (feature.lower(), version) in self._features
def createDocument(self, namespaceURI, qualifiedName, doctype):
if doctype and doctype.parentNode is not None:
raise xml.dom.WrongDocumentErr(
"doctype object owned by another DOM tree")
doc = self._create_document()
add_root_element = not (namespaceURI is None
and qualifiedName is None
and doctype is None)
if not qualifiedName and add_root_element:
# The spec is unclear what to raise here; SyntaxErr
# would be the other obvious candidate. Since Xerces raises
# InvalidCharacterErr, and since SyntaxErr is not listed
# for createDocument, that seems to be the better choice.
# XXX: need to check for illegal characters here and in
# createElement.
            # DOM Level III clears this up when talking about the return value
            # of this function. If namespaceURI, qName and DocType are all
            # Null, the document is returned without a document element;
            # otherwise, if doctype or namespaceURI are not None,
            # we go back to the above problem.
raise xml.dom.InvalidCharacterErr("Element with no name")
if add_root_element:
prefix, localname = _nssplit(qualifiedName)
if prefix == "xml" \
and namespaceURI != "http://www.w3.org/XML/1998/namespace":
raise xml.dom.NamespaceErr("illegal use of 'xml' prefix")
if prefix and not namespaceURI:
raise xml.dom.NamespaceErr(
"illegal use of prefix without namespaces")
element = doc.createElementNS(namespaceURI, qualifiedName)
if doctype:
doc.appendChild(doctype)
doc.appendChild(element)
if doctype:
doctype.parentNode = doctype.ownerDocument = doc
doc.doctype = doctype
doc.implementation = self
return doc
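    # Illustrative sketch of the behaviour discussed above (the namespace URI
    # and names are placeholders, not part of this module):
    #
    #     impl = getDOMImplementation()
    #     impl.createDocument(None, None, None)    # document with no root element
    #     doc = impl.createDocument("http://example.org/ns", "ex:root", None)
    #     doc.documentElement.tagName              # -> 'ex:root'
    #     # passing a doctype (or namespaceURI) without a qualified name
    #     # raises xml.dom.InvalidCharacterErr, as implemented above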
def createDocumentType(self, qualifiedName, publicId, systemId):
doctype = DocumentType(qualifiedName)
doctype.publicId = publicId
doctype.systemId = systemId
return doctype
# DOM Level 3 (WD 9 April 2002)
def getInterface(self, feature):
if self.hasFeature(feature, None):
return self
else:
return None
# internal
def _create_document(self):
return Document()
class ElementInfo(object):
"""Object that represents content-model information for an element.
This implementation is not expected to be used in practice; DOM
builders should provide implementations which do the right thing
    using information available to them.
"""
__slots__ = 'tagName',
def __init__(self, name):
self.tagName = name
def getAttributeType(self, aname):
return _no_type
def getAttributeTypeNS(self, namespaceURI, localName):
return _no_type
def isElementContent(self):
return False
def isEmpty(self):
"""Returns true iff this element is declared to have an EMPTY
content model."""
return False
def isId(self, aname):
"""Returns true iff the named attribute is a DTD-style ID."""
return False
def isIdNS(self, namespaceURI, localName):
"""Returns true iff the identified attribute is a DTD-style ID."""
return False
def __getstate__(self):
return self.tagName
def __setstate__(self, state):
self.tagName = state
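# Illustrative sketch (hypothetical subclass, not part of this module): a DOM
# builder with DTD knowledge could supply its own element info, for example
# treating every attribute literally named "id" as a DTD-style ID:
#
#     class DTDElementInfo(ElementInfo):
#         __slots__ = ()
#         def isId(self, aname):
#             return aname == "id"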
def _clear_id_cache(node):
if node.nodeType == Node.DOCUMENT_NODE:
node._id_cache.clear()
node._id_search_stack = None
elif _in_document(node):
node.ownerDocument._id_cache.clear()
node.ownerDocument._id_search_stack= None
class Document(Node, DocumentLS):
__slots__ = ('_elem_info', 'doctype',
'_id_search_stack', 'childNodes', '_id_cache')
_child_node_types = (Node.ELEMENT_NODE, Node.PROCESSING_INSTRUCTION_NODE,
Node.COMMENT_NODE, Node.DOCUMENT_TYPE_NODE)
implementation = DOMImplementation()
nodeType = Node.DOCUMENT_NODE
nodeName = "#document"
nodeValue = None
attributes = None
parentNode = None
previousSibling = nextSibling = None
# Document attributes from Level 3 (WD 9 April 2002)
actualEncoding = None
encoding = None
standalone = None
version = None
strictErrorChecking = False
errorHandler = None
documentURI = None
_magic_id_count = 0
def __init__(self):
self.doctype = None
self.childNodes = NodeList()
# mapping of (namespaceURI, localName) -> ElementInfo
# and tagName -> ElementInfo
self._elem_info = {}
self._id_cache = {}
self._id_search_stack = None
def _get_elem_info(self, element):
if element.namespaceURI:
key = element.namespaceURI, element.localName
else:
key = element.tagName
return self._elem_info.get(key)
def _get_actualEncoding(self):
return self.actualEncoding
def _get_doctype(self):
return self.doctype
def _get_documentURI(self):
return self.documentURI
def _get_encoding(self):
return self.encoding
def _get_errorHandler(self):
return self.errorHandler
def _get_standalone(self):
return self.standalone
def _get_strictErrorChecking(self):
return self.strictErrorChecking
def _get_version(self):
return self.version
def appendChild(self, node):
if node.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(node), repr(self)))
if node.parentNode is not None:
# This needs to be done before the next test since this
# may *be* the document element, in which case it should
# end up re-ordered to the end.
node.parentNode.removeChild(node)
if node.nodeType == Node.ELEMENT_NODE \
and self._get_documentElement():
raise xml.dom.HierarchyRequestErr(
"two document elements disallowed")
return Node.appendChild(self, node)
def removeChild(self, oldChild):
try:
self.childNodes.remove(oldChild)
except ValueError:
raise xml.dom.NotFoundErr()
oldChild.nextSibling = oldChild.previousSibling = None
oldChild.parentNode = None
if self.documentElement is oldChild:
self.documentElement = None
return oldChild
def _get_documentElement(self):
for node in self.childNodes:
if node.nodeType == Node.ELEMENT_NODE:
return node
def unlink(self):
if self.doctype is not None:
self.doctype.unlink()
self.doctype = None
Node.unlink(self)
def cloneNode(self, deep):
if not deep:
return None
clone = self.implementation.createDocument(None, None, None)
clone.encoding = self.encoding
clone.standalone = self.standalone
clone.version = self.version
for n in self.childNodes:
childclone = _clone_node(n, deep, clone)
assert childclone.ownerDocument.isSameNode(clone)
clone.childNodes.append(childclone)
if childclone.nodeType == Node.DOCUMENT_NODE:
assert clone.documentElement is None
elif childclone.nodeType == Node.DOCUMENT_TYPE_NODE:
assert clone.doctype is None
clone.doctype = childclone
childclone.parentNode = clone
self._call_user_data_handler(xml.dom.UserDataHandler.NODE_CLONED,
self, clone)
return clone
def createDocumentFragment(self):
d = DocumentFragment()
d.ownerDocument = self
return d
def createElement(self, tagName):
e = Element(tagName)
e.ownerDocument = self
return e
def createTextNode(self, data):
if not isinstance(data, str):
raise TypeError("node contents must be a string")
t = Text()
t.data = data
t.ownerDocument = self
return t
def createCDATASection(self, data):
if not isinstance(data, str):
raise TypeError("node contents must be a string")
c = CDATASection()
c.data = data
c.ownerDocument = self
return c
def createComment(self, data):
c = Comment(data)
c.ownerDocument = self
return c
def createProcessingInstruction(self, target, data):
p = ProcessingInstruction(target, data)
p.ownerDocument = self
return p
def createAttribute(self, qName):
a = Attr(qName)
a.ownerDocument = self
a.value = ""
return a
def createElementNS(self, namespaceURI, qualifiedName):
prefix, localName = _nssplit(qualifiedName)
e = Element(qualifiedName, namespaceURI, prefix)
e.ownerDocument = self
return e
def createAttributeNS(self, namespaceURI, qualifiedName):
prefix, localName = _nssplit(qualifiedName)
a = Attr(qualifiedName, namespaceURI, localName, prefix)
a.ownerDocument = self
a.value = ""
return a
# A couple of implementation-specific helpers to create node types
# not supported by the W3C DOM specs:
def _create_entity(self, name, publicId, systemId, notationName):
e = Entity(name, publicId, systemId, notationName)
e.ownerDocument = self
return e
def _create_notation(self, name, publicId, systemId):
n = Notation(name, publicId, systemId)
n.ownerDocument = self
return n
def getElementById(self, id):
if id in self._id_cache:
return self._id_cache[id]
if not (self._elem_info or self._magic_id_count):
return None
stack = self._id_search_stack
if stack is None:
# we never searched before, or the cache has been cleared
stack = [self.documentElement]
self._id_search_stack = stack
elif not stack:
# Previous search was completed and cache is still valid;
# no matching node.
return None
result = None
while stack:
node = stack.pop()
# add child elements to stack for continued searching
stack.extend([child for child in node.childNodes
if child.nodeType in _nodeTypes_with_children])
# check this node
info = self._get_elem_info(node)
if info:
# We have to process all ID attributes before
# returning in order to get all the attributes set to
# be IDs using Element.setIdAttribute*().
for attr in node.attributes.values():
if attr.namespaceURI:
if info.isIdNS(attr.namespaceURI, attr.localName):
self._id_cache[attr.value] = node
if attr.value == id:
result = node
elif not node._magic_id_nodes:
break
elif info.isId(attr.name):
self._id_cache[attr.value] = node
if attr.value == id:
result = node
elif not node._magic_id_nodes:
break
elif attr._is_id:
self._id_cache[attr.value] = node
if attr.value == id:
result = node
elif node._magic_id_nodes == 1:
break
elif node._magic_id_nodes:
for attr in node.attributes.values():
if attr._is_id:
self._id_cache[attr.value] = node
if attr.value == id:
result = node
if result is not None:
break
return result
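    # Illustrative sketch (attribute names and values are placeholders;
    # assumes ``elem`` was created by ``doc`` and is attached under its
    # document element): an attribute only acts as an ID for getElementById()
    # if the DTD/schema says so or if it is flagged explicitly:
    #
    #     elem.setAttribute("myid", "x1")
    #     elem.setIdAttribute("myid")      # mark it as a "magic" ID attribute
    #     doc.getElementById("x1")         # -> elem
    #     doc.getElementById("missing")    # -> None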
def getElementsByTagName(self, name):
return _get_elements_by_tagName_helper(self, name, NodeList())
def getElementsByTagNameNS(self, namespaceURI, localName):
return _get_elements_by_tagName_ns_helper(
self, namespaceURI, localName, NodeList())
def isSupported(self, feature, version):
return self.implementation.hasFeature(feature, version)
def importNode(self, node, deep):
if node.nodeType == Node.DOCUMENT_NODE:
raise xml.dom.NotSupportedErr("cannot import document nodes")
elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
raise xml.dom.NotSupportedErr("cannot import document type nodes")
return _clone_node(node, deep, self)
def writexml(self, writer, indent="", addindent="", newl="", encoding=None):
if encoding is None:
writer.write('<?xml version="1.0" ?>'+newl)
else:
writer.write('<?xml version="1.0" encoding="%s"?>%s' % (
encoding, newl))
for node in self.childNodes:
node.writexml(writer, indent, addindent, newl)
# DOM Level 3 (WD 9 April 2002)
def renameNode(self, n, namespaceURI, name):
if n.ownerDocument is not self:
raise xml.dom.WrongDocumentErr(
"cannot rename nodes from other documents;\n"
"expected %s,\nfound %s" % (self, n.ownerDocument))
if n.nodeType not in (Node.ELEMENT_NODE, Node.ATTRIBUTE_NODE):
raise xml.dom.NotSupportedErr(
"renameNode() only applies to element and attribute nodes")
if namespaceURI != EMPTY_NAMESPACE:
if ':' in name:
prefix, localName = name.split(':', 1)
if ( prefix == "xmlns"
and namespaceURI != xml.dom.XMLNS_NAMESPACE):
raise xml.dom.NamespaceErr(
"illegal use of 'xmlns' prefix")
else:
if ( name == "xmlns"
and namespaceURI != xml.dom.XMLNS_NAMESPACE
and n.nodeType == Node.ATTRIBUTE_NODE):
raise xml.dom.NamespaceErr(
"illegal use of the 'xmlns' attribute")
prefix = None
localName = name
else:
prefix = None
localName = None
if n.nodeType == Node.ATTRIBUTE_NODE:
element = n.ownerElement
if element is not None:
is_id = n._is_id
element.removeAttributeNode(n)
else:
element = None
n.prefix = prefix
n._localName = localName
n.namespaceURI = namespaceURI
n.nodeName = name
if n.nodeType == Node.ELEMENT_NODE:
n.tagName = name
else:
# attribute node
n.name = name
if element is not None:
element.setAttributeNode(n)
if is_id:
element.setIdAttributeNode(n)
# It's not clear from a semantic perspective whether we should
# call the user data handlers for the NODE_RENAMED event since
# we're re-using the existing node. The draft spec has been
# interpreted as meaning "no, don't call the handler unless a
# new node is created."
return n
defproperty(Document, "documentElement",
doc="Top-level element of this document.")
def _clone_node(node, deep, newOwnerDocument):
"""
Clone a node and give it the new owner document.
Called by Node.cloneNode and Document.importNode
"""
if node.ownerDocument.isSameNode(newOwnerDocument):
operation = xml.dom.UserDataHandler.NODE_CLONED
else:
operation = xml.dom.UserDataHandler.NODE_IMPORTED
if node.nodeType == Node.ELEMENT_NODE:
clone = newOwnerDocument.createElementNS(node.namespaceURI,
node.nodeName)
for attr in node.attributes.values():
clone.setAttributeNS(attr.namespaceURI, attr.nodeName, attr.value)
a = clone.getAttributeNodeNS(attr.namespaceURI, attr.localName)
a.specified = attr.specified
if deep:
for child in node.childNodes:
c = _clone_node(child, deep, newOwnerDocument)
clone.appendChild(c)
elif node.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
clone = newOwnerDocument.createDocumentFragment()
if deep:
for child in node.childNodes:
c = _clone_node(child, deep, newOwnerDocument)
clone.appendChild(c)
elif node.nodeType == Node.TEXT_NODE:
clone = newOwnerDocument.createTextNode(node.data)
elif node.nodeType == Node.CDATA_SECTION_NODE:
clone = newOwnerDocument.createCDATASection(node.data)
elif node.nodeType == Node.PROCESSING_INSTRUCTION_NODE:
clone = newOwnerDocument.createProcessingInstruction(node.target,
node.data)
elif node.nodeType == Node.COMMENT_NODE:
clone = newOwnerDocument.createComment(node.data)
elif node.nodeType == Node.ATTRIBUTE_NODE:
clone = newOwnerDocument.createAttributeNS(node.namespaceURI,
node.nodeName)
clone.specified = True
clone.value = node.value
elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
assert node.ownerDocument is not newOwnerDocument
operation = xml.dom.UserDataHandler.NODE_IMPORTED
clone = newOwnerDocument.implementation.createDocumentType(
node.name, node.publicId, node.systemId)
clone.ownerDocument = newOwnerDocument
if deep:
clone.entities._seq = []
clone.notations._seq = []
for n in node.notations._seq:
notation = Notation(n.nodeName, n.publicId, n.systemId)
notation.ownerDocument = newOwnerDocument
clone.notations._seq.append(notation)
if hasattr(n, '_call_user_data_handler'):
n._call_user_data_handler(operation, n, notation)
for e in node.entities._seq:
entity = Entity(e.nodeName, e.publicId, e.systemId,
e.notationName)
entity.actualEncoding = e.actualEncoding
entity.encoding = e.encoding
entity.version = e.version
entity.ownerDocument = newOwnerDocument
clone.entities._seq.append(entity)
if hasattr(e, '_call_user_data_handler'):
                    e._call_user_data_handler(operation, e, entity)
else:
# Note the cloning of Document and DocumentType nodes is
# implementation specific. minidom handles those cases
# directly in the cloneNode() methods.
raise xml.dom.NotSupportedErr("Cannot clone node %s" % repr(node))
    # Check for _call_user_data_handler() since this could conceivably be
# used with other DOM implementations (one of the FourThought
# DOMs, perhaps?).
if hasattr(node, '_call_user_data_handler'):
node._call_user_data_handler(operation, node, clone)
return clone
def _nssplit(qualifiedName):
fields = qualifiedName.split(':', 1)
if len(fields) == 2:
return fields
else:
return (None, fields[0])
def _do_pulldom_parse(func, args, kwargs):
events = func(*args, **kwargs)
toktype, rootNode = events.getEvent()
events.expandNode(rootNode)
events.clear()
return rootNode
def parse(file, parser=None, bufsize=None):
"""Parse a file into a DOM by filename or file object."""
if parser is None and not bufsize:
from xml.dom import expatbuilder
return expatbuilder.parse(file)
else:
from xml.dom import pulldom
return _do_pulldom_parse(pulldom.parse, (file,),
{'parser': parser, 'bufsize': bufsize})
def parseString(string, parser=None):
"""Parse a file into a DOM from a string."""
if parser is None:
from xml.dom import expatbuilder
return expatbuilder.parseString(string)
else:
from xml.dom import pulldom
return _do_pulldom_parse(pulldom.parseString, (string,),
{'parser': parser})
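# Hedged usage sketch (standard DOM idioms; the XML snippet is a placeholder):
#
#     doc = parseString('<root a="1"><child/></root>')
#     root = doc.documentElement
#     root.getAttribute("a")                         # -> '1'
#     root.getElementsByTagName("child")[0].tagName  # -> 'child'
#     doc.unlink()                  # break reference cycles when finished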
def getDOMImplementation(features=None):
if features:
if isinstance(features, str):
features = domreg._parse_feature_string(features)
for f, v in features:
if not Document.implementation.hasFeature(f, v):
return None
return Document.implementation
| gpl-3.0 |
cts2/rf2service | server/Server.py | 1 | 3285 | # -*- coding: utf-8 -*-
# Copyright (c) 2013, Mayo Clinic
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the <ORGANIZATION> nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
from rf2db.utils import urlutil
from rf2db.db.RF2DBConnection import cp_values
from rf2db.db.RF2FileCommon import rf2_values
from server.BaseNode import BaseNode, expose, xmlVal, htmlHead
html = htmlHead + """
<html>
<head>
<title>RF2 Server Configuration</title>
</head>
<body>
<h1>Database Configuration</h1>
<table border="1">
<tr>
<td>Host</td>
<td>%(host)s
</tr>
<tr>
<td>Port</td>
<td>%(port)s</td>
</tr>
<tr>
<td>DB</td>
<td>%(db)s</td>
</tr>
<tr>
<td>Charset</td>
<td>%(charset)s</td>
</tr>
</table>
<h1>URL Settings</h1>
<table border="1">
<tr>
<td>Host</td>
<td>%(href_host)s</td>
</tr>
<tr>
<td>Root</td>
<td>%(href_root)s</td>
</tr>
<tr>
<td>Relative URI</td>
<td>%(reluri)s</td>
</tr>
<tr>
<td>Base URI</td>
<td>%(baseuri)s</td>
</tr>
<tr>
<td>Complete URI</td>
<td>%(completeuri)s</td>
</tr>
</table>
</body>
</html>"""
class ServerConf(BaseNode):
namespace = ''
@expose
def default(self, *args, **kwargs):
host = cp_values.host
port = cp_values.port
db = cp_values.db
charset = cp_values.charset
href_host = urlutil.href_settings.host
href_root = urlutil.href_settings.root
reluri = urlutil.relative_uri()
baseuri = urlutil.base_uri()
completeuri = urlutil.complete_uri()
return html % vars()
@expose
def status(self, *args, **kwargs):
return (xmlVal % ('<status>OK</status><rf2_release>%s</rf2_release>' % rf2_values.release),
(0, None))
| bsd-3-clause |
tkaitchuck/nupic | external/darwin64/lib/python2.6/site-packages/numpy/random/setupscons.py | 100 | 1384 | import glob
from os.path import join, split
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration, get_mathlibs
config = Configuration('random',parent_package,top_path)
source_files = [join('mtrand', i) for i in ['mtrand.c',
'mtrand.pyx',
'numpy.pxi',
'randomkit.c',
'randomkit.h',
'Python.pxi',
'initarray.c',
'initarray.h',
'distributions.c',
'distributions.h',
]]
config.add_sconscript('SConstruct', source_files = source_files)
config.add_data_files(('.', join('mtrand', 'randomkit.h')))
config.add_data_dir('tests')
return config
def testcode_wincrypt():
return """\
/* check to see if _WIN32 is defined */
int main(int argc, char *argv[])
{
#ifdef _WIN32
return 0;
#else
return 1;
#endif
}
"""
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
| gpl-3.0 |
rodrigob/fuel | fuel/datasets/iris.py | 6 | 1038 | from fuel.datasets import H5PYDataset
from fuel.utils import find_in_data_path
class Iris(H5PYDataset):
u"""Iris dataset.
    Iris [IRIS] is a simple pattern recognition dataset consisting of
    3 classes of 50 examples each, with 4 real-valued features per example,
    where each class refers to a type of iris plant. It is accessible
    through the UCI Machine Learning repository [UCI].
.. [IRIS] Ronald A. Fisher, *The use of multiple measurements in
taxonomic problems*, Annual Eugenics, 7, Part II, 179-188,
September 1936.
.. [UCI] https://archive.ics.uci.edu/ml/datasets/Iris
Parameters
----------
which_sets : tuple of str
Which split to load. Valid value is 'all'
corresponding to 150 examples.
"""
filename = 'iris.hdf5'
def __init__(self, which_sets, **kwargs):
kwargs.setdefault('load_in_memory', True)
super(Iris, self).__init__(
file_or_path=find_in_data_path(self.filename),
which_sets=which_sets, **kwargs)
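# Hedged usage sketch (assumes iris.hdf5 is already present in Fuel's data
# path; the request indices are placeholders and the returned tuple follows
# dataset.sources order):
#
#     dataset = Iris(('all',))
#     print(dataset.num_examples)    # expected to be 150
#     handle = dataset.open()
#     features, targets = dataset.get_data(handle, request=list(range(10)))
#     dataset.close(handle)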
| mit |
vipints/oqtans | oqtans_tools/KIRMES/0.8/src/kmotif.py | 2 | 10908 | """
#######################################################################################
# #
# kmotif.py is a command-line front-end to the KIRMES pipeline #
# BibTeX entries below. Please cite: #
#    Sebastian J. Schultheiss, Wolfgang Busch, Jan U. Lohmann, Oliver Kohlbacher,     #
# and Gunnar Raetsch (2009) KIRMES: Kernel-based identification of regulatory #
#    modules in euchromatic sequences. Bioinformatics 25(16):2126-33.                 #
# #
#    Copyright (C) 2007-2010 Sebastian J. Schultheiss <[email protected]>                #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, see http://www.gnu.org/licenses #
# or write to the Free Software Foundation, Inc., 51 Franklin Street, #
# Fifth Floor, Boston, MA 02110-1301 USA #
# #
#######################################################################################
# #
# Original Author: Sebastian J. Schultheiss, version 0.8.0 #
# Please add a notice of any modifications here: #
# #
# #
#######################################################################################
"""
__version__ = "0.8.0"
__license__ = "GNU General Public License"
# bibtex entry
__author__ = """
@article{Schultheiss2009KIRMES,
author = {Sebastian J. Schultheiss and Wolfgang Busch and Jan U. Lohmann and Oliver Kohlbacher and Gunnar Raetsch},
title = {{KIRMES}: Kernel-based identification of regulatory modules in euchromatic sequences},
year = {2009},
journal = {Bioinformatics},
publisher = {Oxford Journals},
volume = {25},
issue = {16},
pages = {2126--33},
month = {April},
doi = {10.1093/bioinformatics/btp278},
abstract = {Motivation: Understanding transcriptional regulation is one of the
main challenges in computational biology. An important problem is
the identification of transcription factor binding sites in promoter
regions of potential transcription factor target genes. It is
typically approached by position weight matrix-based motif
identification algorithms using Gibbs sampling, or heuristics to
extend seed oligos. Such algorithms succeed in identifying single,
relatively well-conserved binding sites, but tend to fail when it
comes to the identification of combinations of several degenerate
binding sites, as those often found in cis-regulatory modules.
Results: We propose a new algorithm that combines the benefits of
existing motif finding with the ones of Support Vector Machines (SVMs)
to find degenerate motifs in order to improve the modeling of
regulatory modules. In experiments on microarray data from Arabidopsis
thaliana, we were able to show that the newly developed strategy
significantly improves the recognition of transcription factor targets.
Availability: The PYTHON source code (open source-licensed under GPL),
the data for the experiments and a Galaxy-based web service are
available at http://www.fml.mpg.de/raetsch/projects/kirmes.
Contact: [email protected]},
URL = {http://bioinformatics.oxfordjournals.org/cgi/content/abstract/btp278v1}
}
"""
__usage__ = """Usage:
To find motifs in 2 FASTA files, supply positives and negatives:
%prog -t -p positives.fasta -n negatives.fasta [options]
"""
# system imports
import os
from optparse import OptionParser, OptionValueError
import sys
from shutil import copy
try:
# own imports
import kirmes_ini
from Inclusive import Inclusive
from Kmers import Kmers, createIMMFile
except ImportError:
print "ImportError:"
print "One of the required imports failed, please make sure the files "
print "kirmes_ini.py or KIRMES_INI.pyc, "
print "DBTools.py or DBTools.pyc and FileTools.py or FileTools.pyc"
print "are present in the current directory, or download this program again."
raise
def check_file(option, opt_str, value, _parser):
"""See if a file exists on the file system, raises an OptionValueError"""
if value == "None":
value = None
elif not os.path.isfile(value):
raise OptionValueError("Cannot open %s as a file. Please check if it exists." % value)
setattr(_parser.values, option.dest, value)
def check_pickle(option, opt_str, value, _parser):
"""Check for the kernel file in testing mode"""
if not _parser.values.train:
check_file(option, opt_str, value, _parser)
else:
setattr(_parser.values, option.dest, value)
def optionparse(parser):
"""Completes the option parser object, adding defaults and options"""
parser.add_option("-t", "--type", type = "string", dest = "type",
help = "motif finding strategy to use: MotifSampler (kims), PRIORITY (krgp), k-mer (kkmc), or just the locator, must supply a valid imm file (kiml) [default %default]")
parser.add_option("-p", "--positives", dest = "positives",
action = "callback", callback = check_file, type = "string",
help="path to the fasta file with a positive set of regulatory regions [default %default]")
parser.add_option("-n", "--negatives", dest = "negatives", type = "string",
action = "callback", callback = check_file,
help="path to the fasta file with a negative set of regulatory regions [default %default]")
parser.add_option("-i", "--pgff", dest = "pgff", type = "string",
help="path to the output gff file of motif positions from the positive regulatory regions [default %default]")
parser.add_option("-j", "--ngff", dest = "ngff", type = "string",
help="path to the output gff file of motif positions from the negative regulatory regions [default %default]")
parser.add_option("-x", "--matrix", dest = "imm", type = "string",
help="path to the input or output imm file of motif motif models as position weight matrices [default %default]")
parser.add_option("-m", "--motifs", type = "int", dest = "nof_motifs",
help = "number of motifs to consider [default %default]")
parser.add_option("-l", "--length", type = "int", dest = "motif_length",
help = "length of the motifs to search for [default %default]")
parser.set_defaults(positives = kirmes_ini.POSITIVES_FILENAME,
negatives = kirmes_ini.NEGATIVES_FILENAME,
nof_motifs = kirmes_ini.NOF_MOTIFS,
motif_length = kirmes_ini.MOTIF_LENGTH,
type = kirmes_ini.SAMPLING_STRATEGY,
ngff = kirmes_ini.NGFF_FILENAME,
pgff = kirmes_ini.PGFF_FILENAME,
imm = kirmes_ini.IMM_FILENAME)
def motifScan(fastafile, matrixfile, gfffile):
"""Search for motifs with existing matrix defintion"""
ive = Inclusive()
ive.fastafilename = fastafile
ive.immfilename = matrixfile
gff = ive.getMotifGff()
copy(gff, gfffile)
def kims(options):
"""Run the MotifSampler Program"""
ive = Inclusive()
ive.fastafilename = options.positives
ive.settings.setMotifLength(options.motif_length)
ive.settings.setNofMotifs(options.nof_motifs)
pgff = ive.getMotifGff()
copy(pgff, options.pgff)
#second round, find motifs in negative sequences
ive.fastafilename = options.negatives
ngff = ive.getMotifGff()
copy(ngff, options.ngff)
imm = ive.getMatrixFile()
copy(imm, options.imm)
def krgp(options):
"""Run the Priority Program (falls back to Inclusive)"""
kims(options)
def kkmc(options):
"""Run the kmer counting strategy, search for motifs with KIML"""
mer = Kmers()
mer.setFastaFile(options.positives)
mer.settings.setMotifLength(options.motif_length)
pkmerdict = mer.countKmers(options.nof_motifs)
createIMMFile(options.imm, pkmerdict.keys())
motifScan(options.positives, options.imm, options.pgff)
motifScan(options.negatives, options.imm, options.ngff)
def kiml(options):
"""Search for motifs with existing matrix defintion"""
motifScan(options.positives, options.imm, options.pgff)
if options.negatives:
motifScan(options.negatives, options.imm, options.ngff)
#######################
# main #
#######################
def main(argv = None):
"""main() block"""
if argv is None:
argv = sys.argv
parser = OptionParser(version = "%prog " + __version__, usage = __usage__)
optionparse(parser)
(options, args) = parser.parse_args()
if options.type == "krgp":
krgp(options)
elif options.type == "kkmc":
kkmc(options)
elif options.type == "kiml":
kiml(options)
else:
kims(options)
if __name__ == "__main__":
main()
| bsd-3-clause |
f-prettyland/angr | angr/block.py | 4 | 7002 | import logging
l = logging.getLogger("angr.block")
import pyvex
from archinfo import ArchARM
from .engines import SimEngineVEX
DEFAULT_VEX_ENGINE = SimEngineVEX() # this is only used when Block is not initialized with a project
class Block(object):
BLOCK_MAX_SIZE = 4096
__slots__ = ['_project', '_bytes', '_vex', 'thumb', '_capstone', 'addr', 'size', 'arch', 'instructions',
'_instruction_addrs', '_opt_level'
]
def __init__(self, addr, project=None, arch=None, size=None, byte_string=None, vex=None, thumb=False, backup_state=None,
opt_level=None, num_inst=None, traceflags=0):
# set up arch
if project is not None:
self.arch = project.arch
else:
self.arch = arch
if self.arch is None:
raise ValueError('Either "project" or "arch" has to be specified.')
if isinstance(self.arch, ArchARM):
if addr & 1 == 1:
thumb = True
if thumb:
addr |= 1
else:
thumb = False
self._project = project
self.thumb = thumb
self.addr = addr
self._opt_level = opt_level
if self._project is None and byte_string is None:
raise ValueError('"byte_string" has to be specified if "project" is not provided.')
if size is None:
if byte_string is not None:
size = len(byte_string)
elif vex is not None:
size = vex.size
else:
vex = self._vex_engine.lift(
clemory=project.loader.memory,
state=backup_state,
insn_bytes=byte_string,
addr=addr,
thumb=thumb,
opt_level=opt_level,
num_inst=num_inst,
traceflags=traceflags)
size = vex.size
self._vex = vex
self._capstone = None
self.size = size
self.instructions = num_inst
self._instruction_addrs = []
self._parse_vex_info()
if byte_string is None:
if backup_state is not None:
self._bytes = self._vex_engine._load_bytes(addr - thumb, size, state=backup_state)[0]
if type(self._bytes) is not str:
self._bytes = str(pyvex.ffi.buffer(self._bytes, size))
else:
self._bytes = None
elif type(byte_string) is str:
if self.size is not None:
self._bytes = byte_string[:self.size]
else:
self._bytes = byte_string
else:
# Convert bytestring to a str
# size will ALWAYS be known at this point
self._bytes = str(pyvex.ffi.buffer(byte_string, self.size))
def _parse_vex_info(self):
vex = self._vex
if vex is not None:
self.instructions = vex.instructions
self._instruction_addrs = []
self.size = vex.size
for stmt in vex.statements:
if stmt.tag != 'Ist_IMark':
continue
if self.addr is None:
self.addr = stmt.addr + stmt.delta
self._instruction_addrs.append(stmt.addr + stmt.delta)
def __repr__(self):
return '<Block for %#x, %d bytes>' % (self.addr, self.size)
def __getstate__(self):
return dict((k, getattr(self, k)) for k in self.__slots__ if k not in ('_capstone', ))
def __setstate__(self, data):
for k, v in data.iteritems():
setattr(self, k, v)
def __hash__(self):
return hash((type(self), self.addr, self.bytes))
def __eq__(self, other):
return type(self) is type(other) and \
self.addr == other.addr and \
self.bytes == other.bytes
def __ne__(self, other):
return not self == other
def pp(self):
return self.capstone.pp()
@property
def _vex_engine(self):
if self._project is None:
return DEFAULT_VEX_ENGINE
else:
return self._project.factory.default_engine
@property
def vex(self):
if not self._vex:
self._vex = self._vex_engine.lift(
clemory=self._project.loader.memory if self._project is not None else None,
insn_bytes=self._bytes,
addr=self.addr,
thumb=self.thumb,
size=self.size,
num_inst=self.instructions,
opt_level=self._opt_level,
arch=self.arch,
)
self._parse_vex_info()
return self._vex
@property
def capstone(self):
if self._capstone: return self._capstone
cs = self.arch.capstone if not self.thumb else self.arch.capstone_thumb
insns = []
for cs_insn in cs.disasm(self.bytes, self.addr):
insns.append(CapstoneInsn(cs_insn))
block = CapstoneBlock(self.addr, insns, self.thumb, self.arch)
self._capstone = block
return block
@property
def codenode(self):
return BlockNode(self.addr, self.size, bytestr=self.bytes, thumb=self.thumb)
@property
def bytes(self):
if self._bytes is None:
addr = self.addr
if self.thumb:
addr = (addr >> 1) << 1
self._bytes = ''.join(self._project.loader.memory.read_bytes(addr, self.size))
return self._bytes
@property
def instruction_addrs(self):
if not self._instruction_addrs and self._vex is None:
# initialize instruction addrs
_ = self.vex
return self._instruction_addrs
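# Hedged usage sketch (assumes an installed angr and a readable binary; the
# path below is a placeholder, and none of this is part of the module itself):
#
#     import angr
#     proj = angr.Project('/bin/true', auto_load_libs=False)
#     block = proj.factory.block(proj.entry)   # lift the block at the entry point
#     block.pp()                               # capstone disassembly
#     irsb = block.vex                         # pyvex IRSB for the same bytes
#     print(block.instruction_addrs)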
class CapstoneBlock(object):
"""
Deep copy of the capstone blocks, which have serious issues with having extended lifespans
outside of capstone itself
"""
__slots__ = [ 'addr', 'insns', 'thumb', 'arch' ]
def __init__(self, addr, insns, thumb, arch):
self.addr = addr
self.insns = insns
self.thumb = thumb
self.arch = arch
def pp(self):
print str(self)
def __str__(self):
return '\n'.join(map(str, self.insns))
def __repr__(self):
return '<CapstoneBlock for %#x>' % self.addr
class CapstoneInsn(object):
def __init__(self, capstone_insn):
self.insn = capstone_insn
def __getattr__(self, item):
if item in ('__str__', '__repr__'):
return self.__getattribute__(item)
if hasattr(self.insn, item):
return getattr(self.insn, item)
raise AttributeError()
def __str__(self):
return "%#x:\t%s\t%s" % (self.address, self.mnemonic, self.op_str)
def __repr__(self):
return '<CapstoneInsn "%s" for %#x>' % (self.mnemonic, self.address)
from .codenode import BlockNode
| bsd-2-clause |
blazek/QGIS | tests/src/python/test_qgsserver_apicontext.py | 25 | 1835 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsServerApiContext class.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Alessandro Pasotti'
__date__ = '11/07/2019'
__copyright__ = 'Copyright 2019, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import json
import re
# Deterministic XML
os.environ['QT_HASH_SEED'] = '1'
from qgis.server import (
QgsBufferServerRequest,
QgsBufferServerResponse,
QgsServerApiContext
)
from qgis.testing import unittest
from utilities import unitTestDataPath
from urllib import parse
import tempfile
from test_qgsserver import QgsServerTestBase
class QgsServerApiContextsTest(QgsServerTestBase):
""" QGIS Server API context tests"""
def testMatchedPath(self):
"""Test path extraction"""
response = QgsBufferServerResponse()
request = QgsBufferServerRequest("http://www.qgis.org/services/wfs3")
context = QgsServerApiContext("/wfs3", request, response, None, None)
self.assertEqual(context.matchedPath(), "/services/wfs3")
request = QgsBufferServerRequest("http://www.qgis.org/services/wfs3/collections.hml")
context = QgsServerApiContext("/wfs3", request, response, None, None)
self.assertEqual(context.matchedPath(), "/services/wfs3")
request = QgsBufferServerRequest("http://www.qgis.org/services/wfs3/collections.hml")
context = QgsServerApiContext("/wfs4", request, response, None, None)
self.assertEqual(context.matchedPath(), "")
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
stopthatcow/zazu | zazu/style.py | 1 | 6180 | # -*- coding: utf-8 -*-
"""Style functions for zazu."""
import zazu.imports
zazu.imports.lazy_import(locals(), [
'click',
'difflib',
'functools',
'os',
'threading',
'sys',
'zazu.config',
'zazu.git_helper',
'zazu.styler',
'zazu.util'
])
__author__ = 'Nicholas Wiles'
__copyright__ = 'Copyright 2016'
default_exclude_paths = ['build',
'dependency',
'dependencies']
def read_file(path):
"""Read a file and return its contents as a string."""
with open(path, 'r') as f:
return f.read()
def write_file(path, _, styled_string):
"""Write styled_string string to a file."""
with open(path, 'w') as f:
return f.write(styled_string)
"""The git binary doesn't allow concurrent access, so serailize calls to it using a lock."""
git_lock = threading.Lock()
def stage_patch(path, input_string, styled_string):
"""Create a patch between input_string and output_string and add the patch to the git staging area.
Args:
path: the path of the file being patched.
input_string: the current state of the file in the git stage.
styled_string: the properly styled string to stage.
"""
# If the input was the same as the current file contents, apply the styling locally and add it.
if read_file(path) == input_string:
write_file(path, '', styled_string)
with git_lock:
zazu.util.check_output(['git', 'add', path])
else:
# The file is partially staged. We must apply a patch to the staging area.
input_lines = input_string.splitlines()
styled_lines = styled_string.splitlines()
patch = difflib.unified_diff(input_lines, styled_lines, 'a/' + path, 'b/' + path, lineterm='')
patch_string = '\n'.join(patch) + '\n'
if input_string[-1] != '\n':
# This is to address a bizarre issue with git apply whereby if the staged file doesn't end in a newline,
# the patch will fail to apply.
raise click.ClickException('File "{}" must have a trailing newline'.format(path))
with git_lock:
zazu.util.check_popen(args=['git', 'apply', '--cached', '--verbose', '-'], stdin_str=patch_string,
universal_newlines=True)
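# A minimal illustration (not part of zazu; the file name and strings below are made up):
# the patch text that stage_patch feeds to `git apply --cached` is an ordinary unified
# diff built with difflib, as sketched here.
def _example_unified_patch():
    """Return unified-diff text for a hypothetical one-line change to 'demo.txt'."""
    import difflib as _difflib  # plain import keeps this sketch self-contained
    before = 'alpha\nbeta\n'
    after = 'alpha\nBETA\n'
    patch = _difflib.unified_diff(before.splitlines(), after.splitlines(),
                                  'a/demo.txt', 'b/demo.txt', lineterm='')
    return '\n'.join(patch) + '\n'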
def style_file(stylers, path, read_fn, write_fn):
"""Style a file.
Args:
        stylers: the stylers to apply to the file, in order.
path: the file path.
read_fn: function used to read in the file contents.
write_fn: function used to write out the styled file, or None
"""
input_string = read_fn(path)
styled_string = input_string
for styler in stylers:
styled_string = styler.style_string(styled_string, path)
violation = styled_string != input_string
if violation and callable(write_fn):
write_fn(path, input_string, styled_string)
return path, stylers, violation
def styler_list(file, sets, keys):
"""Get the list of stylers to apply to a file based on the file set of each styler."""
return [s for s in keys if file in sets[s]]
@click.command()
@zazu.config.pass_config
@click.option('-v', '--verbose', is_flag=True, help='print files that are dirty')
@click.option('--check', is_flag=True, help='only check the repo for style violations, do not correct them')
@click.option('--cached', is_flag=True, help='only examine/fix files that are staged for SCM commit')
def style(config, verbose, check, cached):
"""Style repo files or check that they are valid style."""
config.check_repo()
violation_count = 0
stylers = config.stylers()
fixed_ok_tags = [click.style('FIXED', fg='red', bold=True), click.style(' OK ', fg='green', bold=True)]
tags = zazu.util.FAIL_OK if check else fixed_ok_tags
with zazu.util.cd(config.repo_root):
if stylers:
if cached:
staged_files = zazu.git_helper.get_touched_files(config.repo)
read_fn = zazu.git_helper.read_staged
write_fn = stage_patch
else:
read_fn = read_file
write_fn = write_file
if check:
write_fn = None
# Determine files for each styler.
file_sets = {}
styler_file_sets = {}
all_files = set()
for s in stylers:
includes = tuple(s.includes)
excludes = tuple(s.excludes)
if (includes, excludes) not in file_sets:
files = set(zazu.util.scantree(config.repo_root,
includes,
excludes,
exclude_hidden=True))
if cached:
files = files.intersection(staged_files)
file_sets[(includes, excludes)] = files
else:
files = file_sets[(includes, excludes)]
styler_file_sets[s] = files
all_files |= files
work = [functools.partial(style_file, styler_list(f, styler_file_sets, stylers), f, read_fn, write_fn) for f in all_files]
checked_files = zazu.util.dispatch(work)
for f, stylers, violation in checked_files:
if verbose:
click.echo(zazu.util.format_checklist_item(not violation,
text='({}) {}'.format(', '.join([s.name() for s in stylers]), f),
tag_formats=tags))
violation_count += violation
if verbose:
file_count = len(all_files)
if check:
                    click.echo('{} of {} files have style violations'.format(violation_count, file_count))
                else:
                    click.echo('{} of {} files were fixed'.format(violation_count, file_count))
sys.exit(-1 if check and violation_count else 0)
else:
click.echo('no style settings found')
| mit |
fuselock/odoo | addons/purchase_analytic_plans/__init__.py | 441 | 1220 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#----------------------------------------------------------
# Init Purchase
#----------------------------------------------------------
import purchase_analytic_plans
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Viktor-Evst/fixed-luigi | test/snakebite_test.py | 25 | 3738 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import os
import posixpath
import time
import unittest
import luigi.target
from luigi import six
from nose.plugins.attrib import attr
if six.PY3:
raise unittest.SkipTest("snakebite doesn't work on Python 3 yet.")
try:
from luigi.contrib.hdfs import SnakebiteHdfsClient
from minicluster import MiniClusterTestCase
except ImportError:
raise unittest.SkipTest('Snakebite not installed')
@attr('minicluster')
class TestSnakebiteClient(MiniClusterTestCase):
"""This test requires a snakebite -- it finds it from your
luigi.cfg"""
snakebite = None
def get_client(self):
return SnakebiteHdfsClient()
def setUp(self):
""" We override setUp because we want to also use snakebite for
creating the testing directory. """
self.testDir = "/tmp/luigi-test-{0}-{1}".format(
os.environ["USER"],
time.mktime(datetime.datetime.now().timetuple())
)
self.snakebite = self.get_client()
self.assertTrue(self.snakebite.mkdir(self.testDir))
def tearDown(self):
if self.snakebite.exists(self.testDir):
self.snakebite.remove(self.testDir, True)
def test_exists(self):
self.assertTrue(self.snakebite.exists(self.testDir))
def test_rename(self):
foo = posixpath.join(self.testDir, "foo")
bar = posixpath.join(self.testDir, "bar")
self.assertTrue(self.snakebite.mkdir(foo))
self.assertTrue(self.snakebite.rename(foo, bar))
self.assertTrue(self.snakebite.exists(bar))
def test_rename_trailing_slash(self):
foo = posixpath.join(self.testDir, "foo")
bar = posixpath.join(self.testDir, "bar/")
self.assertTrue(self.snakebite.mkdir(foo))
self.assertTrue(self.snakebite.rename(foo, bar))
self.assertTrue(self.snakebite.exists(bar))
self.assertFalse(self.snakebite.exists(posixpath.join(bar, 'foo')))
def test_relativepath(self):
rel_test_dir = "." + os.path.split(self.testDir)[1]
try:
self.assertFalse(self.snakebite.exists(rel_test_dir))
self.snakebite.mkdir(rel_test_dir)
self.assertTrue(self.snakebite.exists(rel_test_dir))
finally:
if self.snakebite.exists(rel_test_dir):
self.snakebite.remove(rel_test_dir, True)
def test_rename_dont_move(self):
foo = posixpath.join(self.testDir, "foo")
bar = posixpath.join(self.testDir, "bar")
self.assertTrue(self.snakebite.mkdir(foo))
self.assertTrue(self.snakebite.mkdir(bar))
self.assertTrue(self.snakebite.exists(foo)) # For sanity
self.assertTrue(self.snakebite.exists(bar)) # For sanity
self.assertRaises(luigi.target.FileAlreadyExists,
lambda: self.snakebite.rename_dont_move(foo, bar))
self.assertTrue(self.snakebite.exists(foo))
self.assertTrue(self.snakebite.exists(bar))
self.snakebite.rename_dont_move(foo, foo + '2')
self.assertFalse(self.snakebite.exists(foo))
self.assertTrue(self.snakebite.exists(foo + '2'))
| apache-2.0 |
Wikidata/QueryAnalysis | tools/getSparqlStatistic.py | 1 | 3883 | import argparse
import os
import sys
from collections import defaultdict
from pprint import pprint
import config
from postprocess import processdata
from utility import utility
parser = argparse.ArgumentParser(description="Prints out the SPARQL statistic")
parser.add_argument(
"--monthsFolder",
"-m",
default=config.monthsFolder,
type=str,
help="the folder in which the months directory " + "are residing")
parser.add_argument(
"--ignoreLock",
"-i",
help="Ignore locked file and execute" + " anyways",
action="store_true")
parser.add_argument(
"--position",
"-p",
default="default position",
type=str,
help="The position to be displayed before the data.")
parser.add_argument(
"month", type=str, help="the month which we're interested in")
if (len(sys.argv[1:]) == 0):
parser.print_help()
parser.exit()
args = parser.parse_args()
if os.path.isfile(utility.addMissingSlash(args.monthsFolder)
+ utility.addMissingSlash(args.month) + "locked") \
and not args.ignoreLock:
print("ERROR: The month " + str(args.month) +
" is being edited at the moment." +
" Use -i if you want to force the execution of this script.")
sys.exit()
class SparqlStatisticHandler:
statistic = defaultdict(int)
totalCount = 0
def handle(self, sparqlQuery, processed):
if (processed['#Valid'] == 'VALID'):
self.totalCount += 1
usedSparqlFeatures = processed['#UsedSparqlFeatures']
for usedSparqlFeature in usedSparqlFeatures.split(","):
self.statistic[usedSparqlFeature.lstrip()] += 1
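        # For example (hypothetical row): a '#UsedSparqlFeatures' value of
        # 'SelectQuery, Filter, Join' bumps the 'SelectQuery', 'Filter' and 'Join'
        # counters by one each.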
def printKeys(self, keys):
result = ""
i = 1
for featureName in keys:
featureCount = self.statistic[featureName]
# result += featureName + ": " + str(featureCount) + "\n"
result += str(featureCount) + "\n"
i += 1
print(result)
def printSparqlTranslation(self):
self.statistic["Select"] = self.statistic["SelectQuery"]
self.statistic["Ask"] = self.statistic["AskQuery"]
self.statistic["Describe"] = self.statistic["DescribeQuery"]
self.statistic["Construct"] = self.statistic["ConstructQuery"]
self.statistic["Order By"] = self.statistic["OrderClause"]
self.statistic["Union"] = self.statistic["UnionGraphPattern"]
self.statistic["Optional"] = self.statistic["OptionalGraphPattern"]
self.statistic["Not Exists"] = self.statistic["NotExistsFunc"]
self.statistic["Minus"] = self.statistic["MinusGraphPattern"]
self.statistic["Exists"] = self.statistic["ExistsFunc"]
self.statistic["Group By"] = self.statistic["GroupClause"]
self.statistic["Having"] = self.statistic["HavingClause"]
self.statistic["Service"] = self.statistic["ServiceGraphPattern"]
self.statistic["And"] = self.statistic["Join"]
self.statistic["Values"] = self.statistic["BindingValue"]
self.statistic["'+"] = self.statistic["+"]
self.statistic["Subquery"] = self.statistic["SubSelect"]
# only print specified columns
toPrintKeys = [
"Select", "Ask", "Describe", "Construct", "Distinct", "Limit",
"Offset", "Order By", "Filter", "And", "Union", "Optional",
"Graph", "Not Exists", "Minus", "Exists", "Count", "Max", "Min",
"Avg", "Sum", "Group By", "Having", "Service", "LangService",
"Sample", "Bind", "GroupConcat", "Reduced", "Values", "'+", "*",
"Subquery"
]
self.printKeys(toPrintKeys)
print(" ")
print(str(self.totalCount))
handler = SparqlStatisticHandler()
processdata.processMonth(
handler, args.month, args.monthsFolder, notifications=False)
print(args.position)
handler.printSparqlTranslation()
| apache-2.0 |
alunarbeach/spark | examples/src/main/python/streaming/recoverable_network_wordcount.py | 80 | 4423 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Counts words in text encoded with UTF8 received from the network every second.
Usage: recoverable_network_wordcount.py <hostname> <port> <checkpoint-directory> <output-file>
<hostname> and <port> describe the TCP server that Spark Streaming would connect to receive
data. <checkpoint-directory> directory to HDFS-compatible file system which checkpoint data
<output-file> file to which the word counts will be appended
To run this on your local machine, you need to first run a Netcat server
`$ nc -lk 9999`
and then run the example
`$ bin/spark-submit examples/src/main/python/streaming/recoverable_network_wordcount.py \
localhost 9999 ~/checkpoint/ ~/out`
If the directory ~/checkpoint/ does not exist (e.g. running for the first time), it will create
a new StreamingContext (will print "Creating new context" to the console). Otherwise, if
checkpoint data exists in ~/checkpoint/, then it will create StreamingContext from
the checkpoint data.
"""
from __future__ import print_function
import os
import sys
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
# Get or register a Broadcast variable
def getWordBlacklist(sparkContext):
if ('wordBlacklist' not in globals()):
globals()['wordBlacklist'] = sparkContext.broadcast(["a", "b", "c"])
return globals()['wordBlacklist']
# Get or register an Accumulator
def getDroppedWordsCounter(sparkContext):
if ('droppedWordsCounter' not in globals()):
globals()['droppedWordsCounter'] = sparkContext.accumulator(0)
return globals()['droppedWordsCounter']
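# Broadcast variables and accumulators are not restored when a StreamingContext is
# recreated from checkpoint data, so the two helpers above lazily re-create them on
# first use after a restart.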
def createContext(host, port, outputPath):
# If you do not see this printed, that means the StreamingContext has been loaded
# from the new checkpoint
print("Creating new context")
if os.path.exists(outputPath):
os.remove(outputPath)
sc = SparkContext(appName="PythonStreamingRecoverableNetworkWordCount")
ssc = StreamingContext(sc, 1)
# Create a socket stream on target ip:port and count the
# words in input stream of \n delimited text (eg. generated by 'nc')
lines = ssc.socketTextStream(host, port)
words = lines.flatMap(lambda line: line.split(" "))
wordCounts = words.map(lambda x: (x, 1)).reduceByKey(lambda x, y: x + y)
def echo(time, rdd):
# Get or register the blacklist Broadcast
blacklist = getWordBlacklist(rdd.context)
# Get or register the droppedWordsCounter Accumulator
droppedWordsCounter = getDroppedWordsCounter(rdd.context)
# Use blacklist to drop words and use droppedWordsCounter to count them
def filterFunc(wordCount):
if wordCount[0] in blacklist.value:
droppedWordsCounter.add(wordCount[1])
                return False
            else:
                return True
counts = "Counts at time %s %s" % (time, rdd.filter(filterFunc).collect())
print(counts)
print("Dropped %d word(s) totally" % droppedWordsCounter.value)
print("Appending to " + os.path.abspath(outputPath))
with open(outputPath, 'a') as f:
f.write(counts + "\n")
wordCounts.foreachRDD(echo)
return ssc
if __name__ == "__main__":
if len(sys.argv) != 5:
print("Usage: recoverable_network_wordcount.py <hostname> <port> "
"<checkpoint-directory> <output-file>", file=sys.stderr)
exit(-1)
host, port, checkpoint, output = sys.argv[1:]
ssc = StreamingContext.getOrCreate(checkpoint,
lambda: createContext(host, int(port), output))
ssc.start()
ssc.awaitTermination()
| apache-2.0 |
Zing22/uemscode | tmp_test.py | 1 | 1634 | # -*- coding=utf-8 -*-
#### for testing steps
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.externals import joblib
from PIL import Image
from process import toBin, cropLetters
from img2feature import toFeature
from main import readAllFiles
TEMP_DIR = 'tmp/'
def test_onePic():
path = input('Pic path:')
img = Image.open(path)
bimg = toBin(img)
bimg.save(TEMP_DIR+ 'bimg.jpg')
success, letters = cropLetters(bimg)
if not success:
print('Crop failed.')
print(letters)
return
features = []
for l in letters:
features.append([int(x) for x in toFeature(l).split(' ')])
l.save(TEMP_DIR + '%d.jpg' % len(features))
pre = clf.predict(features)
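    # clf.predict returns one class index per letter image (presumably 0-25); the next
    # line maps each index back to its letter 'A'-'Z'.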
code = ''.join([chr(x + ord('A')) for x in pre])
print(code)
def test_tmp_dir():
filenames = readAllFiles(TEMP_DIR)
for file in filenames:
img = Image.open(TEMP_DIR + file)
bimg = toBin(img)
bimg.save(TEMP_DIR + 'tmp_' + file)
success, letters = cropLetters(bimg)
if not success:
print('Crop failed.')
print(letters)
return
features = []
for l in letters:
features.append([int(x) for x in toFeature(l).split(' ')])
# l.save(TEMP_DIR + '%d.jpg' % len(features))
pre = clf.predict(features)
code = ''.join([chr(x + ord('A')) for x in pre])
print(code)
SAVE_TO = 'model.pkl'
def main():
global clf
clf = joblib.load(SAVE_TO)
test_onePic()
# test_tmp_dir()
if __name__ == '__main__':
main() | mit |
hoatle/odoo | addons/hw_proxy/__openerp__.py | 313 | 1675 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Hardware Proxy',
'version': '1.0',
'category': 'Point Of Sale',
'sequence': 6,
'summary': 'Connect the Web Client to Hardware Peripherals',
'website': 'https://www.odoo.com/page/point-of-sale',
'description': """
Hardware Proxy
==============
This module allows you to remotely use peripherals connected to this server.
This module only contains the enabling framework. The actual device drivers
are found in other modules that must be installed separately.
""",
'author': 'OpenERP SA',
'depends': [],
'test': [
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
aarsan/azure-quickstart-templates | hortonworks-on-centos/scripts/vm-bootstrap.py | 89 | 53170 | #
# vm-bootstrap.py
#
# This script is used to prepare VMs launched via HDP Cluster Install Blade on Azure.
#
# Parameters passed from the bootstrap script invocation by the controller (shown in the parameter order).
# Required parameters:
# action: "bootstrap" to set up VM and initiate cluster deployment. "check" for checking on cluster deployment status.
# cluster_id: user-specified name of the cluster
# admin_password: password for the Ambari "admin" user
# Required parameters for "bootstrap" action:
#   scenario_id: "evaluation", "small", "medium", or "large"
# num_masters: number of masters in the cluster
# num_workers: number of workers in the cluster
# master_prefix: hostname prefix for master hosts (master hosts are named <cluster_id>-<master_prefix>-<id>
# worker_prefix: hostname prefix for worker hosts (worker hosts are named <cluster_id>-<worker_prefix>-<id>
# domain_name: the domain name part of the hosts, starting with a period (e.g., .cloudapp.net)
# id_padding: number of digits for the host <id> (e.g., 2 uses <id> like 01, 02, .., 10, 11)
# masters_iplist: list of masters' local IPV4 addresses sorted from master_01 to master_XX delimited by a ','
# workers_iplist: list of workers' local IPV4 addresses sorted from worker_01 to worker_XX delimited by a ','
# Required parameters for "check" action:
# --check_timeout_seconds:
# the number of seconds after which the script is required to exit
# --report_timeout_fail:
# if "true", exit code 1 is returned in case deployment has failed, or deployment has not finished after
# check_timeout_seconds
# if "false", exit code 0 is returned if deployment has finished successfully, or deployment has not finished after
# check_timeout_seconds
# Optional:
# protocol: if "https" (default), https:8443 is used for Ambari. Otherwise, Ambari uses http:8080
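# Example invocation (all values below are placeholders, not defaults):
#   python vm-bootstrap.py --action bootstrap --cluster_id hdp --scenario_id evaluation \
#     --num_masters 2 --num_workers 3 --master_prefix hdp-mn- --worker_prefix hdp-wn- \
#     --domain_name .cloudapp.net --masters_iplist 10.0.0.4,10.0.0.5 \
#     --workers_iplist 10.0.0.6,10.0.0.7,10.0.0.8 --admin_password <password>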
from optparse import OptionParser
import base64
import json
import logging
import os
import pprint
import re
import socket
import sys
import time
import urllib2
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.FileHandler('/tmp/vm-bootstrap.log')
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.info('Starting VM Bootstrap...')
parser = OptionParser()
parser.add_option("--cluster_id", type="string", dest="cluster_id")
parser.add_option("--scenario_id", type="string", dest="scenario_id", default="evaluation")
parser.add_option("--num_masters", type="int", dest="num_masters")
parser.add_option("--num_workers", type="int", dest="num_workers")
parser.add_option("--master_prefix", type="string", dest="master_prefix")
parser.add_option("--worker_prefix", type="string", dest="worker_prefix")
parser.add_option("--domain_name", type="string", dest="domain_name")
parser.add_option("--id_padding", type="int", dest="id_padding", default=2)
parser.add_option("--admin_password", type="string", dest="admin_password", default="admin")
parser.add_option("--masters_iplist", type="string", dest="masters_iplist")
parser.add_option("--workers_iplist", type="string", dest="workers_iplist")
parser.add_option("--protocol", type="string", dest="protocol", default="https")
parser.add_option("--action", type="string", dest="action", default="bootstrap")
parser.add_option("--check_timeout_seconds", type="int", dest="check_timeout_seconds", default="250")
parser.add_option("--report_timeout_fail", type="string", dest="report_timeout_fail", default="false")
(options, args) = parser.parse_args()
cluster_id = options.cluster_id
scenario_id = options.scenario_id.lower()
num_masters = options.num_masters
num_workers = options.num_workers
master_prefix = options.master_prefix
worker_prefix = options.worker_prefix
domain_name = options.domain_name
id_padding = options.id_padding
admin_password = options.admin_password
masters_iplist = options.masters_iplist
workers_iplist = options.workers_iplist
protocol = options.protocol
action = options.action
check_timeout_seconds = options.check_timeout_seconds
report_timeout_fail = options.report_timeout_fail.lower() == "true"
logger.info('action=' + action)
admin_username = 'admin'
current_admin_password = 'admin'
request_timeout = 30
port = '8443' if (protocol == 'https') else '8080'
http_handler = urllib2.HTTPHandler(debuglevel=1)
opener = urllib2.build_opener(http_handler)
urllib2.install_opener(opener)
class TimeoutException(Exception):
pass
def get_ambari_auth_string():
return 'Basic ' + base64.encodestring('%s:%s' % (admin_username, current_admin_password)).replace('\n', '')
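# For example, the default 'admin'/'admin' credentials yield 'Basic YWRtaW46YWRtaW4='
# (the newlines that encodestring appends are stripped above).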
def run_system_command(command):
os.system(command)
def get_hostname(id):
if id <= num_masters:
return master_prefix + str(id).zfill(id_padding)
else:
return worker_prefix + str(id - num_masters).zfill(id_padding)
def get_fqdn(id):
return get_hostname(id) + domain_name
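# Illustration with made-up values: given master_prefix='hdp-mn-', worker_prefix='hdp-wn-',
# num_masters=2, id_padding=2 and domain_name='.cloudapp.net', get_fqdn(1) returns
# 'hdp-mn-01.cloudapp.net' and get_fqdn(3) returns 'hdp-wn-01.cloudapp.net'
# (worker numbering restarts at 01 after the masters).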
def get_host_ip(hostname):
if (hostname.startswith(master_prefix)):
return masters_iplist[int(hostname.split('-')[-1]) -1]
else:
return workers_iplist[int(hostname.split('-')[-1]) -1]
def get_host_ip_map(hostnames):
host_ip_map = {}
for hostname in hostnames:
num_tries = 0
ip = None
while ip is None and num_tries < 5:
try:
ip = get_host_ip(hostname)
# ip = socket.gethostbyname(hostname)
except:
time.sleep(1)
num_tries = num_tries + 1
continue
if ip is None:
logger.info('Failed to look up ip address for ' + hostname)
raise
else:
logger.info(hostname + ' resolved to ' + ip)
host_ip_map[hostname] = ip
return host_ip_map
def update_etc_hosts(host_ip_map):
logger.info('Adding entries to /etc/hosts file...')
with open("/etc/hosts", "a") as file:
for host in sorted(host_ip_map):
file.write('%s\t%s\t%s\n' % (host_ip_map[host], host + domain_name, host))
logger.info('Finished updating /etc/hosts')
def update_ambari_agent_ini(ambari_server_hostname):
logger.info('Updating ambari-agent.ini file...')
command = 'sed -i s/hostname=localhost/hostname=%s/ /etc/ambari-agent/conf/ambari-agent.ini' % ambari_server_hostname
logger.info('Executing command: ' + command)
run_system_command(command)
logger.info('Finished updating ambari-agent.ini file')
def patch_ambari_agent():
logger.info('Patching ambari-agent to prevent rpmdb corruption...')
  logger.info('Finished patching ambari-agent')
def enable_https():
command = """
printf 'api.ssl=true\nclient.api.ssl.cert_name=https.crt\nclient.api.ssl.key_name=https.key\nclient.api.ssl.port=8443' >> /etc/ambari-server/conf/ambari.properties
mkdir /root/ambari-cert
cd /root/ambari-cert
# create server.crt and server.key (self-signed)
openssl genrsa -out server.key 2048
openssl req -new -key server.key -out server.csr -batch
openssl x509 -req -days 365 -in server.csr -signkey server.key -out server.crt
echo PulUuMWPp0o4Lq6flGA0NGDKNRZQGffW2mWmJI3klSyspS7mUl > pass.txt
cp pass.txt passin.txt
# encrypts server.key with des3 as server.key.secured with the specified password
openssl rsa -in server.key -des3 -out server.key.secured -passout file:pass.txt
# creates /tmp/https.keystore.p12
openssl pkcs12 -export -in 'server.crt' -inkey 'server.key.secured' -certfile 'server.crt' -out '/var/lib/ambari-server/keys/https.keystore.p12' -password file:pass.txt -passin file:passin.txt
mv pass.txt /var/lib/ambari-server/keys/https.pass.txt
cd ..
rm -rf /root/ambari-cert
"""
run_system_command(command)
def set_admin_password(new_password, timeout):
logger.info('Setting admin password...')
def poll_until_all_agents_registered(num_hosts, timeout):
url = '%s://localhost:%s/api/v1/hosts' % (protocol, port)
logger.info('poll until all agents')
all_hosts_registered = False
start_time = time.time()
while time.time() - start_time < timeout:
request = urllib2.Request(url)
request.add_header("Authorization", get_ambari_auth_string())
try:
result = urllib2.urlopen(request, timeout=request_timeout).read()
pprint.pprint(result)
if (result is not None):
jsonResult = json.loads(result)
if len(jsonResult['items']) >= num_hosts:
all_hosts_registered = True
break
except :
logger.exception('Could not poll agent status from the server.')
time.sleep(5)
if not all_hosts_registered:
raise Exception('Timed out while waiting for all agents to register')
def is_ambari_server_host():
hostname = socket.getfqdn()
hostname = hostname.split('.')[0]
logger.info(hostname)
logger.info('Checking ambari host')
logger.info(ambari_server_hostname)
return hostname == ambari_server_hostname
def create_blueprint(scenario_id):
blueprint_name = 'myblueprint'
logger.info('Creating blueprint for scenario %s' % scenario_id)
url = '%s://localhost:%s/api/v1/blueprints/%s' % (protocol, port, blueprint_name)
evaluation_host_groups = [
{
"name" : "master_1",
"components" : [
{
"name" : "AMBARI_SERVER"
},
{
"name" : "DRPC_SERVER"
},
{
"name" : "HIVE_SERVER"
},
{
"name" : "MYSQL_SERVER"
},
{
"name" : "NIMBUS"
},
{
"name" : "SECONDARY_NAMENODE"
},
{
"name" : "SPARK_JOBHISTORYSERVER"
},
{
"name" : "STORM_UI_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_2",
"components" : [
{
"name" : "APP_TIMELINE_SERVER"
},
{
"name" : "FALCON_SERVER"
},
{
"name" : "HBASE_MASTER"
},
{
"name" : "HISTORYSERVER"
},
{
"name" : "HIVE_METASTORE"
},
{
"name" : "KAFKA_BROKER"
},
{
"name" : "METRICS_COLLECTOR"
},
{
"name" : "NAMENODE"
},
{
"name" : "OOZIE_SERVER"
},
{
"name" : "RESOURCEMANAGER"
},
{
"name" : "WEBHCAT_SERVER"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "workers",
"components" : [
{
"name" : "DATANODE"
},
{
"name" : "HBASE_REGIONSERVER"
},
{
"name" : "NODEMANAGER"
},
{
"name" : "SUPERVISOR"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "3"
}
]
small_host_groups = [
{
"name" : "master_1",
"components" : [
{
"name" : "AMBARI_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_2",
"components" : [
{
"name" : "METRICS_COLLECTOR"
},
{
"name" : "NAMENODE"
},
{
"name" : "NIMBUS"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_3",
"components" : [
{
"name" : "DRPC_SERVER"
},
{
"name" : "FALCON_SERVER"
},
{
"name" : "HBASE_MASTER"
},
{
"name" : "HISTORYSERVER"
},
{
"name" : "HIVE_METASTORE"
},
{
"name" : "KAFKA_BROKER"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_SERVER"
},
{
"name" : "RESOURCEMANAGER"
},
{
"name" : "SECONDARY_NAMENODE"
},
{
"name" : "WEBHCAT_SERVER"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_4",
"components" : [
{
"name" : "APP_TIMELINE_SERVER"
},
{
"name" : "HIVE_SERVER"
},
{
"name" : "MYSQL_SERVER"
},
{
"name" : "SPARK_JOBHISTORYSERVER"
},
{
"name" : "STORM_UI_SERVER"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "workers",
"components" : [
{
"name" : "DATANODE"
},
{
"name" : "HBASE_REGIONSERVER"
},
{
"name" : "NODEMANAGER"
},
{
"name" : "SUPERVISOR"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "9"
}
]
medium_host_groups = [
{
"name" : "master_1",
"components" : [
{
"name" : "AMBARI_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_2",
"components" : [
{
"name" : "DRPC_SERVER"
},
{
"name" : "METRICS_COLLECTOR"
},
{
"name" : "NAMENODE"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_3",
"components" : [
{
"name" : "HIVE_SERVER"
},
{
"name" : "SUPERVISOR"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_4",
"components" : [
{
"name" : "APP_TIMELINE_SERVER"
},
{
"name" : "HIVE_SERVER"
},
{
"name" : "MYSQL_SERVER"
},
{
"name" : "SPARK_JOBHISTORYSERVER"
},
{
"name" : "STORM_UI_SERVER"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "workers",
"components" : [
{
"name" : "DATANODE"
},
{
"name" : "HBASE_REGIONSERVER"
},
{
"name" : "NODEMANAGER"
},
{
"name" : "SUPERVISOR"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "99"
}
]
large_host_groups = [
{
"name" : "master_1",
"components" : [
{
"name" : "AMBARI_SERVER"
},
{
"name" : "KAFKA_BROKER"
},
{
"name" : "METRICS_COLLECTOR"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_2",
"components" : [
{
"name" : "METRICS_COLLECTOR"
},
{
"name" : "NAMENODE"
},
{
"name" : "NIMBUS"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_3",
"components" : [
{
"name" : "DRPC_SERVER"
},
{
"name" : "FALCON_SERVER"
},
{
"name" : "HBASE_MASTER"
},
{
"name" : "HISTORYSERVER"
},
{
"name" : "HIVE_METASTORE"
},
{
"name" : "KAFKA_BROKER"
},
{
"name" : "OOZIE_SERVER"
},
{
"name" : "RESOURCEMANAGER"
},
{
"name" : "SECONDARY_NAMENODE"
},
{
"name" : "WEBHCAT_SERVER"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_4",
"components" : [
{
"name" : "HIVE_METASTORE"
},
{
"name" : "MYSQL_SERVER"
},
{
"name" : "SECONDARY_NAMENODE"
},
{
"name" : "SPARK_JOBHISTORYSERVER"
},
{
"name" : "ZOOKEEPER_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_5",
"components" : [
{
"name" : "NODEMANAGER"
},
{
"name" : "OOZIE_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_6",
"components" : [
{
"name" : "RESOURCEMANAGER"
},
{
"name" : "WEBHCAT_SERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_7",
"components" : [
{
"name" : "HBASE_MASTER"
},
{
"name" : "HISTORYSERVER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "master_8",
"components" : [
{
"name" : "APP_TIMELINE_SERVER"
},
{
"name" : "FALCON_SERVER"
},
{
"name" : "NIMBUS"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "1"
},
{
"name" : "workers",
"components" : [
{
"name" : "DATANODE"
},
{
"name" : "HBASE_REGIONSERVER"
},
{
"name" : "NODEMANAGER"
},
{
"name" : "FALCON_CLIENT"
},
{
"name" : "HBASE_CLIENT"
},
{
"name" : "HCAT"
},
{
"name" : "HDFS_CLIENT"
},
{
"name" : "HIVE_CLIENT"
},
{
"name" : "MAPREDUCE2_CLIENT"
},
{
"name" : "METRICS_MONITOR"
},
{
"name" : "OOZIE_CLIENT"
},
{
"name" : "PIG"
},
{
"name" : "SLIDER"
},
{
"name" : "SPARK_CLIENT"
},
{
"name" : "SQOOP"
},
{
"name" : "TEZ_CLIENT"
},
{
"name" : "YARN_CLIENT"
},
{
"name" : "ZOOKEEPER_CLIENT"
}
],
"cardinality" : "200"
}
]
  if scenario_id == 'evaluation':
    host_groups = evaluation_host_groups
  elif scenario_id == 'small':
    host_groups = small_host_groups
  elif scenario_id == 'medium':
    host_groups = medium_host_groups
  elif scenario_id == 'large':
    host_groups = large_host_groups
  else:
    # Fall back to the small layout for unrecognized scenario ids.
    host_groups = small_host_groups
evaluation_configurations = [
{
"ams-hbase-env" : {
"properties" : {
"hbase_master_heapsize" : "512m",
"hbase_regionserver_heapsize" : "512m",
"hbase_regionserver_xmn_max" : "256m",
"hbase_regionserver_xmn_ratio" : "0.2"
}
}
},
{
"capacity-scheduler" : {
"yarn.scheduler.capacity.root.default.maximum-am-resource-percent" : "0.5",
"yarn.scheduler.capacity.maximum-am-resource-percent" : "0.5"
}
},
{
"cluster-env": {
"cluster_name": "sandbox",
"smokeuser": "ambari-qa",
"user_group": "hadoop",
"security_enabled": "false"
}
},
{
"core-site" : {
"hadoop.proxyuser.hue.hosts" : "*",
"hadoop.proxyuser.hue.groups" : "*",
"hadoop.proxyuser.root.hosts" : "*",
"hadoop.proxyuser.root.groups" : "*",
"hadoop.proxyuser.oozie.hosts" : "*",
"hadoop.proxyuser.oozie.groups" : "*",
"hadoop.proxyuser.hcat.hosts" : "*",
"hadoop.proxyuser.hcat.groups" : "*"
}
},
{
"hadoop-env": {
"dtnode_heapsize" : "250",
"hadoop_heapsize" : "250",
"namenode_heapsize" : "250",
"namenode_opt_newsize": "50",
"namenode_opt_maxnewsize": "100"
}
},
{
"hbase-site" : {
"hbase.security.authorization": "true",
"hbase.rpc.engine": "org.apache.hadoop.hbase.ipc.SecureRpcEngine",
"hbase_master_heapsize": "250",
"hbase_regionserver_heapsize": "250",
"hbase.rpc.protection": "PRIVACY"
}
},
{
"hdfs-site" : {
"dfs.block.size" : "34217472",
"dfs.replication" : "1",
"dfs.namenode.accesstime.precision" : "3600000",
"dfs.nfs3.dump.dir" : "/tmp/.hdfs-nfs",
"dfs.nfs.exports.allowed.hosts" : "* rw",
"dfs.datanode.max.xcievers" : "1024",
"dfs.block.access.token.enable" : "false",
"dfs.datanode.data.dir": "/disks/0/hadoop/hdfs/data,/disks/1/hadoop/hdfs/data,/disks/2/hadoop/hdfs/data,/disks/3/hadoop/hdfs/data,/disks/4/hadoop/hdfs/data,/disks/5/hadoop/hdfs/data,/disks/6/hadoop/hdfs/data,/disks/7/hadoop/hdfs/data,/disks/8/hadoop/hdfs/data,/disks/9/hadoop/hdfs/data,/disks/10/hadoop/hdfs/data,/disks/11/hadoop/hdfs/data,/disks/12/hadoop/hdfs/data,/disks/13/hadoop/hdfs/data,/disks/14/hadoop/hdfs/data,/disks/15/hadoop/hdfs/data",
"dfs.namenode.checkpoint.dir": "/disks/0/hadoop/hdfs/namesecondary",
"dfs.namenode.name.dir": "/disks/0/hadoop/hdfs/namenode,/disks/1/hadoop/hdfs/namenode,/disks/2/hadoop/hdfs/namenode,/disks/3/hadoop/hdfs/namenode,/disks/4/hadoop/hdfs/namenode,/disks/5/hadoop/hdfs/namenode,/disks/6/hadoop/hdfs/namenode,/disks/7/hadoop/hdfs/namenode",
"dfs.datanode.failed.volumes.tolerated": "6"
}
},
{
"global": {
"oozie_data_dir": "/disks/0/hadoop/oozie/data",
"zk_data_dir": "/disks/0/hadoop/zookeeper",
"falcon.embeddedmq.data": "/disks/0/hadoop/falcon/embeddedmq/data",
"falcon_local_dir": "/disks/0/hadoop/falcon",
"namenode_heapsize" : "16384m"
}
},
{
"hive-site" : {
"javax.jdo.option.ConnectionPassword" : "hive",
"hive.tez.container.size" : "250",
"hive.tez.java.opts" : "-server -Xmx200m -Djava.net.preferIPv4Stack=true",
"hive.heapsize" : "250",
"hive.users.in.admin.role" : "hue,hive",
"hive_metastore_user_passwd" : "hive",
"hive.server2.enable.impersonation": "true",
"hive.compactor.check.interval": "300s",
"hive.compactor.initiator.on": "true",
"hive.compactor.worker.timeout": "86400s",
"hive.enforce.bucketing": "true",
"hive.support.concurrency": "true",
"hive.exec.dynamic.partition.mode": "nonstrict",
"hive.server2.enable.doAs": "true",
"hive.txn.manager": "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager",
"hive.txn.max.open.batch": "1000",
"hive.txn.timeout": "300",
"hive.security.authorization.enabled": "false",
"hive.users.in.admin.role": "hue,hive",
"hive.metastore.uris" : "thrift://%HOSTGROUP::master_2%:9083"
}
},
{
"mapred-env": {
"jobhistory_heapsize" : "250"
}
},
{
"mapred-site" : {
"mapreduce.map.memory.mb" : "250",
"mapreduce.reduce.memory.mb" : "250",
"mapreduce.task.io.sort.mb" : "64",
"yarn.app.mapreduce.am.resource.mb" : "250",
"yarn.app.mapreduce.am.command-opts" : "-Xmx200m",
"mapred.job.reduce.memory.mb" : "250",
"mapred.child.java.opts" : "-Xmx200m",
"mapred.job.map.memory.mb" : "250",
"io.sort.mb" : "64",
"mapreduce.map.java.opts" : "-Xmx200m",
"mapreduce.reduce.java.opts" : "-Xmx200m"
}
},
{
"oozie-site" : {
"oozie.service.ProxyUserService.proxyuser.hue.hosts" : "*",
"oozie.service.ProxyUserService.proxyuser.hue.groups" : "*",
"oozie.service.ProxyUserService.proxyuser.falcon.hosts": "*",
"oozie.service.ProxyUserService.proxyuser.falcon.groups": "*",
"oozie.service.JPAService.jdbc.password" : "oozie"
}
},
{
"storm-site" : {
"logviewer.port" : 8005,
"nimbus.childopts" : "-Xmx220m -javaagent:/usr/hdp/current/storm-client/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=sandbox.hortonworks.com,port=8649,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-client/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM",
"ui.childopts" : "-Xmx220m",
"drpc.childopts" : "-Xmx220m"
}
},
{
"tez-site" : {
"tez.am.java.opts" : "-server -Xmx200m -Djava.net.preferIPv4Stack=true -XX:+UseNUMA -XX:+UseParallelGC",
"tez.am.resource.memory.mb" : "250",
"tez.dag.am.resource.memory.mb" : "250",
"yarn.app.mapreduce.am.command-opts" : "-Xmx200m"
}
},
{
"webhcat-site" : {
"webhcat.proxyuser.hue.hosts" : "*",
"webhcat.proxyuser.hue.groups" : "*",
"webhcat.proxyuser.hcat.hosts" : "*",
"webhcat.proxyuser.hcat.groups" : "*",
"templeton.hive.properties" : "hive.metastore.local=false,hive.metastore.uris=thrift://sandbox.hortonworks.com:9083,hive.metastore.sasl.enabled=false,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse"
}
},
{
"yarn-env": {
"apptimelineserver_heapsize" : "250",
"resourcemanager_heapsize" : "250",
"nodemanager_heapsize" : "250",
"yarn_heapsize" : "250"
}
},
{
"yarn-site" : {
"yarn.nodemanager.resource.memory-mb": "2250",
"yarn.nodemanager.vmem-pmem-ratio" : "10",
"yarn.scheduler.minimum-allocation-mb" : "250",
"yarn.scheduler.maximum-allocation-mb" : "2250",
"yarn.nodemanager.pmem-check-enabled" : "false",
"yarn.acl.enable" : "false",
"yarn.resourcemanager.webapp.proxyuser.hcat.groups" : "*",
"yarn.resourcemanager.webapp.proxyuser.hcat.hosts" : "*",
"yarn.resourcemanager.webapp.proxyuser.oozie.groups" : "*",
"yarn.resourcemanager.webapp.proxyuser.oozie.hosts" : "*"
}
}
]
standard_configurations = [
{
"ams-hbase-env" : {
"properties" : {
"hbase_master_heapsize" : "512m",
"hbase_regionserver_heapsize" : "512m",
"hbase_regionserver_xmn_max" : "256m",
"hbase_regionserver_xmn_ratio" : "0.2"
}
}
},
{
"capacity-scheduler" : {
"yarn.scheduler.capacity.root.default.maximum-am-resource-percent" : "0.5",
"yarn.scheduler.capacity.maximum-am-resource-percent" : "0.5"
}
},
{
"cluster-env": {
"cluster_name": "hdp",
"smokeuser": "ambari-qa",
"user_group": "hadoop",
"security_enabled": "false"
}
},
{
"core-site" : {
"hadoop.proxyuser.hue.hosts" : "*",
"hadoop.proxyuser.hue.groups" : "*",
"hadoop.proxyuser.root.hosts" : "*",
"hadoop.proxyuser.root.groups" : "*",
"hadoop.proxyuser.oozie.hosts" : "*",
"hadoop.proxyuser.oozie.groups" : "*",
"hadoop.proxyuser.hcat.hosts" : "*",
"hadoop.proxyuser.hcat.groups" : "*"
}
},
{
"hadoop-env": {
"dtnode_heapsize" : "250",
"hadoop_heapsize" : "250",
"namenode_heapsize" : "250",
"namenode_opt_newsize": "50",
"namenode_opt_maxnewsize": "100"
}
},
{
"yarn-site": {
"yarn.nodemanager.local-dirs": "/disks/0/hadoop/yarn/local,/disks/1/hadoop/yarn/local,/disks/2/hadoop/yarn/local,/disks/3/hadoop/yarn/local,/disks/4/hadoop/yarn/local,/disks/5/hadoop/yarn/local,/disks/6/hadoop/yarn/local,/disks/7/hadoop/yarn/local",
"yarn.nodemanager.log-dirs": "/disks/0/hadoop/yarn/log,/disks/1/hadoop/yarn/log,/disks/2/hadoop/yarn/log,/disks/3/hadoop/yarn/log,/disks/4/hadoop/yarn/log,/disks/5/hadoop/yarn/log,/disks/6/hadoop/yarn/log,/disks/7/hadoop/yarn/log,/disks/8/hadoop/yarn/log,/disks/9/hadoop/yarn/log,/disks/10/hadoop/yarn/log,/disks/11/hadoop/yarn/log,/disks/12/hadoop/yarn/log,/disks/13/hadoop/yarn/log,/disks/14/hadoop/yarn/log,/disks/15/hadoop/yarn/log",
"yarn.timeline-service.leveldb-timeline-store.path": "/disks/0/hadoop/yarn/timeline",
"yarn.nodemanager.resource.memory-mb" : "32768",
"yarn.scheduler.maximum-allocation-mb" : "32768",
"yarn.scheduler.minimum-allocation-mb" : "2048"
}
},
{
"tez-site": {
"tez.am.resource.memory.mb" : "2048",
"tez.am.java.opts" : "-server -Xmx1638m -Djava.net.preferIPv4Stack=true -XX:+UseNUMA -XX:+UseParallelGC"
}
},
{
"mapred-site": {
"mapreduce.map.java.opts" : "-Xmx1638m",
"mapreduce.map.memory.mb" : "2048",
"mapreduce.reduce.java.opts" : "-Xmx1638m",
"mapreduce.reduce.memory.mb" : "2048",
"mapreduce.task.io.sort.mb" : "819",
"yarn.app.mapreduce.am.command-opts" : "-Xmx1638m",
"yarn.app.mapreduce.am.resource.mb" : "2048"
}
},
{
"hdfs-site": {
"dfs.datanode.data.dir": "/disks/0/hadoop/hdfs/data,/disks/1/hadoop/hdfs/data,/disks/2/hadoop/hdfs/data,/disks/3/hadoop/hdfs/data,/disks/4/hadoop/hdfs/data,/disks/5/hadoop/hdfs/data,/disks/6/hadoop/hdfs/data,/disks/7/hadoop/hdfs/data,/disks/8/hadoop/hdfs/data,/disks/9/hadoop/hdfs/data,/disks/10/hadoop/hdfs/data,/disks/11/hadoop/hdfs/data,/disks/12/hadoop/hdfs/data,/disks/13/hadoop/hdfs/data,/disks/14/hadoop/hdfs/data,/disks/15/hadoop/hdfs/data",
"dfs.namenode.checkpoint.dir": "/disks/0/hadoop/hdfs/namesecondary",
"dfs.namenode.name.dir": "/disks/0/hadoop/hdfs/namenode,/disks/1/hadoop/hdfs/namenode,/disks/2/hadoop/hdfs/namenode,/disks/3/hadoop/hdfs/namenode,/disks/4/hadoop/hdfs/namenode,/disks/5/hadoop/hdfs/namenode,/disks/6/hadoop/hdfs/namenode,/disks/7/hadoop/hdfs/namenode",
"dfs.datanode.failed.volumes.tolerated": "6",
"dfs.nfs3.dump.dir" : "/tmp/.hdfs-nfs"
}
},
{
"global": {
"oozie_data_dir": "/disks/0/hadoop/oozie/data",
"zk_data_dir": "/disks/0/hadoop/zookeeper",
"falcon.embeddedmq.data": "/disks/0/hadoop/falcon/embeddedmq/data",
"falcon_local_dir": "/disks/0/hadoop/falcon",
"namenode_heapsize" : "16384m"
}
},
{
"hbase-site" : {
"hbase.security.authorization": "true",
"hbase.rpc.engine": "org.apache.hadoop.hbase.ipc.SecureRpcEngine",
"hbase_master_heapsize": "250",
"hbase_regionserver_heapsize": "250",
"hbase.rpc.protection": "PRIVACY",
"hbase.tmp.dir": "/disks/0/hadoop/hbase"
}
},
{
"storm-site": {
"storm.local.dir": "/disks/0/hadoop/storm"
}
},
{
"falcon-startup.properties": {
"*.config.store.uri": "file:///disks/0/hadoop/falcon/store"
}
},
{
"hive-site": {
"hive.auto.convert.join.noconditionaltask.size" : "716177408",
"hive.tez.container.size" : "2048",
"hive.tez.java.opts" : "-server -Xmx1638m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC",
"hive.metastore.uris" : "thrift://%HOSTGROUP::master_3%:9083"
}
}
]
configurations = evaluation_configurations if scenario_id == 'evaluation' else standard_configurations
data = {
"configurations" : configurations,
"host_groups": host_groups,
"Blueprints" : {
"blueprint_name" : blueprint_name,
"stack_name" : "HDP",
"stack_version" : "2.2"
}
}
data = json.dumps(data)
request = urllib2.Request(url, data)
request.add_header('Authorization', get_ambari_auth_string())
request.add_header('X-Requested-By', 'ambari')
request.add_header('Content-Type', 'text/plain')
try:
response = urllib2.urlopen(request, timeout=request_timeout)
pprint.pprint(response.read())
except urllib2.HTTPError as e:
logger.error('Cluster deployment failed: ' + e.read())
raise e
return 'myblueprint'
def initiate_cluster_deploy(blueprint_name, cluster_id, num_masters, num_workers):
logger.info('Deploying cluster...')
url = '%s://localhost:%s/api/v1/clusters/%s' % (protocol, port, cluster_id)
if num_masters + num_workers < 4:
raise Exception('Cluster size must be 4 or greater')
data = {
"blueprint": blueprint_name,
"default_password": "admin",
"host_groups": [
]
}
for i in range(1, num_masters + 1):
data['host_groups'].append({
"name": "master_%d" % i,
"hosts": [{
"fqdn": get_fqdn(i)
}]
})
worker_hosts = []
for i in range(num_masters + 1, num_masters + num_workers + 1):
worker_hosts.append({
"fqdn": get_fqdn(i)
})
data['host_groups'].append({
"name": "workers",
"hosts": worker_hosts
})
data = json.dumps(data)
pprint.pprint('data=' + data)
request = urllib2.Request(url, data)
request.add_header('Authorization', get_ambari_auth_string())
request.add_header('X-Requested-By', 'ambari')
request.add_header('Content-Type', 'text/plain')
try:
response = urllib2.urlopen(request, timeout=120)
pprint.pprint(response.read())
except urllib2.HTTPError as e:
logger.error('Cluster deployment failed: ' + e.read())
raise e
def poll_until_cluster_deployed(cluster_id, timeout):
url = '%s://localhost:%s/api/v1/clusters/%s/requests/1?fields=Requests/progress_percent,Requests/request_status' % (protocol, port, cluster_id)
deploy_success = False
deploy_finished = False
start_time = time.time()
logger.info('poll until function')
while time.time() - start_time < timeout:
request = urllib2.Request(url)
request.add_header("Authorization", get_ambari_auth_string())
try:
result = urllib2.urlopen(request, timeout=request_timeout).read()
pprint.pprint(result)
if (result is not None):
jsonResult = json.loads(result)
if jsonResult['Requests']['request_status'] == 'COMPLETED':
deploy_success = True
if int(jsonResult['Requests']['progress_percent']) == 100 or jsonResult['Requests']['request_status'] == 'FAILED':
deploy_finished = True
break
except:
logger.info('Could not poll deploy status from the server.')
time.sleep(5)
if not deploy_finished:
raise TimeoutException('Timed out while waiting for cluster deployment to finish')
elif not deploy_success:
raise Exception('Cluster deploy failed')
if action == 'bootstrap':
masters_iplist = masters_iplist.split(',')
workers_iplist = workers_iplist.split(',')
ambari_server_hostname = get_hostname(1)
all_hostnames = map((lambda i: get_hostname(i)), range(1, num_masters + num_workers + 1))
logger.info(all_hostnames)
host_ip_map = get_host_ip_map(all_hostnames)
update_etc_hosts(host_ip_map)
update_ambari_agent_ini(ambari_server_hostname)
patch_ambari_agent()
run_system_command('chkconfig ambari-agent on')
logger.info('Starting ambari-agent...')
run_system_command('ambari-agent start')
logger.info('ambari-agent started')
if is_ambari_server_host():
run_system_command('chkconfig ambari-server on')
logger.info('Running ambari-server setup...')
run_system_command('ambari-server setup -s -j /usr/jdk64/jdk1.7.0_45')
logger.info('ambari-server setup finished')
if protocol == 'https':
logger.info('Enabling HTTPS...')
enable_https()
logger.info('HTTPS enabled')
logger.info('Starting ambari-server...')
run_system_command('ambari-server start')
logger.info('ambari-server started')
try:
set_admin_password(admin_password, 60 * 2)
# set current_admin_password so that HTTP requests to Ambari start using the new user-specified password
current_admin_password = admin_password
poll_until_all_agents_registered(num_masters + num_workers, 60 * 4)
blueprint_name = create_blueprint(scenario_id)
initiate_cluster_deploy(blueprint_name, cluster_id, num_masters, num_workers)
except:
logger.error('Failed VM Bootstrap')
sys.exit(1)
else:
try:
current_admin_password = admin_password
poll_until_cluster_deployed(cluster_id, check_timeout_seconds)
except TimeoutException as e:
logger.info(e)
if report_timeout_fail:
logger.error('Failed cluster deployment')
sys.exit(1)
else:
logger.info('Cluster deployment has not completed')
sys.exit(0)
except:
logger.error('Failed cluster deployment')
sys.exit(1)
logger.info('Finished VM Bootstrap successfully')
sys.exit(0)
| mit |
c7zero/chipsec | chipsec/hal/uefi_common.py | 5 | 76636 | #!/usr/bin/python
#CHIPSEC: Platform Security Assessment Framework
#Copyright (c) 2010-2016, Intel Corporation
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; Version 2.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#Contact information:
#[email protected]
#
# -------------------------------------------------------------------------------
#
# CHIPSEC: Platform Hardware Security Assessment Framework
# (c) 2010-2012 Intel Corporation
#
# -------------------------------------------------------------------------------
"""
Common UEFI/EFI functionality including UEFI variables, Firmware Volumes, Secure Boot variables, S3 boot-script, UEFI tables, etc.
"""
import os
import struct
from collections import namedtuple
from chipsec.file import *
from chipsec.logger import *
#from chipsec.helper.oshelper import helper
################################################################################################
#
# EFI Variable and Variable Store Defines
#
################################################################################################
# UDK2010.SR1\MdeModulePkg\Include\Guid\VariableFormat.h
#
# #ifndef __VARIABLE_FORMAT_H__
# #define __VARIABLE_FORMAT_H__
#
# #define EFI_VARIABLE_GUID \
# { 0xddcf3616, 0x3275, 0x4164, { 0x98, 0xb6, 0xfe, 0x85, 0x70, 0x7f, 0xfe, 0x7d } }
#
# extern EFI_GUID gEfiVariableGuid;
#
# ///
# /// Alignment of variable name and data, according to the architecture:
# /// * For IA-32 and Intel(R) 64 architectures: 1.
# /// * For IA-64 architecture: 8.
# ///
# #if defined (MDE_CPU_IPF)
# #define ALIGNMENT 8
# #else
# #define ALIGNMENT 1
# #endif
#
# //
# // GET_PAD_SIZE calculates the minimal pad bytes needed to make the current pad size satisfy the alignment requirement.
# //
# #if (ALIGNMENT == 1)
# #define GET_PAD_SIZE(a) (0)
# #else
# #define GET_PAD_SIZE(a) (((~a) + 1) & (ALIGNMENT - 1))
# #endif
#
# ///
# /// Alignment of Variable Data Header in Variable Store region.
# ///
# #define HEADER_ALIGNMENT 4
# #define HEADER_ALIGN(Header) (((UINTN) (Header) + HEADER_ALIGNMENT - 1) & (~(HEADER_ALIGNMENT - 1)))
#
# ///
# /// Status of Variable Store Region.
# ///
# typedef enum {
# EfiRaw,
# EfiValid,
# EfiInvalid,
# EfiUnknown
# } VARIABLE_STORE_STATUS;
#
# #pragma pack(1)
#
# #define VARIABLE_STORE_SIGNATURE EFI_VARIABLE_GUID
#
# ///
# /// Variable Store Header Format and State.
# ///
# #define VARIABLE_STORE_FORMATTED 0x5a
# #define VARIABLE_STORE_HEALTHY 0xfe
#
# ///
# /// Variable Store region header.
# ///
# typedef struct {
# ///
# /// Variable store region signature.
# ///
# EFI_GUID Signature;
# ///
# /// Size of entire variable store,
# /// including size of variable store header but not including the size of FvHeader.
# ///
# UINT32 Size;
# ///
# /// Variable region format state.
# ///
# UINT8 Format;
# ///
# /// Variable region healthy state.
# ///
# UINT8 State;
# UINT16 Reserved;
# UINT32 Reserved1;
# } VARIABLE_STORE_HEADER;
#
# ///
# /// Variable data start flag.
# ///
# #define VARIABLE_DATA 0x55AA
#
# ///
# /// Variable State flags.
# ///
# #define VAR_IN_DELETED_TRANSITION 0xfe ///< Variable is in obsolete transition.
# #define VAR_DELETED 0xfd ///< Variable is obsolete.
# #define VAR_HEADER_VALID_ONLY 0x7f ///< Variable header has been valid.
# #define VAR_ADDED 0x3f ///< Variable has been completely added.
#
# ///
# /// Single Variable Data Header Structure.
# ///
# typedef struct {
# ///
# /// Variable Data Start Flag.
# ///
# UINT16 StartId;
# ///
# /// Variable State defined above.
# ///
# UINT8 State;
# UINT8 Reserved;
# ///
# /// Attributes of variable defined in UEFI specification.
# ///
# UINT32 Attributes;
# ///
# /// Size of variable null-terminated Unicode string name.
# ///
# UINT32 NameSize;
# ///
# /// Size of the variable data without this header.
# ///
# UINT32 DataSize;
# ///
# /// A unique identifier for the vendor that produces and consumes this variable.
# ///
# EFI_GUID VendorGuid;
# } VARIABLE_HEADER;
#
# #pragma pack()
#
# typedef struct _VARIABLE_INFO_ENTRY VARIABLE_INFO_ENTRY;
#
# ///
# /// This structure contains the variable list that is put in EFI system table.
# /// The variable driver collects all variables that were used at boot service time and produces this list.
# /// This is an optional feature to dump all used variables in shell environment.
# ///
# struct _VARIABLE_INFO_ENTRY {
# VARIABLE_INFO_ENTRY *Next; ///< Pointer to next entry.
# EFI_GUID VendorGuid; ///< Guid of Variable.
# CHAR16 *Name; ///< Name of Variable.
# UINT32 Attributes; ///< Attributes of variable defined in UEFI specification.
# UINT32 ReadCount; ///< Number of times to read this variable.
# UINT32 WriteCount; ///< Number of times to write this variable.
# UINT32 DeleteCount; ///< Number of times to delete this variable.
# UINT32 CacheCount; ///< Number of times that cache hits this variable.
# BOOLEAN Volatile; ///< TRUE if volatile, FALSE if non-volatile.
# };
#
# #endif // _EFI_VARIABLE_H_
#
# Variable Store Header Format and State.
#
VARIABLE_STORE_FORMATTED = 0x5a
VARIABLE_STORE_HEALTHY = 0xfe
#
# Variable Store region header.
#
#typedef struct {
# ///
# /// Variable store region signature.
# ///
# EFI_GUID Signature;
# ///
# /// Size of entire variable store,
# /// including size of variable store header but not including the size of FvHeader.
# ///
# UINT32 Size;
# ///
# /// Variable region format state.
# ///
# UINT8 Format;
# ///
# /// Variable region healthy state.
# ///
# UINT8 State;
# UINT16 Reserved;
# UINT32 Reserved1;
#} VARIABLE_STORE_HEADER;
#
# Signature is EFI_GUID (guid0 guid1 guid2 guid3)
VARIABLE_STORE_HEADER_FMT = '<IHH8sIBBHI'
VARIABLE_STORE_HEADER_SIZE = struct.calcsize( VARIABLE_STORE_HEADER_FMT )
class VARIABLE_STORE_HEADER( namedtuple('VARIABLE_STORE_HEADER', 'guid0 guid1 guid2 guid3 Size Format State Reserved Reserved1') ):
__slots__ = ()
def __str__(self):
return """
EFI Variable Store
-----------------------------
Signature : {%08X-%04X-%04X-%04s-%06s}
Size : 0x%08X bytes
Format : 0x%02X
State : 0x%02X
Reserved : 0x%04X
Reserved1 : 0x%08X
""" % ( self.guid0, self.guid1, self.guid2, self.guid3[:2].encode('hex').upper(), self.guid3[-6::].encode('hex').upper(), self.Size, self.Format, self.State, self.Reserved, self.Reserved1 )
#
# Variable data start flag.
#
VARIABLE_DATA = 0x55aa
VARIABLE_DATA_SIGNATURE = struct.pack('=H', VARIABLE_DATA )
#
# Variable Attributes
#
EFI_VARIABLE_NON_VOLATILE = 0x00000001 # Variable is non volatile
EFI_VARIABLE_BOOTSERVICE_ACCESS = 0x00000002 # Variable is boot time accessible
EFI_VARIABLE_RUNTIME_ACCESS = 0x00000004 # Variable is run-time accessible
EFI_VARIABLE_HARDWARE_ERROR_RECORD = 0x00000008 #
EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS = 0x00000010 # Variable is authenticated
EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS = 0x00000020 # Variable is time based authenticated
EFI_VARIABLE_APPEND_WRITE = 0x00000040 # Variable allows append
UEFI23_1_AUTHENTICATED_VARIABLE_ATTRIBUTES = (EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS | EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS)
def IS_VARIABLE_ATTRIBUTE(_c, _Mask):
return ( (_c & _Mask) != 0 )
def IS_EFI_VARIABLE_AUTHENTICATED( attr ):
return ( IS_VARIABLE_ATTRIBUTE( attr, EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS ) or IS_VARIABLE_ATTRIBUTE( attr, EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS ) )
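#
# Illustrative sketch (not part of the original module): summarize a raw UEFI
# variable attributes value using the attribute masks and helpers above.
#
def _example_describe_attributes( attr ):
    s = []
    if IS_VARIABLE_ATTRIBUTE( attr, EFI_VARIABLE_NON_VOLATILE ): s.append('NV')
    if IS_VARIABLE_ATTRIBUTE( attr, EFI_VARIABLE_BOOTSERVICE_ACCESS ): s.append('BS')
    if IS_VARIABLE_ATTRIBUTE( attr, EFI_VARIABLE_RUNTIME_ACCESS ): s.append('RT')
    if IS_EFI_VARIABLE_AUTHENTICATED( attr ): s.append('AUTH')
    return '+'.join(s)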
MAX_VARIABLE_SIZE = 1024
MAX_NVRAM_SIZE = 1024*1024
def get_nvar_name(nvram, name_offset, isAscii):
if isAscii:
nend = nvram.find('\x00', name_offset)
name_size = nend - name_offset + 1 # add trailing zero symbol
name = nvram[name_offset:nend]
return (name, name_size)
else:
nend = nvram.find('\x00\x00', name_offset)
while (nend & 1) == 1:
nend = nend + 1
nend = nvram.find('\x00\x00', nend)
name_size = nend - name_offset + 2 # add trailing zero symbol
name = unicode(nvram[name_offset:nend], "utf-16-le")
return (name, name_size)
VARIABLE_SIGNATURE_VSS = VARIABLE_DATA_SIGNATURE
################################################################################################
#
# EFI Firmware Volume Defines
#
################################################################################################
FFS_ATTRIB_FIXED = 0x04
FFS_ATTRIB_DATA_ALIGNMENT = 0x38
FFS_ATTRIB_CHECKSUM = 0x40
EFI_FILE_HEADER_CONSTRUCTION = 0x01
EFI_FILE_HEADER_VALID = 0x02
EFI_FILE_DATA_VALID = 0x04
EFI_FILE_MARKED_FOR_UPDATE = 0x08
EFI_FILE_DELETED = 0x10
EFI_FILE_HEADER_INVALID = 0x20
FFS_FIXED_CHECKSUM = 0xAA
EFI_FVB2_ERASE_POLARITY = 0x00000800
EFI_FV_FILETYPE_ALL = 0x00
EFI_FV_FILETYPE_RAW = 0x01
EFI_FV_FILETYPE_FREEFORM = 0x02
EFI_FV_FILETYPE_SECURITY_CORE = 0x03
EFI_FV_FILETYPE_PEI_CORE = 0x04
EFI_FV_FILETYPE_DXE_CORE = 0x05
EFI_FV_FILETYPE_PEIM = 0x06
EFI_FV_FILETYPE_DRIVER = 0x07
EFI_FV_FILETYPE_COMBINED_PEIM_DRIVER = 0x08
EFI_FV_FILETYPE_APPLICATION = 0x09
EFI_FV_FILETYPE_FIRMWARE_VOLUME_IMAGE = 0x0b
EFI_FV_FILETYPE_FFS_PAD = 0xf0
FILE_TYPE_NAMES = {0x00: 'FV_ALL', 0x01: 'FV_RAW', 0x02: 'FV_FREEFORM', 0x03: 'FV_SECURITY_CORE', 0x04: 'FV_PEI_CORE', 0x05: 'FV_DXE_CORE', 0x06: 'FV_PEIM', 0x07: 'FV_DRIVER', 0x08: 'FV_COMBINED_PEIM_DRIVER', 0x09: 'FV_APPLICATION', 0x0B: 'FV_FVIMAGE', 0xF0: 'FV_FFS_PAD'}
EFI_SECTION_ALL = 0x00
EFI_SECTION_COMPRESSION = 0x01
EFI_SECTION_GUID_DEFINED = 0x02
EFI_SECTION_PE32 = 0x10
EFI_SECTION_PIC = 0x11
EFI_SECTION_TE = 0x12
EFI_SECTION_DXE_DEPEX = 0x13
EFI_SECTION_VERSION = 0x14
EFI_SECTION_USER_INTERFACE = 0x15
EFI_SECTION_COMPATIBILITY16 = 0x16
EFI_SECTION_FIRMWARE_VOLUME_IMAGE = 0x17
EFI_SECTION_FREEFORM_SUBTYPE_GUID = 0x18
EFI_SECTION_RAW = 0x19
EFI_SECTION_PEI_DEPEX = 0x1B
EFI_SECTION_SMM_DEPEX = 0x1C
SECTION_NAMES = {0x00: 'S_ALL', 0x01: 'S_COMPRESSION', 0x02: 'S_GUID_DEFINED', 0x10: 'S_PE32', 0x11: 'S_PIC', 0x12: 'S_TE', 0x13: 'S_DXE_DEPEX', 0x14: 'S_VERSION', 0x15: 'S_USER_INTERFACE', 0x16: 'S_COMPATIBILITY16', 0x17: 'S_FV_IMAGE', 0x18: 'S_FREEFORM_SUBTYPE_GUID', 0x19: 'S_RAW', 0x1B: 'S_PEI_DEPEX', 0x1C: 'S_SMM_DEPEX'}
EFI_SECTIONS_EXE = [EFI_SECTION_PE32, EFI_SECTION_TE, EFI_SECTION_PIC, EFI_SECTION_COMPATIBILITY16]
GUID = "<IHH8s"
guid_size = struct.calcsize(GUID)
EFI_COMPRESSION_SECTION = "<IB"
EFI_COMPRESSION_SECTION_size = struct.calcsize(EFI_COMPRESSION_SECTION)
EFI_GUID_DEFINED_SECTION = "<IHH8sHH"
EFI_GUID_DEFINED_SECTION_size = struct.calcsize(EFI_GUID_DEFINED_SECTION)
EFI_CRC32_GUIDED_SECTION_EXTRACTION_PROTOCOL_GUID = "FC1BCDB0-7D31-49AA-936A-A4600D9DD083"
VARIABLE_STORE_FV_GUID = 'FFF12B8D-7696-4C8B-A985-2747075B4F50'
EFI_FIRMWARE_FILE_SYSTEM_GUID = "7A9354D9-0468-444A-81CE-0BF617D890DF"
EFI_FIRMWARE_FILE_SYSTEM2_GUID = "8C8CE578-8A3D-4F1C-9935-896185C32DD3"
EFI_FIRMWARE_FILE_SYSTEM3_GUID = "5473C07A-3DCB-4DCA-BD6F-1E9689E7349A"
EFI_FS_GUIDS = [EFI_FIRMWARE_FILE_SYSTEM3_GUID, EFI_FIRMWARE_FILE_SYSTEM2_GUID, EFI_FIRMWARE_FILE_SYSTEM_GUID]
LZMA_CUSTOM_DECOMPRESS_GUID = "EE4E5898-3914-4259-9D6E-DC7BD79403CF"
FIRMWARE_VOLUME_GUID = "24400798-3807-4A42-B413-A1ECEE205DD8"
TIANO_DECOMPRESSED_GUID = "A31280AD-481E-41B6-95E8-127F4C984779"
VOLUME_SECTION_GUID = "367AE684-335D-4671-A16D-899DBFEA6B88"
#
# Compression Types
#
COMPRESSION_TYPE_NONE = 0
COMPRESSION_TYPE_TIANO = 1
COMPRESSION_TYPE_LZMA = 2
COMPRESSION_TYPES = [COMPRESSION_TYPE_NONE, COMPRESSION_TYPE_TIANO, COMPRESSION_TYPE_LZMA]
################################################################################################
#
# Misc Defines
#
################################################################################################
#
# Status codes
# edk2: MdePkg/Include/Base.h
#
# @TODO
#define ENCODE_ERROR(StatusCode) ((RETURN_STATUS)(MAX_BIT | (StatusCode)))
#define ENCODE_WARNING(a) (a)
class StatusCode:
EFI_SUCCESS = 0
EFI_LOAD_ERROR = 1
EFI_INVALID_PARAMETER = 2
EFI_UNSUPPORTED = 3
EFI_BAD_BUFFER_SIZE = 4
EFI_BUFFER_TOO_SMALL = 5
EFI_NOT_READY = 6
EFI_DEVICE_ERROR = 7
EFI_WRITE_PROTECTED = 8
EFI_OUT_OF_RESOURCES = 9
EFI_VOLUME_CORRUPTED = 10
EFI_VOLUME_FULL = 11
EFI_NO_MEDIA = 12
EFI_MEDIA_CHANGED = 13
EFI_NOT_FOUND = 14
EFI_ACCESS_DENIED = 15
EFI_NO_RESPONSE = 16
EFI_NO_MAPPING = 17
EFI_TIMEOUT = 18
EFI_NOT_STARTED = 19
EFI_ALREADY_STARTED = 20
EFI_ABORTED = 21
EFI_ICMP_ERROR = 22
EFI_TFTP_ERROR = 23
EFI_PROTOCOL_ERROR = 24
EFI_INCOMPATIBLE_VERSION = 25
EFI_SECURITY_VIOLATION = 26
EFI_CRC_ERROR = 27
EFI_END_OF_MEDIA = 28
EFI_END_OF_FILE = 31
EFI_INVALID_LANGUAGE = 32
EFI_COMPROMISED_DATA = 33
EFI_HTTP_ERROR = 35
'''
EFI_WARN_UNKNOWN_GLYPH = 1
EFI_WARN_DELETE_FAILURE = 2
EFI_WARN_WRITE_FAILURE = 3
EFI_WARN_BUFFER_TOO_SMALL = 4
EFI_WARN_STALE_DATA = 5
EFI_WARN_FILE_SYSTEM = 6
'''
EFI_STATUS_DICT = {
StatusCode.EFI_SUCCESS :"EFI_SUCCESS",
StatusCode.EFI_LOAD_ERROR :"EFI_LOAD_ERROR",
StatusCode.EFI_INVALID_PARAMETER :"EFI_INVALID_PARAMETER",
StatusCode.EFI_UNSUPPORTED :"EFI_UNSUPPORTED",
StatusCode.EFI_BAD_BUFFER_SIZE :"EFI_BAD_BUFFER_SIZE",
StatusCode.EFI_BUFFER_TOO_SMALL :"EFI_BUFFER_TOO_SMALL",
StatusCode.EFI_NOT_READY :"EFI_NOT_READY",
StatusCode.EFI_DEVICE_ERROR :"EFI_DEVICE_ERROR",
StatusCode.EFI_WRITE_PROTECTED :"EFI_WRITE_PROTECTED",
StatusCode.EFI_OUT_OF_RESOURCES :"EFI_OUT_OF_RESOURCES",
StatusCode.EFI_VOLUME_CORRUPTED :"EFI_VOLUME_CORRUPTED",
StatusCode.EFI_VOLUME_FULL :"EFI_VOLUME_FULL",
StatusCode.EFI_NO_MEDIA :"EFI_NO_MEDIA",
StatusCode.EFI_MEDIA_CHANGED :"EFI_MEDIA_CHANGED",
StatusCode.EFI_NOT_FOUND :"EFI_NOT_FOUND",
StatusCode.EFI_ACCESS_DENIED :"EFI_ACCESS_DENIED",
StatusCode.EFI_NO_RESPONSE :"EFI_NO_RESPONSE",
StatusCode.EFI_NO_MAPPING :"EFI_NO_MAPPING",
StatusCode.EFI_TIMEOUT :"EFI_TIMEOUT",
StatusCode.EFI_NOT_STARTED :"EFI_NOT_STARTED",
StatusCode.EFI_ALREADY_STARTED :"EFI_ALREADY_STARTED",
StatusCode.EFI_ABORTED :"EFI_ABORTED",
StatusCode.EFI_ICMP_ERROR :"EFI_ICMP_ERROR",
StatusCode.EFI_TFTP_ERROR :"EFI_TFTP_ERROR",
StatusCode.EFI_PROTOCOL_ERROR :"EFI_PROTOCOL_ERROR",
StatusCode.EFI_INCOMPATIBLE_VERSION:"EFI_INCOMPATIBLE_VERSION",
StatusCode.EFI_SECURITY_VIOLATION :"EFI_SECURITY_VIOLATION",
StatusCode.EFI_CRC_ERROR :"EFI_CRC_ERROR",
StatusCode.EFI_END_OF_MEDIA :"EFI_END_OF_MEDIA",
StatusCode.EFI_END_OF_FILE :"EFI_END_OF_FILE",
StatusCode.EFI_INVALID_LANGUAGE :"EFI_INVALID_LANGUAGE",
StatusCode.EFI_COMPROMISED_DATA :"EFI_COMPROMISED_DATA",
StatusCode.EFI_HTTP_ERROR :"EFI_HTTP_ERROR"
}
EFI_GUID_FMT = "IHH8s"
def EFI_GUID( guid0, guid1, guid2, guid3 ):
return ("%08X-%04X-%04X-%04s-%06s" % (guid0, guid1, guid2, guid3[:2].encode('hex').upper(), guid3[-6::].encode('hex').upper()) )
def align(of, size):
of = (((of + size - 1)/size) * size)
return of
def bit_set(value, mask, polarity = False):
if polarity: value = ~value
return ( (value & mask) == mask )
def get_3b_size(s):
return (ord(s[0]) + (ord(s[1]) << 8) + (ord(s[2]) << 16))
def guid_str(guid0, guid1, guid2, guid3):
guid = "%08X-%04X-%04X-%04s-%06s" % (guid0, guid1, guid2, guid3[:2].encode('hex').upper(), guid3[-6::].encode('hex').upper())
return guid
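#
# Illustrative sketch (not part of the original module): convert a 16-byte packed
# GUID (as stored in firmware structures) into its canonical string form using
# the GUID format defined above. 'guid_bytes' is a hypothetical byte string.
#
def _example_guid_from_bytes( guid_bytes ):
    guid0, guid1, guid2, guid3 = struct.unpack( GUID, guid_bytes[:guid_size] )
    return guid_str( guid0, guid1, guid2, guid3 )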
# #################################################################################################
#
# UEFI Firmware Volume Parsing/Modification Functionality
#
# #################################################################################################
def align_image(image, size=8, fill='\x00'):
return image.ljust(((len(image) + size - 1) / size) * size, fill)
def get_guid_bin(guid):
values = guid.split('-')
if [len(x) for x in values] == [8, 4, 4, 4, 12]:
values = values[0:3] + [values[3][0:2], values[3][2:4]] + [values[4][x:x+2] for x in xrange(0, 12, 2)]
values = [int(x, 16) for x in values]
return struct.pack('<LHHBBBBBBBB', *tuple(values))
return ''
def assemble_uefi_file(guid, image):
EFI_FFS_FILE_HEADER = "<16sHBBL"
FileHeaderSize = struct.calcsize(EFI_FFS_FILE_HEADER)
Type = EFI_FV_FILETYPE_FREEFORM
CheckSum = 0x0000
Attributes = 0x40
Size = FileHeaderSize + len(image)
State = 0xF8
SizeState = (Size & 0x00FFFFFF) | (State << 24)
FileHeader = struct.pack(EFI_FFS_FILE_HEADER, get_guid_bin(guid), CheckSum, Type, Attributes, (Size & 0x00FFFFFF))
hsum = FvChecksum8(FileHeader)
if (Attributes & FFS_ATTRIB_CHECKSUM):
fsum = FvChecksum8(image)
else:
fsum = FFS_FIXED_CHECKSUM
CheckSum = (hsum | (fsum << 8))
return struct.pack(EFI_FFS_FILE_HEADER, get_guid_bin(guid), CheckSum, Type, Attributes, SizeState) + image
def assemble_uefi_section(image, uncompressed_size, compression_type):
EFI_COMPRESSION_SECTION_HEADER = "<LLB"
SectionType = EFI_SECTION_COMPRESSION
SectionSize = struct.calcsize(EFI_COMPRESSION_SECTION_HEADER) + len(image)
SectionHeader = struct.pack(EFI_COMPRESSION_SECTION_HEADER, (SectionSize & 0x00FFFFFF) | (SectionType << 24), uncompressed_size, compression_type)
return SectionHeader + image
def assemble_uefi_raw(image):
return align_image(struct.pack('<L', ((len(image) + 4) & 0x00FFFFFF) + (EFI_SECTION_RAW << 24)) + image)
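#
# Illustrative sketch (not part of the original module): wrap an arbitrary payload
# in a raw section and then in a freeform FFS file, e.g. to build a test file for
# injection into a firmware volume image. 'guid' and 'payload' are hypothetical
# caller-supplied values.
#
def _example_build_freeform_file( guid, payload ):
    raw_section = assemble_uefi_raw( payload )
    return assemble_uefi_file( guid, raw_section )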
def FvSum8(buffer):
sum8 = 0
for b in buffer:
sum8 = (sum8 + ord(b)) & 0xff
return sum8
def FvChecksum8(buffer):
return ((0x100 - FvSum8(buffer)) & 0xff)
def FvSum16(buffer):
sum16 = 0
blen = len(buffer)/2
i = 0
while i < blen:
el16 = ord(buffer[2*i]) | (ord(buffer[2*i+1]) << 8)
sum16 = (sum16 + el16) & 0xffff
i = i + 1
return sum16
def FvChecksum16(buffer):
return ((0x10000 - FvSum16(buffer)) & 0xffff)
def ValidateFwVolumeHeader(ZeroVector, FsGuid, FvLength, Attributes, HeaderLength, Checksum, ExtHeaderOffset, Reserved, CalcSum, size):
zero_vector = (ZeroVector == '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
fv_rsvd = (Reserved == 0)
fs_guid = (FsGuid in (EFI_FS_GUIDS + [VARIABLE_STORE_FV_GUID]))
fv_len = (FvLength <= size)
fv_header_len = (ExtHeaderOffset < FvLength) and (HeaderLength < FvLength)
#sum = (Checksum == CalcSum)
return fv_rsvd and fv_len and fv_header_len
def NextFwVolume(buffer, off = 0):
fof = off
EFI_FIRMWARE_VOLUME_HEADER = "<16sIHH8sQIIHHHBB"
vf_header_size = struct.calcsize(EFI_FIRMWARE_VOLUME_HEADER)
EFI_FV_BLOCK_MAP_ENTRY = "<II"
size = len(buffer)
res = (None, None, None, None, None, None, None, None, None)
while ((fof + vf_header_size) < size):
fof = buffer.find("_FVH", fof)
if fof < 0x28: return res
fof = fof - 0x28
ZeroVector, FileSystemGuid0, FileSystemGuid1,FileSystemGuid2,FileSystemGuid3, \
FvLength, Signature, Attributes, HeaderLength, Checksum, ExtHeaderOffset, \
Reserved, Revision = struct.unpack(EFI_FIRMWARE_VOLUME_HEADER, buffer[fof:fof+vf_header_size])
'''
print "\nFV volume offset: 0x%08X" % fof
print "\tFvLength: 0x%08X" % FvLength
print "\tAttributes: 0x%08X" % Attributes
print "\tHeaderLength: 0x%04X" % HeaderLength
print "\tChecksum: 0x%04X" % Checksum
print "\tRevision: 0x%02X" % Revision
print "\tExtHeaderOffset: 0x%02X" % ExtHeaderOffset
print "\tReserved: 0x%02X" % Reserved
'''
#print "FFS Guid: %s" % guid_str(FileSystemGuid0, FileSystemGuid1,FileSystemGuid2, FileSystemGuid3)
#print "FV Checksum: 0x%04X (0x%04X)" % (Checksum, FvChecksum16(buffer[fof:fof+HeaderLength]))
#'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
fvh = struct.pack(EFI_FIRMWARE_VOLUME_HEADER, ZeroVector, \
FileSystemGuid0, FileSystemGuid1,FileSystemGuid2,FileSystemGuid3, \
FvLength, Signature, Attributes, HeaderLength, 0, ExtHeaderOffset, \
Reserved, Revision)
if (len(fvh) < HeaderLength):
#print "len(fvh)=%d, HeaderLength=%d" % (len(fvh), HeaderLength)
tail = buffer[fof+len(fvh):fof+HeaderLength]
fvh = fvh + tail
CalcSum = FvChecksum16(fvh)
FsGuid = guid_str(FileSystemGuid0, FileSystemGuid1,FileSystemGuid2,FileSystemGuid3)
if (ValidateFwVolumeHeader(ZeroVector, FsGuid, FvLength, Attributes, HeaderLength, Checksum, ExtHeaderOffset, Reserved, CalcSum, size)):
res = (fof, FsGuid, FvLength, Attributes, HeaderLength, Checksum, ExtHeaderOffset, buffer[fof:fof+FvLength], CalcSum)
return res
else:
fof += 0x2C
return res
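#
# Illustrative sketch (not part of the original module): enumerate all firmware
# volumes in a flash image. 'rom' is a hypothetical byte string containing the
# SPI flash (or BIOS region) contents.
#
def _example_list_firmware_volumes( rom ):
    volumes = []
    fv_off, fv_guid, fv_size, _, _, _, _, fv_img, _ = NextFwVolume( rom )
    while fv_off is not None:
        volumes.append( (fv_off, fv_guid, fv_size) )
        fv_off, fv_guid, fv_size, _, _, _, _, fv_img, _ = NextFwVolume( rom, fv_off + fv_size )
    return volumes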
EFI_FFS_FILE_HEADER = "<IHH8sHBB3sB"
file_header_size = struct.calcsize(EFI_FFS_FILE_HEADER)
def NextFwFile(FvImage, FvLength, fof, polarity):
fof = align(fof, 8)
cur_offset = fof
next_offset = None
res = None
update_or_deleted = False
if (fof + file_header_size) <= min(FvLength, len(FvImage)):
if ('\xff\xff\xff\xff' == FvImage[fof+file_header_size-4:fof+file_header_size]):
next_offset = fof + 8
return (cur_offset, next_offset, None, None, None, None, None, None, None, None, update_or_deleted, None)
Name0, Name1, Name2, Name3, IntegrityCheck, Type, Attributes, Size, State = struct.unpack(EFI_FFS_FILE_HEADER, FvImage[fof:fof+file_header_size])
fsize = get_3b_size(Size)
update_or_deleted = (bit_set(State, EFI_FILE_MARKED_FOR_UPDATE, polarity)) or (bit_set(State, EFI_FILE_DELETED, polarity))
if (not bit_set(State, EFI_FILE_HEADER_VALID, polarity)) or (bit_set(State, EFI_FILE_HEADER_INVALID, polarity)):
next_offset = align(fof + 1, 8)
elif (not bit_set(State, EFI_FILE_DATA_VALID, polarity)):
next_offset = align(fof + 1, 8)
elif fsize == 0:
next_offset = align(fof + 1, 8)
else:
next_offset = fof + fsize
next_offset = align(next_offset, 8)
Name = guid_str(Name0, Name1, Name2, Name3)
fheader = struct.pack(EFI_FFS_FILE_HEADER, Name0, Name1, Name2, Name3, 0, Type, Attributes, Size, 0)
hsum = FvChecksum8(fheader)
if (Attributes & FFS_ATTRIB_CHECKSUM):
fsum = FvChecksum8(FvImage[fof+file_header_size:fof+fsize])
else:
fsum = FFS_FIXED_CHECKSUM
CalcSum = (hsum | (fsum << 8))
res = (cur_offset, next_offset, Name, Type, Attributes, State, IntegrityCheck, fsize, FvImage[fof:fof+fsize], file_header_size, update_or_deleted, CalcSum)
if res is None: return (cur_offset, next_offset, None, None, None, None, None, None, None, None, update_or_deleted, None)
else: return res
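#
# Illustrative sketch (not part of the original module): walk the FFS files of a
# single firmware volume. 'fv_img', 'fv_len', 'fv_attributes' and 'hdr_len' are
# assumed to come from NextFwVolume() above ('hdr_len' being the FV HeaderLength);
# erase polarity is taken from the FV attributes since the file state bits depend on it.
#
def _example_list_files_in_volume( fv_img, fv_len, fv_attributes, hdr_len ):
    polarity = bit_set( fv_attributes, EFI_FVB2_ERASE_POLARITY )
    files = []
    foff = hdr_len
    while foff < fv_len:
        cur_off, next_off, name, ftype, attrs, state, csum, fsize, fimg, hdrsz, upd, calcsum = NextFwFile( fv_img, fv_len, foff, polarity )
        if name is not None:
            files.append( (cur_off, name, ftype, fsize) )
        if next_off is None or next_off <= foff: break
        foff = next_off
    return files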
EFI_COMMON_SECTION_HEADER = "<3sB"
EFI_COMMON_SECTION_HEADER_size = struct.calcsize(EFI_COMMON_SECTION_HEADER)
def NextFwFileSection(sections, ssize, sof, polarity):
# offset, next_offset, SecName, SecType, SecBody, SecHeaderSize
cur_offset = sof
if (sof + EFI_COMMON_SECTION_HEADER_size) < ssize:
header = sections[sof:sof+EFI_COMMON_SECTION_HEADER_size]
if len(header) < EFI_COMMON_SECTION_HEADER_size: return (None, None, None, None, None, None)
Size, Type = struct.unpack(EFI_COMMON_SECTION_HEADER, header)
Size = get_3b_size(Size)
sec_name = "S_UNKNOWN_%02X" % Type
if Type in SECTION_NAMES.keys():
sec_name = SECTION_NAMES[Type]
if (Size == 0xffffff and Type == 0xff) or (Size == 0):
sof = align(sof + 4, 4)
return (cur_offset, sof, None, None, None, None)
sec_body = sections[sof:sof+Size]
sof = align(sof + Size, 4)
return (cur_offset, sof, sec_name, Type, sec_body, EFI_COMMON_SECTION_HEADER_size)
return (None, None, None, None, None, None)
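#
# Illustrative sketch (not part of the original module): walk the sections of a
# single FFS file. 'file_body' is assumed to be the file contents without the FFS
# file header; 'polarity' is the erase polarity of the enclosing volume.
#
def _example_list_file_sections( file_body, polarity ):
    sections = []
    sof = 0
    while True:
        cur_off, next_off, sec_name, sec_type, sec_body, sec_hdr_size = NextFwFileSection( file_body, len(file_body), sof, polarity )
        if cur_off is None or next_off is None: break
        if sec_name is not None:
            sections.append( (cur_off, sec_name, sec_type) )
        if next_off <= sof: break
        sof = next_off
    return sections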
def DecodeSection(SecType, SecBody, SecHeaderSize):
pass
# #################################################################################################
#
# UEFI Variable (NVRAM) Parsing Functionality
#
# #################################################################################################
# typedef struct {
# ///
# /// Type of the signature. GUID signature types are defined in below.
# ///
# EFI_GUID SignatureType;
# ///
# /// Total size of the signature list, including this header.
# ///
# UINT32 SignatureListSize;
# ///
# /// Size of the signature header which precedes the array of signatures.
# ///
# UINT32 SignatureHeaderSize;
# ///
# /// Size of each signature.
# ///
# UINT32 SignatureSize;
# ///
# /// Header before the array of signatures. The format of this header is specified
# /// by the SignatureType.
# /// UINT8 SignatureHeader[SignatureHeaderSize];
# ///
# /// An array of signatures. Each signature is SignatureSize bytes in length.
# /// EFI_SIGNATURE_DATA Signatures[][SignatureSize];
# ///
# } EFI_SIGNATURE_LIST;
SIGNATURE_LIST = "<IHH8sIII"
SIGNATURE_LIST_size = struct.calcsize(SIGNATURE_LIST)
def parse_sha256(data):
return
def parse_rsa2048(data):
return
def parse_rsa2048_sha256(data):
return
def parse_sha1(data):
return
def parse_rsa2048_sha1(data):
return
def parse_x509(data):
return
def parse_sha224(data):
return
def parse_sha384(data):
return
def parse_sha512(data):
return
def parse_pkcs7(data):
return
sig_types = {"C1C41626-504C-4092-ACA9-41F936934328": ("EFI_CERT_SHA256_GUID", parse_sha256, 0x30, "SHA256"), \
"3C5766E8-269C-4E34-AA14-ED776E85B3B6": ("EFI_CERT_RSA2048_GUID", parse_rsa2048, 0x110, "RSA2048"), \
"E2B36190-879B-4A3D-AD8D-F2E7BBA32784": ("EFI_CERT_RSA2048_SHA256_GUID", parse_rsa2048_sha256, 0x110, "RSA2048_SHA256"), \
"826CA512-CF10-4AC9-B187-BE01496631BD": ("EFI_CERT_SHA1_GUID", parse_sha1, 0x24, "SHA1"), \
"67F8444F-8743-48F1-A328-1EAAB8736080": ("EFI_CERT_RSA2048_SHA1_GUID", parse_rsa2048_sha1, 0x110, "RSA2048_SHA1"), \
"A5C059A1-94E4-4AA7-87B5-AB155C2BF072": ("EFI_CERT_X509_GUID", parse_x509, 0, "X509"), \
"0B6E5233-A65C-44C9-9407-D9AB83BFC8BD": ("EFI_CERT_SHA224_GUID", parse_sha224, 0x2c, "SHA224"), \
"FF3E5307-9FD0-48C9-85F1-8AD56C701E01": ("EFI_CERT_SHA384_GUID", parse_sha384, 0x40, "SHA384"), \
"093E0FAE-A6C4-4F50-9F1B-D41E2B89C19A": ("EFI_CERT_SHA512_GUID", parse_sha512, 0x50, "SHA512"), \
"4AAFD29D-68DF-49EE-8AA9-347D375665A7": ("EFI_CERT_TYPE_PKCS7_GUID", parse_pkcs7, 0, "PKCS7") }
#def parse_db(db, var_name, path):
def parse_db( db, decode_dir ):
db_size = len(db)
if 0 == db_size:
return
dof = 0
nsig = 0
entries = []
# some platforms have 0's in the beginning; skip all leading zero bytes (no known SignatureType starts with 0x00):
while (dof < db_size and db[dof] == '\x00'): dof = dof + 1
while (dof + SIGNATURE_LIST_size) < db_size:
SignatureType0, SignatureType1, SignatureType2, SignatureType3, SignatureListSize, SignatureHeaderSize, SignatureSize \
= struct.unpack(SIGNATURE_LIST, db[dof:dof+SIGNATURE_LIST_size])
# prevent infinite loop when parsing malformed var
if SignatureListSize == 0:
logger().log_bad("db parsing failed!")
return entries
SignatureType = guid_str(SignatureType0, SignatureType1, SignatureType2, SignatureType3)
short_name = "UNKNOWN"
sig_parse_f = None
sig_size = 0
if (SignatureType in sig_types.keys()):
sig_name, sig_parse_f, sig_size, short_name = sig_types[SignatureType]
#logger().log( "SignatureType : %s (%s)" % (SignatureType, sig_name) )
#logger().log( "SignatureListSize : 0x%08X" % SignatureListSize )
#logger().log( "SignatureHeaderSize : 0x%08X" % SignatureHeaderSize )
#logger().log( "SignatureSize : 0x%08X" % SignatureSize )
#logger().log( "Parsing..." )
if (((sig_size > 0) and (sig_size == SignatureSize)) or ((sig_size == 0) and (SignatureSize >= 0x10))):
sof = 0
sig_list = db[dof+SIGNATURE_LIST_size+SignatureHeaderSize:dof+SignatureListSize]
sig_list_size = len(sig_list)
while ((sof + guid_size) < sig_list_size):
sig_data = sig_list[sof:sof+SignatureSize]
owner0, owner1, owner2, owner3 = struct.unpack(GUID, sig_data[:guid_size])
owner = guid_str(owner0, owner1, owner2, owner3)
data = sig_data[guid_size:]
#logger().log( "owner: %s" % owner )
entries.append( data )
sig_file_name = "%s-%s-%02d.bin" % (short_name, owner, nsig)
sig_file_name = os.path.join(decode_dir, sig_file_name)
write_file(sig_file_name, data)
if (sig_parse_f != None):
sig_parse_f(data)
sof = sof + SignatureSize
nsig = nsig + 1
else:
err_str = "Wrong SignatureSize for %s type: 0x%X." % (SignatureType, SignatureSize)
if (sig_size > 0): err_str = err_str + " Must be 0x%X." % (sig_size)
else: err_str = err_str + " Must be >= 0x10."
logger().error( err_str )
# dump the remainder of this signature list so that 'data' is defined in this branch
data = db[dof+SIGNATURE_LIST_size:dof+SignatureListSize]
entries.append( data )
sig_file_name = "%s-%s-%02d.bin" % (short_name, SignatureType, nsig)
sig_file_name = os.path.join(decode_dir, sig_file_name)
write_file(sig_file_name, data)
nsig = nsig + 1
dof = dof + SignatureListSize
return entries
def parse_efivar_file( fname, var=None ):
if not var:
var = read_file( fname )
#path, var_name = os.path.split( fname )
#var_name, ext = os.path.splitext( var_name )
var_path = fname + '.dir'
if not os.path.exists( var_path ):
os.makedirs( var_path )
parse_db( var, var_path )
########################################################################################################
#
# S3 Resume Boot-Script Parsing Functionality
#
########################################################################################################
BOOTSCRIPT_TABLE_OFFSET = 24
RUNTIME_SCRIPT_TABLE_BASE_OFFSET = 32
ACPI_VARIABLE_SET_STRUCT_SIZE = 0x48
S3_BOOTSCRIPT_VARIABLES = [ 'AcpiGlobalVariable' ]
MAX_S3_BOOTSCRIPT_ENTRY_LENGTH = 0x200
#
# MdePkg\Include\Pi\PiS3BootScript.h
#
#//*******************************************
#// EFI Boot Script Opcode definitions
#//*******************************************
#define EFI_BOOT_SCRIPT_IO_WRITE_OPCODE 0x00
#define EFI_BOOT_SCRIPT_IO_READ_WRITE_OPCODE 0x01
#define EFI_BOOT_SCRIPT_MEM_WRITE_OPCODE 0x02
#define EFI_BOOT_SCRIPT_MEM_READ_WRITE_OPCODE 0x03
#define EFI_BOOT_SCRIPT_PCI_CONFIG_WRITE_OPCODE 0x04
#define EFI_BOOT_SCRIPT_PCI_CONFIG_READ_WRITE_OPCODE 0x05
#define EFI_BOOT_SCRIPT_SMBUS_EXECUTE_OPCODE 0x06
#define EFI_BOOT_SCRIPT_STALL_OPCODE 0x07
#define EFI_BOOT_SCRIPT_DISPATCH_OPCODE 0x08
#define EFI_BOOT_SCRIPT_DISPATCH_2_OPCODE 0x09
#define EFI_BOOT_SCRIPT_INFORMATION_OPCODE 0x0A
#define EFI_BOOT_SCRIPT_PCI_CONFIG2_WRITE_OPCODE 0x0B
#define EFI_BOOT_SCRIPT_PCI_CONFIG2_READ_WRITE_OPCODE 0x0C
#define EFI_BOOT_SCRIPT_IO_POLL_OPCODE 0x0D
#define EFI_BOOT_SCRIPT_MEM_POLL_OPCODE 0x0E
#define EFI_BOOT_SCRIPT_PCI_CONFIG_POLL_OPCODE 0x0F
#define EFI_BOOT_SCRIPT_PCI_CONFIG2_POLL_OPCODE 0x10
class S3BootScriptOpcode:
EFI_BOOT_SCRIPT_IO_WRITE_OPCODE = 0x00
EFI_BOOT_SCRIPT_IO_READ_WRITE_OPCODE = 0x01
EFI_BOOT_SCRIPT_MEM_WRITE_OPCODE = 0x02
EFI_BOOT_SCRIPT_MEM_READ_WRITE_OPCODE = 0x03
EFI_BOOT_SCRIPT_PCI_CONFIG_WRITE_OPCODE = 0x04
EFI_BOOT_SCRIPT_PCI_CONFIG_READ_WRITE_OPCODE = 0x05
EFI_BOOT_SCRIPT_SMBUS_EXECUTE_OPCODE = 0x06
EFI_BOOT_SCRIPT_STALL_OPCODE = 0x07
EFI_BOOT_SCRIPT_DISPATCH_OPCODE = 0x08
#EFI_BOOT_SCRIPT_DISPATCH_2_OPCODE = 0x09
#EFI_BOOT_SCRIPT_INFORMATION_OPCODE = 0x0A
#EFI_BOOT_SCRIPT_PCI_CONFIG2_WRITE_OPCODE = 0x0B
#EFI_BOOT_SCRIPT_PCI_CONFIG2_READ_WRITE_OPCODE = 0x0C
#EFI_BOOT_SCRIPT_IO_POLL_OPCODE = 0x0D
#EFI_BOOT_SCRIPT_MEM_POLL_OPCODE = 0x0E
#EFI_BOOT_SCRIPT_PCI_CONFIG_POLL_OPCODE = 0x0F
#EFI_BOOT_SCRIPT_PCI_CONFIG2_POLL_OPCODE = 0x10
#EFI_BOOT_SCRIPT_TABLE_OPCODE = 0xAA
EFI_BOOT_SCRIPT_TERMINATE_OPCODE = 0xFF
class S3BootScriptOpcode_MDE (S3BootScriptOpcode):
EFI_BOOT_SCRIPT_DISPATCH_2_OPCODE = 0x09
EFI_BOOT_SCRIPT_INFORMATION_OPCODE = 0x0A
EFI_BOOT_SCRIPT_PCI_CONFIG2_WRITE_OPCODE = 0x0B
EFI_BOOT_SCRIPT_PCI_CONFIG2_READ_WRITE_OPCODE = 0x0C
EFI_BOOT_SCRIPT_IO_POLL_OPCODE = 0x0D
EFI_BOOT_SCRIPT_MEM_POLL_OPCODE = 0x0E
EFI_BOOT_SCRIPT_PCI_CONFIG_POLL_OPCODE = 0x0F
EFI_BOOT_SCRIPT_PCI_CONFIG2_POLL_OPCODE = 0x10
#
# EdkCompatibilityPkg\Foundation\Framework\Include\EfiBootScript.h
#
#define EFI_BOOT_SCRIPT_IO_WRITE_OPCODE 0x00
#define EFI_BOOT_SCRIPT_IO_READ_WRITE_OPCODE 0x01
#define EFI_BOOT_SCRIPT_MEM_WRITE_OPCODE 0x02
#define EFI_BOOT_SCRIPT_MEM_READ_WRITE_OPCODE 0x03
#define EFI_BOOT_SCRIPT_PCI_CONFIG_WRITE_OPCODE 0x04
#define EFI_BOOT_SCRIPT_PCI_CONFIG_READ_WRITE_OPCODE 0x05
#define EFI_BOOT_SCRIPT_SMBUS_EXECUTE_OPCODE 0x06
#define EFI_BOOT_SCRIPT_STALL_OPCODE 0x07
#define EFI_BOOT_SCRIPT_DISPATCH_OPCODE 0x08
#
#//
#// Extensions to boot script definitions
#//
#define EFI_BOOT_SCRIPT_MEM_POLL_OPCODE 0x09
#define EFI_BOOT_SCRIPT_INFORMATION_OPCODE 0x0A
#define EFI_BOOT_SCRIPT_PCI_CONFIG2_WRITE_OPCODE 0x0B
#define EFI_BOOT_SCRIPT_PCI_CONFIG2_READ_WRITE_OPCODE 0x0C
#
#define EFI_BOOT_SCRIPT_TABLE_OPCODE 0xAA
#define EFI_BOOT_SCRIPT_TERMINATE_OPCODE 0xFF
class S3BootScriptOpcode_EdkCompat (S3BootScriptOpcode):
EFI_BOOT_SCRIPT_MEM_POLL_OPCODE = 0x09
EFI_BOOT_SCRIPT_INFORMATION_OPCODE = 0x0A
EFI_BOOT_SCRIPT_PCI_CONFIG2_WRITE_OPCODE = 0x0B
EFI_BOOT_SCRIPT_PCI_CONFIG2_READ_WRITE_OPCODE = 0x0C
EFI_BOOT_SCRIPT_TABLE_OPCODE = 0xAA
#
# Names of S3 Boot Script Opcodes
#
script_opcodes = {
S3BootScriptOpcode.EFI_BOOT_SCRIPT_IO_WRITE_OPCODE: "S3_BOOTSCRIPT_IO_WRITE",
S3BootScriptOpcode.EFI_BOOT_SCRIPT_IO_READ_WRITE_OPCODE: "S3_BOOTSCRIPT_IO_READ_WRITE",
S3BootScriptOpcode.EFI_BOOT_SCRIPT_MEM_WRITE_OPCODE: "S3_BOOTSCRIPT_MEM_WRITE",
S3BootScriptOpcode.EFI_BOOT_SCRIPT_MEM_READ_WRITE_OPCODE: "S3_BOOTSCRIPT_MEM_READ_WRITE",
S3BootScriptOpcode.EFI_BOOT_SCRIPT_PCI_CONFIG_WRITE_OPCODE: "S3_BOOTSCRIPT_PCI_CONFIG_WRITE",
S3BootScriptOpcode.EFI_BOOT_SCRIPT_PCI_CONFIG_READ_WRITE_OPCODE: "S3_BOOTSCRIPT_PCI_CONFIG_READ_WRITE",
S3BootScriptOpcode.EFI_BOOT_SCRIPT_SMBUS_EXECUTE_OPCODE: "S3_BOOTSCRIPT_SMBUS_EXECUTE",
S3BootScriptOpcode.EFI_BOOT_SCRIPT_STALL_OPCODE: "S3_BOOTSCRIPT_STALL",
S3BootScriptOpcode.EFI_BOOT_SCRIPT_DISPATCH_OPCODE: "S3_BOOTSCRIPT_DISPATCH",
#S3BootScriptOpcode.EFI_BOOT_SCRIPT_DISPATCH_2_OPCODE: "S3_BOOTSCRIPT_DISPATCH_2",
#S3BootScriptOpcode.EFI_BOOT_SCRIPT_INFORMATION_OPCODE: "S3_BOOTSCRIPT_INFORMATION",
#S3BootScriptOpcode.EFI_BOOT_SCRIPT_PCI_CONFIG2_WRITE_OPCODE: "S3_BOOTSCRIPT_PCI_CONFIG2_WRITE",
#S3BootScriptOpcode.EFI_BOOT_SCRIPT_PCI_CONFIG2_READ_WRITE_OPCODE: "S3_BOOTSCRIPT_PCI_CONFIG2_READ_WRITE",
#S3BootScriptOpcode.EFI_BOOT_SCRIPT_IO_POLL_OPCODE: "S3_BOOTSCRIPT_IO_POLL",
#S3BootScriptOpcode.EFI_BOOT_SCRIPT_MEM_POLL_OPCODE: "S3_BOOTSCRIPT_MEM_POLL",
#S3BootScriptOpcode.EFI_BOOT_SCRIPT_PCI_CONFIG_POLL_OPCODE: "S3_BOOTSCRIPT_PCI_CONFIG_POLL",
#S3BootScriptOpcode.EFI_BOOT_SCRIPT_PCI_CONFIG2_POLL_OPCODE: "S3_BOOTSCRIPT_PCI_CONFIG2_POLL",
#S3BootScriptOpcode.EFI_BOOT_SCRIPT_TABLE_OPCODE: "S3_BOOTSCRIPT_TABLE",
S3BootScriptOpcode.EFI_BOOT_SCRIPT_TERMINATE_OPCODE: "S3_BOOTSCRIPT_TERMINATE"
}
# //*******************************************
# // EFI_BOOT_SCRIPT_WIDTH
# //*******************************************
# typedef enum {
# EfiBootScriptWidthUint8,
# EfiBootScriptWidthUint16,
# EfiBootScriptWidthUint32,
# EfiBootScriptWidthUint64,
# EfiBootScriptWidthFifoUint8,
# EfiBootScriptWidthFifoUint16,
# EfiBootScriptWidthFifoUint32,
# EfiBootScriptWidthFifoUint64,
# EfiBootScriptWidthFillUint8,
# EfiBootScriptWidthFillUint16,
# EfiBootScriptWidthFillUint32,
# EfiBootScriptWidthFillUint64,
# EfiBootScriptWidthMaximum
# } EFI_BOOT_SCRIPT_WIDTH;
class S3BootScriptWidth:
EFI_BOOT_SCRIPT_WIDTH_UINT8 = 0x00
EFI_BOOT_SCRIPT_WIDTH_UINT16 = 0x01
EFI_BOOT_SCRIPT_WIDTH_UINT32 = 0x02
EFI_BOOT_SCRIPT_WIDTH_UINT64 = 0x03
script_width_sizes = {
S3BootScriptWidth.EFI_BOOT_SCRIPT_WIDTH_UINT8 : 1,
S3BootScriptWidth.EFI_BOOT_SCRIPT_WIDTH_UINT16 : 2,
S3BootScriptWidth.EFI_BOOT_SCRIPT_WIDTH_UINT32 : 4,
S3BootScriptWidth.EFI_BOOT_SCRIPT_WIDTH_UINT64 : 8
}
script_width_values = {
1 : S3BootScriptWidth.EFI_BOOT_SCRIPT_WIDTH_UINT8,
2 : S3BootScriptWidth.EFI_BOOT_SCRIPT_WIDTH_UINT16,
4 : S3BootScriptWidth.EFI_BOOT_SCRIPT_WIDTH_UINT32,
8 : S3BootScriptWidth.EFI_BOOT_SCRIPT_WIDTH_UINT64
}
script_width_formats = {
S3BootScriptWidth.EFI_BOOT_SCRIPT_WIDTH_UINT8 : 'B',
S3BootScriptWidth.EFI_BOOT_SCRIPT_WIDTH_UINT16 : 'H',
S3BootScriptWidth.EFI_BOOT_SCRIPT_WIDTH_UINT32 : 'I',
S3BootScriptWidth.EFI_BOOT_SCRIPT_WIDTH_UINT64 : 'Q'
}
# //************************************************
# // EFI_SMBUS_DEVICE_ADDRESS
# //************************************************
# typedef struct _EFI_SMBUS_DEVICE_ADDRESS {
# UINTN SmbusDeviceAddress:7;
# } EFI_SMBUS_DEVICE_ADDRESS;
# //************************************************
# // EFI_SMBUS_DEVICE_COMMAND
# //************************************************
# typedef UINTN EFI_SMBUS_DEVICE_COMMAND;
#
# //************************************************
# // EFI_SMBUS_OPERATION
# //************************************************
# typedef enum _EFI_SMBUS_OPERATION {
# EfiSmbusQuickRead,
# EfiSmbusQuickWrite,
# EfiSmbusReceiveByte,
# EfiSmbusSendByte,
# EfiSmbusReadByte,
# EfiSmbusWriteByte,
# EfiSmbusReadWord,
# EfiSmbusWriteWord,
# EfiSmbusReadBlock,
# EfiSmbusWriteBlock,
# EfiSmbusProcessCall,
# EfiSmbusBWBRProcessCall
# } EFI_SMBUS_OPERATION;
class S3BootScriptSmbusOperation:
QUICK_READ = 0x00
QUICK_WRITE = 0x01
RECEIVE_BYTE = 0x02
SEND_BYTE = 0x03
READ_BYTE = 0x04
WRITE_BYTE = 0x05
READ_WORD = 0x06
WRITE_WORD = 0x07
READ_BLOCK = 0x08
WRITE_BLOCK = 0x09
PROCESS_CALL = 0x0A
BWBR_PROCESS_CALL = 0x0B
class op_io_pci_mem():
def __init__(self, opcode, size, width, address, unknown, count, buffer, value=None, mask=None):
self.opcode = opcode
self.size = size
self.width = width
self.address = address
self.unknown = unknown
self.count = count
self.value = value
self.mask = mask
self.name = script_opcodes[ opcode ]
self.buffer = buffer # data[ self.size : ]
self.values = None
if self.count is not None and self.count > 0 and self.buffer is not None:
sz = self.count * script_width_sizes[ self.width ]
if len(self.buffer) != sz:
logger().log( '[?] buffer size (0x%X) != Width x Count (0x%X)' % (len(self.buffer), sz) )
else:
self.values = list( struct.unpack( ('<%d%c' % (self.count,script_width_formats[self.width])), self.buffer ) )
def __str__(self):
str_r = " Opcode : %s (0x%04X)\n" % (self.name, self.opcode)
str_r += " Width : 0x%02X (%X bytes)\n" % (self.width, script_width_sizes[self.width])
str_r += " Address: 0x%08X\n" % self.address
if self.value is not None: str_r += " Value : 0x%08X\n" % self.value
if self.mask is not None: str_r += " Mask : 0x%08X\n" % self.mask
if self.unknown is not None: str_r += " Unknown: 0x%04X\n" % self.unknown
if self.count is not None: str_r += " Count : 0x%X\n" % self.count
if self.values is not None:
fmt = '0x%0' + ( '%dX' % (script_width_sizes[self.width]*2) )
str_r += " Values : %s\n" % (" ".join( [fmt % v for v in self.values] ))
elif self.buffer is not None:
str_r += (" Buffer (size = 0x%X):\n" % len(self.buffer)) + dump_buffer( self.buffer, 16 )
return str_r
class op_smbus_execute():
def __init__(self, opcode, size, slave_address, command, operation, peccheck):
self.opcode = opcode
self.size = size
self.slave_address = slave_address
self.command = command
self.operation = operation
self.peccheck = peccheck
self.name = script_opcodes[ opcode ]
def __str__(self):
str_r = " Opcode : %s (0x%04X)\n" % (self.name, self.opcode)
str_r += " Slave Address: 0x%02X\n" % self.slave_address
str_r += " Command : 0x%08X\n" % self.command
str_r += " Operation : 0x%02X\n" % self.operation
str_r += " PEC Check : %d\n" % self.peccheck
return str_r
#typedef struct {
# UINT16 OpCode;
# UINT8 Length;
# UINT64 Duration;
#} EFI_BOOT_SCRIPT_STALL;
class op_stall():
def __init__(self, opcode, size, duration):
self.opcode = opcode
self.size = size
self.duration = duration
self.name = script_opcodes[ self.opcode ]
def __str__(self):
str_r = " Opcode : %s (0x%04X)\n" % (self.name, self.opcode)
str_r += " Duration: 0x%08X (us)\n" % self.duration
return str_r
#typedef struct {
# UINT16 OpCode;
# UINT8 Length;
# EFI_PHYSICAL_ADDRESS EntryPoint;
#} EFI_BOOT_SCRIPT_DISPATCH;
class op_dispatch():
def __init__(self, opcode, size, entrypoint, context=None):
self.opcode = opcode
self.size = size
self.entrypoint = entrypoint
self.context = context
self.name = script_opcodes[ self.opcode ]
def __str__(self):
str_r = " Opcode : %s (0x%04X)\n" % (self.name, self.opcode)
str_r += " Entry Point: 0x%016X\n" % self.entrypoint
if self.context is not None: str_r += " Context : 0x%016X\n" % self.context
return str_r
#typedef struct {
# UINT16 OpCode;
# UINT8 Length;
# UINT32 Width;
# UINT64 Address;
# UINT64 Duration;
# UINT64 LoopTimes;
#} EFI_BOOT_SCRIPT_MEM_POLL;
class op_mem_poll():
def __init__(self, opcode, size, width, address, duration, looptimes):
self.opcode = opcode
self.size = size
self.width = width
self.address = address
self.duration = duration
self.looptimes = looptimes
self.name = 'S3_BOOTSCRIPT_MEM_POLL'
def __str__(self):
str_r = " Opcode : %s (0x%04X)\n" % (self.name, self.opcode)
str_r += " Width : 0x%02X (%X bytes)\n" % (self.width, script_width_sizes[self.width])
str_r += " Address : 0x%016X\n" % self.address
str_r += " Duration? : 0x%016X\n" % self.duration
str_r += " LoopTimes?: 0x%016X\n" % self.looptimes
return str_r
#typedef struct {
# UINT16 OpCode;
# UINT8 Length;
#} EFI_BOOT_SCRIPT_TERMINATE;
class op_terminate():
def __init__(self, opcode, size):
self.opcode = opcode
self.size = size
self.name = script_opcodes[ self.opcode ]
def __str__(self):
return " Opcode : %s (0x%02X)\n" % (self.name, self.opcode)
class op_unknown():
def __init__(self, opcode, size):
self.opcode = opcode
self.size = size
def __str__(self):
return " Opcode : unknown (0x%02X)\n" % self.opcode
class S3BOOTSCRIPT_ENTRY():
def __init__( self, script_type, index, offset_in_script, length, data=None ):
self.script_type = script_type
self.index = index
self.offset_in_script = offset_in_script
self.length = length
self.data = data
self.decoded_opcode = None
self.header_length = 0
def __str__(self):
entry_str = '' if self.index is None else ('[%03d] ' % self.index)
entry_str += ( 'Entry at offset 0x%04X (len = 0x%X, header len = 0x%X):' % (self.offset_in_script, self.length, self.header_length) )
if self.data: entry_str = entry_str + '\nData:\n' + dump_buffer(self.data, 16)
if self.decoded_opcode: entry_str = entry_str + 'Decoded:\n' + str(self.decoded_opcode)
return entry_str
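#
# Illustrative sketch (not part of the original module): wrap a decoded DISPATCH
# opcode into an S3BOOTSCRIPT_ENTRY for display. 'offset', 'entrypoint' and
# 'raw_bytes' are hypothetical values taken from a boot-script table dump;
# script type 0 is used only as a placeholder.
#
def _example_dispatch_entry( offset, entrypoint, raw_bytes ):
    entry = S3BOOTSCRIPT_ENTRY( 0, 0, offset, len(raw_bytes), raw_bytes )
    entry.decoded_opcode = op_dispatch( S3BootScriptOpcode.EFI_BOOT_SCRIPT_DISPATCH_OPCODE, len(raw_bytes), entrypoint )
    return str(entry)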
# #################################################################################################
#
# UEFI Table Parsing Functionality
#
# #################################################################################################
MAX_EFI_TABLE_SIZE = 0x1000
# typedef struct {
# UINT64 Signature;
# UINT32 Revision;
# UINT32 HeaderSize;
# UINT32 CRC32;
# UINT32 Reserved;
# } EFI_TABLE_HEADER;
EFI_TABLE_HEADER_FMT = '=8sIIII'
EFI_TABLE_HEADER_SIZE = 0x18
class EFI_TABLE_HEADER( namedtuple('EFI_TABLE_HEADER', 'Signature Revision HeaderSize CRC32 Reserved') ):
__slots__ = ()
def __str__(self):
return """Header:
Signature : %s
Revision : %s
HeaderSize : 0x%08X
CRC32 : 0x%08X
Reserved : 0x%08X""" % ( self.Signature, EFI_SYSTEM_TABLE_REVISION(self.Revision), self.HeaderSize, self.CRC32, self.Reserved )
# #################################################################################################
# EFI System Table
# #################################################################################################
#
# \MdePkg\Include\Uefi\UefiSpec.h
# -------------------------------
#
# //
# // EFI Runtime Services Table
# //
# #define EFI_SYSTEM_TABLE_SIGNATURE SIGNATURE_64 ('I','B','I',' ','S','Y','S','T')
# #define EFI_2_31_SYSTEM_TABLE_REVISION ((2 << 16) | (31))
# #define EFI_2_30_SYSTEM_TABLE_REVISION ((2 << 16) | (30))
# #define EFI_2_20_SYSTEM_TABLE_REVISION ((2 << 16) | (20))
# #define EFI_2_10_SYSTEM_TABLE_REVISION ((2 << 16) | (10))
# #define EFI_2_00_SYSTEM_TABLE_REVISION ((2 << 16) | (00))
# #define EFI_1_10_SYSTEM_TABLE_REVISION ((1 << 16) | (10))
# #define EFI_1_02_SYSTEM_TABLE_REVISION ((1 << 16) | (02))
# #define EFI_SYSTEM_TABLE_REVISION EFI_2_31_SYSTEM_TABLE_REVISION
#
# \EdkCompatibilityPkg\Foundation\Efi\Include\EfiApi.h
# ----------------------------------------------------
#
# //
# // EFI Configuration Table
# //
# typedef struct {
# EFI_GUID VendorGuid;
# VOID *VendorTable;
# } EFI_CONFIGURATION_TABLE;
#
#
# #define EFI_SYSTEM_TABLE_SIGNATURE 0x5453595320494249ULL
# struct _EFI_SYSTEM_TABLE {
# EFI_TABLE_HEADER Hdr;
#
# CHAR16 *FirmwareVendor;
# UINT32 FirmwareRevision;
#
# EFI_HANDLE ConsoleInHandle;
# EFI_SIMPLE_TEXT_IN_PROTOCOL *ConIn;
#
# EFI_HANDLE ConsoleOutHandle;
# EFI_SIMPLE_TEXT_OUT_PROTOCOL *ConOut;
#
# EFI_HANDLE StandardErrorHandle;
# EFI_SIMPLE_TEXT_OUT_PROTOCOL *StdErr;
#
# EFI_RUNTIME_SERVICES *RuntimeServices;
# EFI_BOOT_SERVICES *BootServices;
#
# UINTN NumberOfTableEntries;
# EFI_CONFIGURATION_TABLE *ConfigurationTable;
#
# };
EFI_SYSTEM_TABLE_SIGNATURE = 'IBI SYST'
EFI_2_50_SYSTEM_TABLE_REVISION = ((2 << 16) | (50))
EFI_2_40_SYSTEM_TABLE_REVISION = ((2 << 16) | (40))
EFI_2_31_SYSTEM_TABLE_REVISION = ((2 << 16) | (31))
EFI_2_30_SYSTEM_TABLE_REVISION = ((2 << 16) | (30))
EFI_2_20_SYSTEM_TABLE_REVISION = ((2 << 16) | (20))
EFI_2_10_SYSTEM_TABLE_REVISION = ((2 << 16) | (10))
EFI_2_00_SYSTEM_TABLE_REVISION = ((2 << 16) | (00))
EFI_1_10_SYSTEM_TABLE_REVISION = ((1 << 16) | (10))
EFI_1_02_SYSTEM_TABLE_REVISION = ((1 << 16) | (02))
EFI_REVISIONS = [EFI_2_50_SYSTEM_TABLE_REVISION, EFI_2_40_SYSTEM_TABLE_REVISION, EFI_2_31_SYSTEM_TABLE_REVISION, EFI_2_30_SYSTEM_TABLE_REVISION, EFI_2_20_SYSTEM_TABLE_REVISION, EFI_2_10_SYSTEM_TABLE_REVISION, EFI_2_00_SYSTEM_TABLE_REVISION, EFI_1_10_SYSTEM_TABLE_REVISION, EFI_1_02_SYSTEM_TABLE_REVISION ]
def EFI_SYSTEM_TABLE_REVISION(revision):
return ('%d.%d' % (revision>>16,revision&0xFFFF) )
EFI_SYSTEM_TABLE_FMT = '=12Q'
class EFI_SYSTEM_TABLE( namedtuple('EFI_SYSTEM_TABLE', 'FirmwareVendor FirmwareRevision ConsoleInHandle ConIn ConsoleOutHandle ConOut StandardErrorHandle StdErr RuntimeServices BootServices NumberOfTableEntries ConfigurationTable') ):
__slots__ = ()
def __str__(self):
return """EFI System Table:
FirmwareVendor : 0x%016X
FirmwareRevision : 0x%016X
ConsoleInHandle : 0x%016X
ConIn : 0x%016X
ConsoleOutHandle : 0x%016X
ConOut : 0x%016X
StandardErrorHandle : 0x%016X
StdErr : 0x%016X
RuntimeServices : 0x%016X
BootServices : 0x%016X
NumberOfTableEntries: 0x%016X
ConfigurationTable : 0x%016X
""" % ( self.FirmwareVendor, self.FirmwareRevision, self.ConsoleInHandle, self.ConIn, self.ConsoleOutHandle, self.ConOut, self.StandardErrorHandle, self.StdErr, self.RuntimeServices, self.BootServices, self.NumberOfTableEntries, self.ConfigurationTable )
# #################################################################################################
# EFI Runtime Services Table
# #################################################################################################
#
# \MdePkg\Include\Uefi\UefiSpec.h
# -------------------------------
#
# #define EFI_RUNTIME_SERVICES_SIGNATURE SIGNATURE_64 ('R','U','N','T','S','E','R','V')
# #define EFI_RUNTIME_SERVICES_REVISION EFI_2_31_SYSTEM_TABLE_REVISION
#
# ///
# /// EFI Runtime Services Table.
# ///
# typedef struct {
# ///
# /// The table header for the EFI Runtime Services Table.
# ///
# EFI_TABLE_HEADER Hdr;
#
# //
# // Time Services
# //
# EFI_GET_TIME GetTime;
# EFI_SET_TIME SetTime;
# EFI_GET_WAKEUP_TIME GetWakeupTime;
# EFI_SET_WAKEUP_TIME SetWakeupTime;
#
# //
# // Virtual Memory Services
# //
# EFI_SET_VIRTUAL_ADDRESS_MAP SetVirtualAddressMap;
# EFI_CONVERT_POINTER ConvertPointer;
#
# //
# // Variable Services
# //
# EFI_GET_VARIABLE GetVariable;
# EFI_GET_NEXT_VARIABLE_NAME GetNextVariableName;
# EFI_SET_VARIABLE SetVariable;
#
# //
# // Miscellaneous Services
# //
# EFI_GET_NEXT_HIGH_MONO_COUNT GetNextHighMonotonicCount;
# EFI_RESET_SYSTEM ResetSystem;
#
# //
# // UEFI 2.0 Capsule Services
# //
# EFI_UPDATE_CAPSULE UpdateCapsule;
# EFI_QUERY_CAPSULE_CAPABILITIES QueryCapsuleCapabilities;
#
# //
# // Miscellaneous UEFI 2.0 Service
# //
# EFI_QUERY_VARIABLE_INFO QueryVariableInfo;
# } EFI_RUNTIME_SERVICES;
EFI_RUNTIME_SERVICES_SIGNATURE = 'RUNTSERV'
EFI_RUNTIME_SERVICES_REVISION = EFI_2_31_SYSTEM_TABLE_REVISION
EFI_RUNTIME_SERVICES_TABLE_FMT = '=14Q'
class EFI_RUNTIME_SERVICES_TABLE( namedtuple('EFI_RUNTIME_SERVICES_TABLE', 'GetTime SetTime GetWakeupTime SetWakeupTime SetVirtualAddressMap ConvertPointer GetVariable GetNextVariableName SetVariable GetNextHighMonotonicCount ResetSystem UpdateCapsule QueryCapsuleCapabilities QueryVariableInfo') ):
__slots__ = ()
def __str__(self):
return """Runtime Services:
GetTime : 0x%016X
SetTime : 0x%016X
GetWakeupTime : 0x%016X
SetWakeupTime : 0x%016X
SetVirtualAddressMap : 0x%016X
ConvertPointer : 0x%016X
GetVariable : 0x%016X
GetNextVariableName : 0x%016X
SetVariable : 0x%016X
GetNextHighMonotonicCount: 0x%016X
ResetSystem : 0x%016X
UpdateCapsule : 0x%016X
QueryCapsuleCapabilities : 0x%016X
QueryVariableInfo : 0x%016X
""" % ( self.GetTime, self.SetTime, self.GetWakeupTime, self.SetWakeupTime, self.SetVirtualAddressMap, self.ConvertPointer, self.GetVariable, self.GetNextVariableName, self.SetVariable, self.GetNextHighMonotonicCount, self.ResetSystem, self.UpdateCapsule, self.QueryCapsuleCapabilities, self.QueryVariableInfo )
# #################################################################################################
# EFI Boot Services Table
# #################################################################################################
#
# \MdePkg\Include\Uefi\UefiSpec.h
# -------------------------------
#
# #define EFI_BOOT_SERVICES_SIGNATURE SIGNATURE_64 ('B','O','O','T','S','E','R','V')
# #define EFI_BOOT_SERVICES_REVISION EFI_2_31_SYSTEM_TABLE_REVISION
#
# ///
# /// EFI Boot Services Table.
# ///
# typedef struct {
# ///
# /// The table header for the EFI Boot Services Table.
# ///
# EFI_TABLE_HEADER Hdr;
#
# //
# // Task Priority Services
# //
# EFI_RAISE_TPL RaiseTPL;
# EFI_RESTORE_TPL RestoreTPL;
#
# //
# // Memory Services
# //
# EFI_ALLOCATE_PAGES AllocatePages;
# EFI_FREE_PAGES FreePages;
# EFI_GET_MEMORY_MAP GetMemoryMap;
# EFI_ALLOCATE_POOL AllocatePool;
# EFI_FREE_POOL FreePool;
#
# //
# // Event & Timer Services
# //
# EFI_CREATE_EVENT CreateEvent;
# EFI_SET_TIMER SetTimer;
# EFI_WAIT_FOR_EVENT WaitForEvent;
# EFI_SIGNAL_EVENT SignalEvent;
# EFI_CLOSE_EVENT CloseEvent;
# EFI_CHECK_EVENT CheckEvent;
#
# //
# // Protocol Handler Services
# //
# EFI_INSTALL_PROTOCOL_INTERFACE InstallProtocolInterface;
# EFI_REINSTALL_PROTOCOL_INTERFACE ReinstallProtocolInterface;
# EFI_UNINSTALL_PROTOCOL_INTERFACE UninstallProtocolInterface;
# EFI_HANDLE_PROTOCOL HandleProtocol;
# VOID *Reserved;
# EFI_REGISTER_PROTOCOL_NOTIFY RegisterProtocolNotify;
# EFI_LOCATE_HANDLE LocateHandle;
# EFI_LOCATE_DEVICE_PATH LocateDevicePath;
# EFI_INSTALL_CONFIGURATION_TABLE InstallConfigurationTable;
#
# //
# // Image Services
# //
# EFI_IMAGE_LOAD LoadImage;
# EFI_IMAGE_START StartImage;
# EFI_EXIT Exit;
# EFI_IMAGE_UNLOAD UnloadImage;
# EFI_EXIT_BOOT_SERVICES ExitBootServices;
#
# //
# // Miscellaneous Services
# //
# EFI_GET_NEXT_MONOTONIC_COUNT GetNextMonotonicCount;
# EFI_STALL Stall;
# EFI_SET_WATCHDOG_TIMER SetWatchdogTimer;
#
# //
# // DriverSupport Services
# //
# EFI_CONNECT_CONTROLLER ConnectController;
# EFI_DISCONNECT_CONTROLLER DisconnectController;
#
# //
# // Open and Close Protocol Services
# //
# EFI_OPEN_PROTOCOL OpenProtocol;
# EFI_CLOSE_PROTOCOL CloseProtocol;
# EFI_OPEN_PROTOCOL_INFORMATION OpenProtocolInformation;
#
# //
# // Library Services
# //
# EFI_PROTOCOLS_PER_HANDLE ProtocolsPerHandle;
# EFI_LOCATE_HANDLE_BUFFER LocateHandleBuffer;
# EFI_LOCATE_PROTOCOL LocateProtocol;
# EFI_INSTALL_MULTIPLE_PROTOCOL_INTERFACES InstallMultipleProtocolInterfaces;
# EFI_UNINSTALL_MULTIPLE_PROTOCOL_INTERFACES UninstallMultipleProtocolInterfaces;
#
# //
# // 32-bit CRC Services
# //
# EFI_CALCULATE_CRC32 CalculateCrc32;
#
# //
# // Miscellaneous Services
# //
# EFI_COPY_MEM CopyMem;
# EFI_SET_MEM SetMem;
# EFI_CREATE_EVENT_EX CreateEventEx;
# } EFI_BOOT_SERVICES;
EFI_BOOT_SERVICES_SIGNATURE = 'BOOTSERV'
EFI_BOOT_SERVICES_REVISION = EFI_2_31_SYSTEM_TABLE_REVISION
EFI_BOOT_SERVICES_TABLE_FMT = '=44Q'
class EFI_BOOT_SERVICES_TABLE( namedtuple('EFI_BOOT_SERVICES_TABLE', 'RaiseTPL RestoreTPL AllocatePages FreePages GetMemoryMap AllocatePool FreePool CreateEvent SetTimer WaitForEvent SignalEvent CloseEvent CheckEvent InstallProtocolInterface ReinstallProtocolInterface UninstallProtocolInterface HandleProtocol Reserved RegisterProtocolNotify LocateHandle LocateDevicePath InstallConfigurationTable LoadImage StartImage Exit UnloadImage ExitBootServices GetNextMonotonicCount Stall SetWatchdogTimer ConnectController DisconnectController OpenProtocol CloseProtocol OpenProtocolInformation ProtocolsPerHandle LocateHandleBuffer LocateProtocol InstallMultipleProtocolInterfaces UninstallMultipleProtocolInterfaces CalculateCrc32 CopyMem SetMem CreateEventEx') ):
__slots__ = ()
def __str__(self):
return """Boot Services:
RaiseTPL : 0x%016X
RestoreTPL : 0x%016X
AllocatePages : 0x%016X
FreePages : 0x%016X
GetMemoryMap : 0x%016X
AllocatePool : 0x%016X
FreePool : 0x%016X
CreateEvent : 0x%016X
SetTimer : 0x%016X
WaitForEvent : 0x%016X
SignalEvent : 0x%016X
CloseEvent : 0x%016X
CheckEvent : 0x%016X
InstallProtocolInterface : 0x%016X
ReinstallProtocolInterface : 0x%016X
UninstallProtocolInterface : 0x%016X
HandleProtocol : 0x%016X
Reserved : 0x%016X
RegisterProtocolNotify : 0x%016X
LocateHandle : 0x%016X
LocateDevicePath : 0x%016X
InstallConfigurationTable : 0x%016X
LoadImage : 0x%016X
StartImage : 0x%016X
Exit : 0x%016X
UnloadImage : 0x%016X
ExitBootServices : 0x%016X
GetNextMonotonicCount : 0x%016X
Stall : 0x%016X
SetWatchdogTimer : 0x%016X
ConnectController : 0x%016X
DisconnectController : 0x%016X
OpenProtocol : 0x%016X
CloseProtocol : 0x%016X
OpenProtocolInformation : 0x%016X
ProtocolsPerHandle : 0x%016X
LocateHandleBuffer : 0x%016X
LocateProtocol : 0x%016X
InstallMultipleProtocolInterfaces : 0x%016X
UninstallMultipleProtocolInterfaces: 0x%016X
CalculateCrc32 : 0x%016X
CopyMem : 0x%016X
SetMem : 0x%016X
CreateEventEx : 0x%016X
""" % ( self.RaiseTPL, self.RestoreTPL, self.AllocatePages, self.FreePages, self.GetMemoryMap, self.AllocatePool, self.FreePool, self.CreateEvent, self.SetTimer, self.WaitForEvent, self.SignalEvent, self.CloseEvent, self.CheckEvent, self.InstallProtocolInterface, self.ReinstallProtocolInterface, self.UninstallProtocolInterface, self.HandleProtocol, self.Reserved, self.RegisterProtocolNotify, self.LocateHandle, self.LocateDevicePath, self.InstallConfigurationTable, self.LoadImage, self.StartImage, self.Exit, self.UnloadImage, self.ExitBootServices, self.GetNextMonotonicCount, self.Stall, self.SetWatchdogTimer, self.ConnectController, self.DisconnectController, self.OpenProtocol, self.CloseProtocol, self.OpenProtocolInformation, self.ProtocolsPerHandle, self.LocateHandleBuffer, self.LocateProtocol, self.InstallMultipleProtocolInterfaces, self.UninstallMultipleProtocolInterfaces, self.CalculateCrc32, self.CopyMem, self.SetMem, self.CreateEventEx )
# #################################################################################################
# EFI System Configuration Table
# #################################################################################################
#
# \MdePkg\Include\Uefi\UefiSpec.h
# -------------------------------
#
#///
#/// Contains a set of GUID/pointer pairs comprised of the ConfigurationTable field in the
#/// EFI System Table.
#///
#typedef struct {
# ///
# /// The 128-bit GUID value that uniquely identifies the system configuration table.
# ///
# EFI_GUID VendorGuid;
# ///
# /// A pointer to the table associated with VendorGuid.
# ///
# VOID *VendorTable;
#} EFI_CONFIGURATION_TABLE;
#
EFI_VENDOR_TABLE_FORMAT = '<' + EFI_GUID_FMT + 'Q'
EFI_VENDOR_TABLE_SIZE = struct.calcsize(EFI_VENDOR_TABLE_FORMAT)
class EFI_VENDOR_TABLE( namedtuple('EFI_VENDOR_TABLE', 'VendorGuid0 VendorGuid1 VendorGuid2 VendorGuid3 VendorTable') ):
__slots__ = ()
def VendorGuid(self):
return EFI_GUID(self.VendorGuid0,self.VendorGuid1,self.VendorGuid2,self.VendorGuid3)
class EFI_CONFIGURATION_TABLE():
def __init__( self ):
self.VendorTables = {}
def __str__(self):
return ( 'Vendor Tables:\n%s' % (''.join( ['{%s} : 0x%016X\n' % (vt,self.VendorTables[vt]) for vt in self.VendorTables])) )
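#
# Illustrative sketch (not part of the original module): decode the GUID/pointer
# pairs of the EFI configuration table. 'buf' and 'num_entries' are hypothetical
# values ('num_entries' being NumberOfTableEntries from the system table); 64-bit
# pointers are assumed.
#
def _example_parse_configuration_table( buf, num_entries ):
    ect = EFI_CONFIGURATION_TABLE()
    for i in range( num_entries ):
        off = i * EFI_VENDOR_TABLE_SIZE
        vt = EFI_VENDOR_TABLE( *struct.unpack( EFI_VENDOR_TABLE_FORMAT, buf[off:off+EFI_VENDOR_TABLE_SIZE] ) )
        ect.VendorTables[ vt.VendorGuid() ] = vt.VendorTable
    return ect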
# #################################################################################################
# EFI DXE Services Table
# #################################################################################################
#
# \MdePkg\Include\Pi\PiDxeCis.h
# -----------------------------
#
# //
# // DXE Services Table
# //
# #define DXE_SERVICES_SIGNATURE 0x565245535f455844ULL
# #define DXE_SPECIFICATION_MAJOR_REVISION 1
# #define DXE_SPECIFICATION_MINOR_REVISION 20
# #define DXE_SERVICES_REVISION ((DXE_SPECIFICATION_MAJOR_REVISION<<16) | (DXE_SPECIFICATION_MINOR_REVISION))
#
# typedef struct {
# ///
# /// The table header for the DXE Services Table.
# /// This header contains the DXE_SERVICES_SIGNATURE and DXE_SERVICES_REVISION values.
# ///
# EFI_TABLE_HEADER Hdr;
#
# //
# // Global Coherency Domain Services
# //
# EFI_ADD_MEMORY_SPACE AddMemorySpace;
# EFI_ALLOCATE_MEMORY_SPACE AllocateMemorySpace;
# EFI_FREE_MEMORY_SPACE FreeMemorySpace;
# EFI_REMOVE_MEMORY_SPACE RemoveMemorySpace;
# EFI_GET_MEMORY_SPACE_DESCRIPTOR GetMemorySpaceDescriptor;
# EFI_SET_MEMORY_SPACE_ATTRIBUTES SetMemorySpaceAttributes;
# EFI_GET_MEMORY_SPACE_MAP GetMemorySpaceMap;
# EFI_ADD_IO_SPACE AddIoSpace;
# EFI_ALLOCATE_IO_SPACE AllocateIoSpace;
# EFI_FREE_IO_SPACE FreeIoSpace;
# EFI_REMOVE_IO_SPACE RemoveIoSpace;
# EFI_GET_IO_SPACE_DESCRIPTOR GetIoSpaceDescriptor;
# EFI_GET_IO_SPACE_MAP GetIoSpaceMap;
#
# //
# // Dispatcher Services
# //
# EFI_DISPATCH Dispatch;
# EFI_SCHEDULE Schedule;
# EFI_TRUST Trust;
# //
# // Service to process a single firmware volume found in a capsule
# //
# EFI_PROCESS_FIRMWARE_VOLUME ProcessFirmwareVolume;
# } DXE_SERVICES;
#DXE_SERVICES_SIGNATURE = 0x565245535f455844
#DXE_SPECIFICATION_MAJOR_REVISION = 1
#DXE_SPECIFICATION_MINOR_REVISION = 20
#DXE_SERVICES_REVISION = ((DXE_SPECIFICATION_MAJOR_REVISION<<16) | (DXE_SPECIFICATION_MINOR_REVISION))
EFI_DXE_SERVICES_TABLE_SIGNATURE = 'DXE_SERV' # 0x565245535f455844
EFI_DXE_SERVICES_TABLE_FMT = '=17Q'
class EFI_DXE_SERVICES_TABLE( namedtuple('EFI_DXE_SERVICES_TABLE', 'AddMemorySpace AllocateMemorySpace FreeMemorySpace RemoveMemorySpace GetMemorySpaceDescriptor SetMemorySpaceAttributes GetMemorySpaceMap AddIoSpace AllocateIoSpace FreeIoSpace RemoveIoSpace GetIoSpaceDescriptor GetIoSpaceMap Dispatch Schedule Trust ProcessFirmwareVolume') ):
__slots__ = ()
def __str__(self):
return """DXE Services:
AddMemorySpace : 0x%016X
AllocateMemorySpace : 0x%016X
FreeMemorySpace : 0x%016X
RemoveMemorySpace : 0x%016X
GetMemorySpaceDescriptor: 0x%016X
SetMemorySpaceAttributes: 0x%016X
GetMemorySpaceMap : 0x%016X
AddIoSpace : 0x%016X
AllocateIoSpace : 0x%016X
FreeIoSpace : 0x%016X
RemoveIoSpace : 0x%016X
GetIoSpaceDescriptor : 0x%016X
GetIoSpaceMap : 0x%016X
Dispatch : 0x%016X
Schedule : 0x%016X
Trust : 0x%016X
ProcessFirmwareVolume : 0x%016X
""" % ( self.AddMemorySpace, self.AllocateMemorySpace, self.FreeMemorySpace, self.RemoveMemorySpace, self.GetMemorySpaceDescriptor, self.SetMemorySpaceAttributes, self.GetMemorySpaceMap, self.AddIoSpace, self.AllocateIoSpace, self.FreeIoSpace, self.RemoveIoSpace, self.GetIoSpaceDescriptor, self.GetIoSpaceMap, self.Dispatch, self.Schedule, self.Trust, self.ProcessFirmwareVolume )
# #################################################################################################
# EFI PEI Services Table
# #################################################################################################
#
# //
# // Framework PEI Specification Revision information
# //
# #define FRAMEWORK_PEI_SPECIFICATION_MAJOR_REVISION 0
# #define FRAMEWORK_PEI_SPECIFICATION_MINOR_REVISION 91
#
#
# //
# // PEI services signature and Revision defined in Framework PEI spec
# //
# #define FRAMEWORK_PEI_SERVICES_SIGNATURE 0x5652455320494550ULL
# #define FRAMEWORK_PEI_SERVICES_REVISION ((FRAMEWORK_PEI_SPECIFICATION_MAJOR_REVISION<<16) | (FRAMEWORK_PEI_SPECIFICATION_MINOR_REVISION))
#
# ///
# /// FRAMEWORK_EFI_PEI_SERVICES is a collection of functions whose implementation is provided by the PEI
# /// Foundation. The table may be located in the temporary or permanent memory, depending upon the capabilities
# /// and phase of execution of PEI.
# ///
# /// These services fall into various classes, including the following:
# /// - Managing the boot mode.
# /// - Allocating both early and permanent memory.
# /// - Supporting the Firmware File System (FFS).
#  ///   - Abstracting the PPI database.
# /// - Creating Hand-Off Blocks (HOBs).
# ///
# struct _FRAMEWORK_EFI_PEI_SERVICES {
# EFI_TABLE_HEADER Hdr;
# //
# // PPI Functions
# //
# EFI_PEI_INSTALL_PPI InstallPpi;
# EFI_PEI_REINSTALL_PPI ReInstallPpi;
# EFI_PEI_LOCATE_PPI LocatePpi;
# EFI_PEI_NOTIFY_PPI NotifyPpi;
# //
# // Boot Mode Functions
# //
# EFI_PEI_GET_BOOT_MODE GetBootMode;
# EFI_PEI_SET_BOOT_MODE SetBootMode;
# //
# // HOB Functions
# //
# EFI_PEI_GET_HOB_LIST GetHobList;
# EFI_PEI_CREATE_HOB CreateHob;
# //
# // Firmware Volume Functions
# //
# EFI_PEI_FFS_FIND_NEXT_VOLUME FfsFindNextVolume;
# EFI_PEI_FFS_FIND_NEXT_FILE FfsFindNextFile;
# EFI_PEI_FFS_FIND_SECTION_DATA FfsFindSectionData;
# //
# // PEI Memory Functions
# //
# EFI_PEI_INSTALL_PEI_MEMORY InstallPeiMemory;
# EFI_PEI_ALLOCATE_PAGES AllocatePages;
# EFI_PEI_ALLOCATE_POOL AllocatePool;
# EFI_PEI_COPY_MEM CopyMem;
# EFI_PEI_SET_MEM SetMem;
# //
# // (the following interfaces are installed by publishing PEIM)
# // Status Code
# //
# EFI_PEI_REPORT_STATUS_CODE ReportStatusCode;
# //
# // Reset
# //
# EFI_PEI_RESET_SYSTEM ResetSystem;
# ///
# /// Inconsistent with specification here:
# /// In Framework Spec, PeiCis0.91, CpuIo and PciCfg are NOT pointers.
# ///
#
# //
# // I/O Abstractions
# //
# EFI_PEI_CPU_IO_PPI *CpuIo;
# EFI_PEI_PCI_CFG_PPI *PciCfg;
# };
EFI_FRAMEWORK_PEI_SERVICES_TABLE_SIGNATURE = 0x5652455320494550
#FRAMEWORK_PEI_SERVICES_SIGNATURE = 0x5652455320494550
FRAMEWORK_PEI_SPECIFICATION_MAJOR_REVISION = 0
FRAMEWORK_PEI_SPECIFICATION_MINOR_REVISION = 91
FRAMEWORK_PEI_SERVICES_REVISION = ((FRAMEWORK_PEI_SPECIFICATION_MAJOR_REVISION<<16) | (FRAMEWORK_PEI_SPECIFICATION_MINOR_REVISION))
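# For reference (not original code): with the major/minor values above, the packed
# Framework PEI Services revision works out to (0 << 16) | 91 == 0x5B.
assert FRAMEWORK_PEI_SERVICES_REVISION == 0x5B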
# #################################################################################################
# EFI System Management System Table
# #################################################################################################
#
#define SMM_SMST_SIGNATURE EFI_SIGNATURE_32 ('S', 'M', 'S', 'T')
#define EFI_SMM_SYSTEM_TABLE_REVISION (0 << 16) | (0x09)
# //
# // System Management System Table (SMST)
# //
# struct _EFI_SMM_SYSTEM_TABLE {
# ///
# /// The table header for the System Management System Table (SMST).
# ///
# EFI_TABLE_HEADER Hdr;
#
# ///
# /// A pointer to a NULL-terminated Unicode string containing the vendor name. It is
# /// permissible for this pointer to be NULL.
# ///
# CHAR16 *SmmFirmwareVendor;
# ///
# /// The particular revision of the firmware.
# ///
# UINT32 SmmFirmwareRevision;
#
# ///
# /// Adds, updates, or removes a configuration table entry from the SMST.
# ///
# EFI_SMM_INSTALL_CONFIGURATION_TABLE SmmInstallConfigurationTable;
#
# //
# // I/O Services
# //
# ///
# /// A GUID that designates the particular CPU I/O services.
# ///
# EFI_GUID EfiSmmCpuIoGuid;
# ///
# /// Provides the basic memory and I/O interfaces that are used to abstract accesses to
# /// devices.
# ///
# EFI_SMM_CPU_IO_INTERFACE SmmIo;
#
# //
# // Runtime memory service
# //
# ///
# ///
# /// Allocates pool memory from SMRAM for IA-32 or runtime memory for the
# /// Itanium processor family.
# ///
# EFI_SMMCORE_ALLOCATE_POOL SmmAllocatePool;
# ///
# /// Returns pool memory to the system.
# ///
# EFI_SMMCORE_FREE_POOL SmmFreePool;
# ///
# /// Allocates memory pages from the system.
# ///
# EFI_SMMCORE_ALLOCATE_PAGES SmmAllocatePages;
# ///
# /// Frees memory pages for the system.
# ///
# EFI_SMMCORE_FREE_PAGES SmmFreePages;
#
# //
# // MP service
# //
#
# /// Inconsistent with specification here:
#   /// In the Framework Spec, this definition does not exist. This method was introduced in the PI 1.1
#   /// specification as needed by the implementation.
# EFI_SMM_STARTUP_THIS_AP SmmStartupThisAp;
#
# //
# // CPU information records
# //
# ///
# /// A 1-relative number between 1 and the NumberOfCpus field. This field designates
# /// which processor is executing the SMM infrastructure. This number also serves as an
# /// index into the CpuSaveState and CpuOptionalFloatingPointState
# /// fields.
# ///
# UINTN CurrentlyExecutingCpu;
# ///
#   /// The number of current operational processors in the platform. This is a
#   /// 1-based counter.
# ///
# UINTN NumberOfCpus;
# ///
#   /// A pointer to the per-processor save-state areas. The size of this entire table is
#   /// NumberOfCpus * the size of EFI_SMM_CPU_SAVE_STATE.
# ///
# EFI_SMM_CPU_SAVE_STATE *CpuSaveState;
# ///
# /// A pointer to a catenation of the EFI_SMM_FLOATING_POINT_SAVE_STATE.
# /// The size of this entire table is NumberOfCpus* size of the
# /// EFI_SMM_FLOATING_POINT_SAVE_STATE. These fields are populated only if
# /// there is at least one SMM driver that has registered for a callback with the
# /// FloatingPointSave field in EFI_SMM_BASE_PROTOCOL.RegisterCallback() set to TRUE.
# ///
# EFI_SMM_FLOATING_POINT_SAVE_STATE *CpuOptionalFloatingPointState;
#
# //
# // Extensibility table
# //
# ///
# /// The number of EFI Configuration Tables in the buffer
# /// SmmConfigurationTable.
# ///
# UINTN NumberOfTableEntries;
# ///
# /// A pointer to the EFI Configuration Tables. The number of entries in the table is
# /// NumberOfTableEntries.
# ///
# EFI_CONFIGURATION_TABLE *SmmConfigurationTable;
# };
EFI_SMM_SYSTEM_TABLE_SIGNATURE = 'SMST'
EFI_SMM_SYSTEM_TABLE_REVISION = (0 << 16) | (0x09)
EFI_TABLES = {
EFI_SYSTEM_TABLE_SIGNATURE : {'name' : 'EFI System Table', 'struct' : EFI_SYSTEM_TABLE, 'fmt' : EFI_SYSTEM_TABLE_FMT },
EFI_RUNTIME_SERVICES_SIGNATURE : {'name' : 'EFI Runtime Services Table', 'struct' : EFI_RUNTIME_SERVICES_TABLE, 'fmt' : EFI_RUNTIME_SERVICES_TABLE_FMT },
EFI_BOOT_SERVICES_SIGNATURE : {'name' : 'EFI Boot Services Table', 'struct' : EFI_BOOT_SERVICES_TABLE, 'fmt' : EFI_BOOT_SERVICES_TABLE_FMT },
EFI_DXE_SERVICES_TABLE_SIGNATURE : {'name' : 'EFI DXE Services Table', 'struct' : EFI_DXE_SERVICES_TABLE, 'fmt' : EFI_DXE_SERVICES_TABLE_FMT }
#EFI_FRAMEWORK_PEI_SERVICES_TABLE_SIGNATURE : {'name' : 'EFI Framework PEI Services Table', 'struct' : EFI_FRAMEWORK_PEI_SERVICES_TABLE, 'fmt' : EFI_FRAMEWORK_PEI_SERVICES_TABLE_FMT },
#EFI_SMM_SYSTEM_TABLE_SIGNATURE : {'name' : 'EFI SMM System Table', 'struct' : EFI_SMM_SYSTEM_TABLE, 'fmt' : EFI_SMM_SYSTEM_TABLE_FMT },
#EFI_CONFIG_TABLE_SIGNATURE : {'name' : 'EFI Configuration Table', 'struct' : EFI_CONFIG_TABLE, 'fmt' : EFI_CONFIG_TABLE_FMT }
}
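# Illustrative sketch (not part of the original module): mapping a signature found
# while scanning memory to its table description, then unpacking the table body
# with the matching format. 'table_buf' (bytes positioned just past the
# EFI_TABLE_HEADER) is a hypothetical input; the struct classes above are assumed
# to be namedtuples whose fields match their format strings.
def _parse_known_efi_table(signature, table_buf):
    entry = EFI_TABLES.get(signature)
    if entry is None:
        return None
    return entry['name'], entry['struct'](*struct.unpack_from(entry['fmt'], table_buf))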
| gpl-2.0 |
proppy/appengine-try-python-flask | lib/werkzeug/testapp.py | 303 | 9398 | # -*- coding: utf-8 -*-
"""
werkzeug.testapp
~~~~~~~~~~~~~~~~
Provide a small test application that can be used to test a WSGI server
and check it for WSGI compliance.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import werkzeug
from textwrap import wrap
from werkzeug.wrappers import BaseRequest as Request, BaseResponse as Response
from werkzeug.utils import escape
import base64
logo = Response(base64.b64decode(
'''R0lGODlhoACgAOMIAAEDACwpAEpCAGdgAJaKAM28AOnVAP3rAP/////////
//////////////////////yH5BAEKAAgALAAAAACgAKAAAAT+EMlJq704680R+F0ojmRpnuj0rWnrv
nB8rbRs33gu0bzu/0AObxgsGn3D5HHJbCUFyqZ0ukkSDlAidctNFg7gbI9LZlrBaHGtzAae0eloe25
7w9EDOX2fst/xenyCIn5/gFqDiVVDV4aGeYiKkhSFjnCQY5OTlZaXgZp8nJ2ekaB0SQOjqphrpnOiq
ncEn65UsLGytLVmQ6m4sQazpbtLqL/HwpnER8bHyLrLOc3Oz8PRONPU1crXN9na263dMt/g4SzjMeX
m5yDpLqgG7OzJ4u8lT/P69ej3JPn69kHzN2OIAHkB9RUYSFCFQYQJFTIkCDBiwoXWGnowaLEjRm7+G
p9A7Hhx4rUkAUaSLJlxHMqVMD/aSycSZkyTplCqtGnRAM5NQ1Ly5OmzZc6gO4d6DGAUKA+hSocWYAo
SlM6oUWX2O/o0KdaVU5vuSQLAa0ADwQgMEMB2AIECZhVSnTno6spgbtXmHcBUrQACcc2FrTrWS8wAf
78cMFBgwIBgbN+qvTt3ayikRBk7BoyGAGABAdYyfdzRQGV3l4coxrqQ84GpUBmrdR3xNIDUPAKDBSA
ADIGDhhqTZIWaDcrVX8EsbNzbkvCOxG8bN5w8ly9H8jyTJHC6DFndQydbguh2e/ctZJFXRxMAqqPVA
tQH5E64SPr1f0zz7sQYjAHg0In+JQ11+N2B0XXBeeYZgBZFx4tqBToiTCPv0YBgQv8JqA6BEf6RhXx
w1ENhRBnWV8ctEX4Ul2zc3aVGcQNC2KElyTDYyYUWvShdjDyMOGMuFjqnII45aogPhz/CodUHFwaDx
lTgsaOjNyhGWJQd+lFoAGk8ObghI0kawg+EV5blH3dr+digkYuAGSaQZFHFz2P/cTaLmhF52QeSb45
Jwxd+uSVGHlqOZpOeJpCFZ5J+rkAkFjQ0N1tah7JJSZUFNsrkeJUJMIBi8jyaEKIhKPomnC91Uo+NB
yyaJ5umnnpInIFh4t6ZSpGaAVmizqjpByDegYl8tPE0phCYrhcMWSv+uAqHfgH88ak5UXZmlKLVJhd
dj78s1Fxnzo6yUCrV6rrDOkluG+QzCAUTbCwf9SrmMLzK6p+OPHx7DF+bsfMRq7Ec61Av9i6GLw23r
idnZ+/OO0a99pbIrJkproCQMA17OPG6suq3cca5ruDfXCCDoS7BEdvmJn5otdqscn+uogRHHXs8cbh
EIfYaDY1AkrC0cqwcZpnM6ludx72x0p7Fo/hZAcpJDjax0UdHavMKAbiKltMWCF3xxh9k25N/Viud8
ba78iCvUkt+V6BpwMlErmcgc502x+u1nSxJSJP9Mi52awD1V4yB/QHONsnU3L+A/zR4VL/indx/y64
gqcj+qgTeweM86f0Qy1QVbvmWH1D9h+alqg254QD8HJXHvjQaGOqEqC22M54PcftZVKVSQG9jhkv7C
JyTyDoAJfPdu8v7DRZAxsP/ky9MJ3OL36DJfCFPASC3/aXlfLOOON9vGZZHydGf8LnxYJuuVIbl83y
Az5n/RPz07E+9+zw2A2ahz4HxHo9Kt79HTMx1Q7ma7zAzHgHqYH0SoZWyTuOLMiHwSfZDAQTn0ajk9
YQqodnUYjByQZhZak9Wu4gYQsMyEpIOAOQKze8CmEF45KuAHTvIDOfHJNipwoHMuGHBnJElUoDmAyX
c2Qm/R8Ah/iILCCJOEokGowdhDYc/yoL+vpRGwyVSCWFYZNljkhEirGXsalWcAgOdeAdoXcktF2udb
qbUhjWyMQxYO01o6KYKOr6iK3fE4MaS+DsvBsGOBaMb0Y6IxADaJhFICaOLmiWTlDAnY1KzDG4ambL
cWBA8mUzjJsN2KjSaSXGqMCVXYpYkj33mcIApyhQf6YqgeNAmNvuC0t4CsDbSshZJkCS1eNisKqlyG
cF8G2JeiDX6tO6Mv0SmjCa3MFb0bJaGPMU0X7c8XcpvMaOQmCajwSeY9G0WqbBmKv34DsMIEztU6Y2
KiDlFdt6jnCSqx7Dmt6XnqSKaFFHNO5+FmODxMCWBEaco77lNDGXBM0ECYB/+s7nKFdwSF5hgXumQe
EZ7amRg39RHy3zIjyRCykQh8Zo2iviRKyTDn/zx6EefptJj2Cw+Ep2FSc01U5ry4KLPYsTyWnVGnvb
UpyGlhjBUljyjHhWpf8OFaXwhp9O4T1gU9UeyPPa8A2l0p1kNqPXEVRm1AOs1oAGZU596t6SOR2mcB
Oco1srWtkaVrMUzIErrKri85keKqRQYX9VX0/eAUK1hrSu6HMEX3Qh2sCh0q0D2CtnUqS4hj62sE/z
aDs2Sg7MBS6xnQeooc2R2tC9YrKpEi9pLXfYXp20tDCpSP8rKlrD4axprb9u1Df5hSbz9QU0cRpfgn
kiIzwKucd0wsEHlLpe5yHXuc6FrNelOl7pY2+11kTWx7VpRu97dXA3DO1vbkhcb4zyvERYajQgAADs
='''), mimetype='image/png')
TEMPLATE = u'''\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<title>WSGI Information</title>
<style type="text/css">
@import url(http://fonts.googleapis.com/css?family=Ubuntu);
body { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
'Verdana', sans-serif; background-color: white; color: #000;
font-size: 15px; text-align: center; }
#logo { float: right; padding: 0 0 10px 10px; }
div.box { text-align: left; width: 45em; margin: auto; padding: 50px 0;
background-color: white; }
h1, h2 { font-family: 'Ubuntu', 'Lucida Grande', 'Lucida Sans Unicode',
'Geneva', 'Verdana', sans-serif; font-weight: normal; }
h1 { margin: 0 0 30px 0; }
h2 { font-size: 1.4em; margin: 1em 0 0.5em 0; }
table { width: 100%%; border-collapse: collapse; border: 1px solid #AFC5C9 }
table th { background-color: #AFC1C4; color: white; font-size: 0.72em;
font-weight: normal; width: 18em; vertical-align: top;
padding: 0.5em 0 0.1em 0.5em; }
table td { border: 1px solid #AFC5C9; padding: 0.1em 0 0.1em 0.5em; }
code { font-family: 'Consolas', 'Monaco', 'Bitstream Vera Sans Mono',
monospace; font-size: 0.7em; }
ul li { line-height: 1.5em; }
ul.path { font-size: 0.7em; margin: 0 -30px; padding: 8px 30px;
list-style: none; background: #E8EFF0; }
ul.path li { line-height: 1.6em; }
li.virtual { color: #999; text-decoration: underline; }
li.exp { background: white; }
</style>
<div class="box">
<img src="?resource=logo" id="logo" alt="[The Werkzeug Logo]" />
<h1>WSGI Information</h1>
<p>
This page displays all available information about the WSGI server and
the underlying Python interpreter.
<h2 id="python-interpreter">Python Interpreter</h2>
<table>
<tr>
<th>Python Version
<td>%(python_version)s
<tr>
<th>Platform
<td>%(platform)s [%(os)s]
<tr>
<th>API Version
<td>%(api_version)s
<tr>
<th>Byteorder
<td>%(byteorder)s
<tr>
<th>Werkzeug Version
<td>%(werkzeug_version)s
</table>
<h2 id="wsgi-environment">WSGI Environment</h2>
<table>%(wsgi_env)s</table>
<h2 id="installed-eggs">Installed Eggs</h2>
<p>
The following python packages were installed on the system as
Python eggs:
<ul>%(python_eggs)s</ul>
<h2 id="sys-path">System Path</h2>
<p>
The following paths are the current contents of the load path. The
following entries are looked up for Python packages. Note that not
all items in this path are folders. Gray and underlined items are
entries pointing to invalid resources or used by custom import hooks
such as the zip importer.
<p>
Items with a bright background were expanded for display from a relative
path. If you encounter such paths in the output you might want to check
your setup as relative paths are usually problematic in multithreaded
environments.
<ul class="path">%(sys_path)s</ul>
</div>
'''
def iter_sys_path():
if os.name == 'posix':
def strip(x):
prefix = os.path.expanduser('~')
if x.startswith(prefix):
x = '~' + x[len(prefix):]
return x
else:
strip = lambda x: x
cwd = os.path.abspath(os.getcwd())
for item in sys.path:
path = os.path.join(cwd, item or os.path.curdir)
yield strip(os.path.normpath(path)), \
not os.path.isdir(path), path != item
def render_testapp(req):
try:
import pkg_resources
except ImportError:
eggs = ()
else:
eggs = sorted(pkg_resources.working_set,
key=lambda x: x.project_name.lower())
python_eggs = []
for egg in eggs:
try:
version = egg.version
except (ValueError, AttributeError):
version = 'unknown'
python_eggs.append('<li>%s <small>[%s]</small>' % (
escape(egg.project_name),
escape(version)
))
wsgi_env = []
sorted_environ = sorted(req.environ.items(),
key=lambda x: repr(x[0]).lower())
for key, value in sorted_environ:
wsgi_env.append('<tr><th>%s<td><code>%s</code>' % (
escape(str(key)),
' '.join(wrap(escape(repr(value))))
))
sys_path = []
for item, virtual, expanded in iter_sys_path():
class_ = []
if virtual:
class_.append('virtual')
if expanded:
class_.append('exp')
sys_path.append('<li%s>%s' % (
class_ and ' class="%s"' % ' '.join(class_) or '',
escape(item)
))
return (TEMPLATE % {
'python_version': '<br>'.join(escape(sys.version).splitlines()),
'platform': escape(sys.platform),
'os': escape(os.name),
'api_version': sys.api_version,
'byteorder': sys.byteorder,
'werkzeug_version': werkzeug.__version__,
'python_eggs': '\n'.join(python_eggs),
'wsgi_env': '\n'.join(wsgi_env),
'sys_path': '\n'.join(sys_path)
}).encode('utf-8')
def test_app(environ, start_response):
"""Simple test application that dumps the environment. You can use
it to check if Werkzeug is working properly:
.. sourcecode:: pycon
>>> from werkzeug.serving import run_simple
>>> from werkzeug.testapp import test_app
>>> run_simple('localhost', 3000, test_app)
* Running on http://localhost:3000/
The application displays important information from the WSGI environment,
the Python interpreter and the installed libraries.
"""
req = Request(environ, populate_request=False)
if req.args.get('resource') == 'logo':
response = logo
else:
response = Response(render_testapp(req), mimetype='text/html')
return response(environ, start_response)
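# A minimal sketch (not part of the original module): the same application can also
# be served with the standard library's wsgiref server. The helper name
# _serve_with_wsgiref is illustrative only.
def _serve_with_wsgiref(host='localhost', port=8000):
    from wsgiref.simple_server import make_server
    make_server(host, port, test_app).serve_forever()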
if __name__ == '__main__':
from werkzeug.serving import run_simple
run_simple('localhost', 5000, test_app, use_reloader=True)
| apache-2.0 |
jazkarta/edx-platform-for-isc | common/lib/xmodule/xmodule/modulestore/tests/test_split_modulestore_bulk_operations.py | 8 | 33066 | import copy
import ddt
import unittest
from bson.objectid import ObjectId
from mock import MagicMock, Mock, call
from xmodule.modulestore.split_mongo.split import SplitBulkWriteMixin
from xmodule.modulestore.split_mongo.mongo_connection import MongoConnection
from opaque_keys.edx.locator import CourseLocator
class TestBulkWriteMixin(unittest.TestCase):
def setUp(self):
super(TestBulkWriteMixin, self).setUp()
self.bulk = SplitBulkWriteMixin()
self.bulk.SCHEMA_VERSION = 1
self.clear_cache = self.bulk._clear_cache = Mock(name='_clear_cache')
self.conn = self.bulk.db_connection = MagicMock(name='db_connection', spec=MongoConnection)
self.conn.get_course_index.return_value = {'initial': 'index'}
self.course_key = CourseLocator('org', 'course', 'run-a', branch='test')
self.course_key_b = CourseLocator('org', 'course', 'run-b', branch='test')
self.structure = {'this': 'is', 'a': 'structure', '_id': ObjectId()}
self.definition = {'this': 'is', 'a': 'definition', '_id': ObjectId()}
self.index_entry = {'this': 'is', 'an': 'index'}
def assertConnCalls(self, *calls):
self.assertEqual(list(calls), self.conn.mock_calls)
def assertCacheNotCleared(self):
self.assertFalse(self.clear_cache.called)
class TestBulkWriteMixinPreviousTransaction(TestBulkWriteMixin):
"""
Verify that opening and closing a transaction doesn't affect later behaviour.
"""
def setUp(self):
super(TestBulkWriteMixinPreviousTransaction, self).setUp()
self.bulk._begin_bulk_operation(self.course_key)
self.bulk.insert_course_index(self.course_key, MagicMock('prev-index-entry'))
self.bulk.update_structure(self.course_key, {'this': 'is', 'the': 'previous structure', '_id': ObjectId()})
self.bulk._end_bulk_operation(self.course_key)
self.conn.reset_mock()
self.clear_cache.reset_mock()
@ddt.ddt
class TestBulkWriteMixinClosed(TestBulkWriteMixin):
"""
Tests of the bulk write mixin when bulk operations aren't active.
"""
@ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
def test_no_bulk_read_structure(self, version_guid):
# Reading a structure when no bulk operation is active should just call
# through to the db_connection
result = self.bulk.get_structure(self.course_key, version_guid)
self.assertConnCalls(call.get_structure(self.course_key.as_object_id(version_guid)))
self.assertEqual(result, self.conn.get_structure.return_value)
self.assertCacheNotCleared()
def test_no_bulk_write_structure(self):
# Writing a structure when no bulk operation is active should just
# call through to the db_connection. It should also clear the
# system cache
self.bulk.update_structure(self.course_key, self.structure)
self.assertConnCalls(call.insert_structure(self.structure))
self.clear_cache.assert_called_once_with(self.structure['_id'])
@ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
def test_no_bulk_read_definition(self, version_guid):
# Reading a definition when no bulk operation is active should just call
# through to the db_connection
result = self.bulk.get_definition(self.course_key, version_guid)
self.assertConnCalls(call.get_definition(self.course_key.as_object_id(version_guid)))
self.assertEqual(result, self.conn.get_definition.return_value)
def test_no_bulk_write_definition(self):
# Writing a definition when no bulk operation is active should just
# call through to the db_connection.
self.bulk.update_definition(self.course_key, self.definition)
self.assertConnCalls(call.insert_definition(self.definition))
@ddt.data(True, False)
def test_no_bulk_read_index(self, ignore_case):
# Reading a course index when no bulk operation is active should just call
# through to the db_connection
result = self.bulk.get_course_index(self.course_key, ignore_case=ignore_case)
self.assertConnCalls(call.get_course_index(self.course_key, ignore_case))
self.assertEqual(result, self.conn.get_course_index.return_value)
self.assertCacheNotCleared()
def test_no_bulk_write_index(self):
# Writing a course index when no bulk operation is active should just call
# through to the db_connection
self.bulk.insert_course_index(self.course_key, self.index_entry)
self.assertConnCalls(call.insert_course_index(self.index_entry))
self.assertCacheNotCleared()
def test_out_of_order_end(self):
# Calling _end_bulk_operation without a corresponding _begin...
# is a noop
self.bulk._end_bulk_operation(self.course_key)
def test_write_new_index_on_close(self):
self.conn.get_course_index.return_value = None
self.bulk._begin_bulk_operation(self.course_key)
self.conn.reset_mock()
self.bulk.insert_course_index(self.course_key, self.index_entry)
self.assertConnCalls()
self.bulk._end_bulk_operation(self.course_key)
self.conn.insert_course_index.assert_called_once_with(self.index_entry)
def test_write_updated_index_on_close(self):
old_index = {'this': 'is', 'an': 'old index'}
self.conn.get_course_index.return_value = old_index
self.bulk._begin_bulk_operation(self.course_key)
self.conn.reset_mock()
self.bulk.insert_course_index(self.course_key, self.index_entry)
self.assertConnCalls()
self.bulk._end_bulk_operation(self.course_key)
self.conn.update_course_index.assert_called_once_with(self.index_entry, from_index=old_index)
def test_write_structure_on_close(self):
self.conn.get_course_index.return_value = None
self.bulk._begin_bulk_operation(self.course_key)
self.conn.reset_mock()
self.bulk.update_structure(self.course_key, self.structure)
self.assertConnCalls()
self.bulk._end_bulk_operation(self.course_key)
self.assertConnCalls(call.insert_structure(self.structure))
def test_write_multiple_structures_on_close(self):
self.conn.get_course_index.return_value = None
self.bulk._begin_bulk_operation(self.course_key)
self.conn.reset_mock()
self.bulk.update_structure(self.course_key.replace(branch='a'), self.structure)
other_structure = {'another': 'structure', '_id': ObjectId()}
self.bulk.update_structure(self.course_key.replace(branch='b'), other_structure)
self.assertConnCalls()
self.bulk._end_bulk_operation(self.course_key)
self.assertItemsEqual(
[call.insert_structure(self.structure), call.insert_structure(other_structure)],
self.conn.mock_calls
)
def test_write_index_and_definition_on_close(self):
original_index = {'versions': {}}
self.conn.get_course_index.return_value = copy.deepcopy(original_index)
self.bulk._begin_bulk_operation(self.course_key)
self.conn.reset_mock()
self.bulk.update_definition(self.course_key, self.definition)
self.bulk.insert_course_index(self.course_key, {'versions': {self.course_key.branch: self.definition['_id']}})
self.assertConnCalls()
self.bulk._end_bulk_operation(self.course_key)
self.assertConnCalls(
call.insert_definition(self.definition),
call.update_course_index(
{'versions': {self.course_key.branch: self.definition['_id']}},
from_index=original_index
)
)
def test_write_index_and_multiple_definitions_on_close(self):
original_index = {'versions': {'a': ObjectId(), 'b': ObjectId()}}
self.conn.get_course_index.return_value = copy.deepcopy(original_index)
self.bulk._begin_bulk_operation(self.course_key)
self.conn.reset_mock()
self.bulk.update_definition(self.course_key.replace(branch='a'), self.definition)
other_definition = {'another': 'definition', '_id': ObjectId()}
self.bulk.update_definition(self.course_key.replace(branch='b'), other_definition)
self.bulk.insert_course_index(self.course_key, {'versions': {'a': self.definition['_id'], 'b': other_definition['_id']}})
self.bulk._end_bulk_operation(self.course_key)
self.assertItemsEqual(
[
call.insert_definition(self.definition),
call.insert_definition(other_definition),
call.update_course_index(
{'versions': {'a': self.definition['_id'], 'b': other_definition['_id']}},
from_index=original_index
)
],
self.conn.mock_calls
)
def test_write_definition_on_close(self):
self.conn.get_course_index.return_value = None
self.bulk._begin_bulk_operation(self.course_key)
self.conn.reset_mock()
self.bulk.update_definition(self.course_key, self.definition)
self.assertConnCalls()
self.bulk._end_bulk_operation(self.course_key)
self.assertConnCalls(call.insert_definition(self.definition))
def test_write_multiple_definitions_on_close(self):
self.conn.get_course_index.return_value = None
self.bulk._begin_bulk_operation(self.course_key)
self.conn.reset_mock()
self.bulk.update_definition(self.course_key.replace(branch='a'), self.definition)
other_definition = {'another': 'definition', '_id': ObjectId()}
self.bulk.update_definition(self.course_key.replace(branch='b'), other_definition)
self.assertConnCalls()
self.bulk._end_bulk_operation(self.course_key)
self.assertItemsEqual(
[call.insert_definition(self.definition), call.insert_definition(other_definition)],
self.conn.mock_calls
)
def test_write_index_and_structure_on_close(self):
original_index = {'versions': {}}
self.conn.get_course_index.return_value = copy.deepcopy(original_index)
self.bulk._begin_bulk_operation(self.course_key)
self.conn.reset_mock()
self.bulk.update_structure(self.course_key, self.structure)
self.bulk.insert_course_index(self.course_key, {'versions': {self.course_key.branch: self.structure['_id']}})
self.assertConnCalls()
self.bulk._end_bulk_operation(self.course_key)
self.assertConnCalls(
call.insert_structure(self.structure),
call.update_course_index(
{'versions': {self.course_key.branch: self.structure['_id']}},
from_index=original_index
)
)
def test_write_index_and_multiple_structures_on_close(self):
original_index = {'versions': {'a': ObjectId(), 'b': ObjectId()}}
self.conn.get_course_index.return_value = copy.deepcopy(original_index)
self.bulk._begin_bulk_operation(self.course_key)
self.conn.reset_mock()
self.bulk.update_structure(self.course_key.replace(branch='a'), self.structure)
other_structure = {'another': 'structure', '_id': ObjectId()}
self.bulk.update_structure(self.course_key.replace(branch='b'), other_structure)
self.bulk.insert_course_index(self.course_key, {'versions': {'a': self.structure['_id'], 'b': other_structure['_id']}})
self.bulk._end_bulk_operation(self.course_key)
self.assertItemsEqual(
[
call.insert_structure(self.structure),
call.insert_structure(other_structure),
call.update_course_index(
{'versions': {'a': self.structure['_id'], 'b': other_structure['_id']}},
from_index=original_index
)
],
self.conn.mock_calls
)
def test_version_structure_creates_new_version(self):
self.assertNotEquals(
self.bulk.version_structure(self.course_key, self.structure, 'user_id')['_id'],
self.structure['_id']
)
def test_version_structure_new_course(self):
self.conn.get_course_index.return_value = None
self.bulk._begin_bulk_operation(self.course_key)
version_result = self.bulk.version_structure(self.course_key, self.structure, 'user_id')
get_result = self.bulk.get_structure(self.course_key, version_result['_id'])
self.assertEquals(version_result, get_result)
class TestBulkWriteMixinClosedAfterPrevTransaction(TestBulkWriteMixinClosed, TestBulkWriteMixinPreviousTransaction):
"""
    Test that operations with a closed transaction aren't affected by a previously executed transaction
"""
pass
@ddt.ddt
class TestBulkWriteMixinFindMethods(TestBulkWriteMixin):
"""
Tests of BulkWriteMixin methods for finding many structures or indexes
"""
def test_no_bulk_find_matching_course_indexes(self):
branch = Mock(name='branch')
search_targets = MagicMock(name='search_targets')
self.conn.find_matching_course_indexes.return_value = [Mock(name='result')]
result = self.bulk.find_matching_course_indexes(branch, search_targets)
self.assertConnCalls(call.find_matching_course_indexes(branch, search_targets))
self.assertEqual(result, self.conn.find_matching_course_indexes.return_value)
self.assertCacheNotCleared()
@ddt.data(
(None, None, [], []),
(
'draft',
None,
[{'versions': {'draft': '123'}}],
[
{'versions': {'published': '123'}},
{}
],
),
(
'draft',
{'f1': 'v1'},
[{'versions': {'draft': '123'}, 'search_targets': {'f1': 'v1'}}],
[
{'versions': {'draft': '123'}, 'search_targets': {'f1': 'value2'}},
{'versions': {'published': '123'}, 'search_targets': {'f1': 'v1'}},
{'search_targets': {'f1': 'v1'}},
{'versions': {'draft': '123'}},
],
),
(
None,
{'f1': 'v1'},
[
{'versions': {'draft': '123'}, 'search_targets': {'f1': 'v1'}},
{'versions': {'published': '123'}, 'search_targets': {'f1': 'v1'}},
{'search_targets': {'f1': 'v1'}},
],
[
{'versions': {'draft': '123'}, 'search_targets': {'f1': 'v2'}},
{'versions': {'draft': '123'}, 'search_targets': {'f2': 'v1'}},
{'versions': {'draft': '123'}},
],
),
(
None,
{'f1': 'v1', 'f2': 2},
[
{'search_targets': {'f1': 'v1', 'f2': 2}},
{'search_targets': {'f1': 'v1', 'f2': 2}},
],
[
{'versions': {'draft': '123'}, 'search_targets': {'f1': 'v1'}},
{'search_targets': {'f1': 'v1'}},
{'versions': {'draft': '123'}, 'search_targets': {'f1': 'v2'}},
{'versions': {'draft': '123'}},
],
),
)
@ddt.unpack
def test_find_matching_course_indexes(self, branch, search_targets, matching, unmatching):
db_indexes = [{'org': 'what', 'course': 'this', 'run': 'needs'}]
for n, index in enumerate(matching + unmatching):
course_key = CourseLocator('org', 'course', 'run{}'.format(n))
self.bulk._begin_bulk_operation(course_key)
for attr in ['org', 'course', 'run']:
index[attr] = getattr(course_key, attr)
self.bulk.insert_course_index(course_key, index)
expected = matching + db_indexes
self.conn.find_matching_course_indexes.return_value = db_indexes
result = self.bulk.find_matching_course_indexes(branch, search_targets)
self.assertItemsEqual(result, expected)
for item in unmatching:
self.assertNotIn(item, result)
def test_no_bulk_find_structures_by_id(self):
ids = [Mock(name='id')]
self.conn.find_structures_by_id.return_value = [MagicMock(name='result')]
result = self.bulk.find_structures_by_id(ids)
self.assertConnCalls(call.find_structures_by_id(ids))
self.assertEqual(result, self.conn.find_structures_by_id.return_value)
self.assertCacheNotCleared()
@ddt.data(
([], [], []),
([1, 2, 3], [1, 2], [1, 2]),
([1, 2, 3], [1], [1, 2]),
([1, 2, 3], [], [1, 2]),
)
@ddt.unpack
def test_find_structures_by_id(self, search_ids, active_ids, db_ids):
db_structure = lambda _id: {'db': 'structure', '_id': _id}
active_structure = lambda _id: {'active': 'structure', '_id': _id}
db_structures = [db_structure(_id) for _id in db_ids if _id not in active_ids]
for n, _id in enumerate(active_ids):
course_key = CourseLocator('org', 'course', 'run{}'.format(n))
self.bulk._begin_bulk_operation(course_key)
self.bulk.update_structure(course_key, active_structure(_id))
self.conn.find_structures_by_id.return_value = db_structures
results = self.bulk.find_structures_by_id(search_ids)
self.conn.find_structures_by_id.assert_called_once_with(list(set(search_ids) - set(active_ids)))
for _id in active_ids:
if _id in search_ids:
self.assertIn(active_structure(_id), results)
else:
self.assertNotIn(active_structure(_id), results)
for _id in db_ids:
if _id in search_ids and _id not in active_ids:
self.assertIn(db_structure(_id), results)
else:
self.assertNotIn(db_structure(_id), results)
@ddt.data(
([], [], []),
([1, 2, 3], [1, 2], [1, 2]),
([1, 2, 3], [1], [1, 2]),
([1, 2, 3], [], [1, 2]),
)
@ddt.unpack
def test_get_definitions(self, search_ids, active_ids, db_ids):
db_definition = lambda _id: {'db': 'definition', '_id': _id}
active_definition = lambda _id: {'active': 'definition', '_id': _id}
db_definitions = [db_definition(_id) for _id in db_ids if _id not in active_ids]
self.bulk._begin_bulk_operation(self.course_key)
for n, _id in enumerate(active_ids):
self.bulk.update_definition(self.course_key, active_definition(_id))
self.conn.get_definitions.return_value = db_definitions
results = self.bulk.get_definitions(self.course_key, search_ids)
self.conn.get_definitions.assert_called_once_with(list(set(search_ids) - set(active_ids)))
for _id in active_ids:
if _id in search_ids:
self.assertIn(active_definition(_id), results)
else:
self.assertNotIn(active_definition(_id), results)
for _id in db_ids:
if _id in search_ids and _id not in active_ids:
self.assertIn(db_definition(_id), results)
else:
self.assertNotIn(db_definition(_id), results)
def test_no_bulk_find_structures_derived_from(self):
ids = [Mock(name='id')]
self.conn.find_structures_derived_from.return_value = [MagicMock(name='result')]
result = self.bulk.find_structures_derived_from(ids)
self.assertConnCalls(call.find_structures_derived_from(ids))
self.assertEqual(result, self.conn.find_structures_derived_from.return_value)
self.assertCacheNotCleared()
@ddt.data(
# Test values are:
# - previous_versions to search for
# - documents in the cache with $previous_version.$_id
# - documents in the db with $previous_version.$_id
([], [], []),
(['1', '2', '3'], ['1.a', '1.b', '2.c'], ['1.a', '2.c']),
(['1', '2', '3'], ['1.a'], ['1.a', '2.c']),
(['1', '2', '3'], [], ['1.a', '2.c']),
(['1', '2', '3'], ['4.d'], ['1.a', '2.c']),
)
@ddt.unpack
def test_find_structures_derived_from(self, search_ids, active_ids, db_ids):
def db_structure(_id):
previous, _, current = _id.partition('.')
return {'db': 'structure', 'previous_version': previous, '_id': current}
def active_structure(_id):
previous, _, current = _id.partition('.')
return {'active': 'structure', 'previous_version': previous, '_id': current}
db_structures = [db_structure(_id) for _id in db_ids]
active_structures = []
for n, _id in enumerate(active_ids):
course_key = CourseLocator('org', 'course', 'run{}'.format(n))
self.bulk._begin_bulk_operation(course_key)
structure = active_structure(_id)
self.bulk.update_structure(course_key, structure)
active_structures.append(structure)
self.conn.find_structures_derived_from.return_value = db_structures
results = self.bulk.find_structures_derived_from(search_ids)
self.conn.find_structures_derived_from.assert_called_once_with(search_ids)
for structure in active_structures:
if structure['previous_version'] in search_ids:
self.assertIn(structure, results)
else:
self.assertNotIn(structure, results)
for structure in db_structures:
if (
structure['previous_version'] in search_ids and # We're searching for this document
not any(active.endswith(structure['_id']) for active in active_ids) # This document doesn't match any active _ids
):
self.assertIn(structure, results)
else:
self.assertNotIn(structure, results)
def test_no_bulk_find_ancestor_structures(self):
original_version = Mock(name='original_version')
block_id = Mock(name='block_id')
self.conn.find_ancestor_structures.return_value = [MagicMock(name='result')]
result = self.bulk.find_ancestor_structures(original_version, block_id)
self.assertConnCalls(call.find_ancestor_structures(original_version, block_id))
self.assertEqual(result, self.conn.find_ancestor_structures.return_value)
self.assertCacheNotCleared()
@ddt.data(
# Test values are:
# - original_version
# - block_id
# - matching documents in the cache
# - non-matching documents in the cache
# - expected documents returned from the db
# - unexpected documents returned from the db
('ov', 'bi', [{'original_version': 'ov', 'blocks': {'bi': {'edit_info': {'update_version': 'foo'}}}}], [], [], []),
('ov', 'bi', [{'original_version': 'ov', 'blocks': {'bi': {'edit_info': {'update_version': 'foo'}}}, '_id': 'foo'}], [], [], [{'_id': 'foo'}]),
('ov', 'bi', [], [{'blocks': {'bi': {'edit_info': {'update_version': 'foo'}}}}], [], []),
('ov', 'bi', [], [{'original_version': 'ov'}], [], []),
('ov', 'bi', [], [], [{'original_version': 'ov', 'blocks': {'bi': {'edit_info': {'update_version': 'foo'}}}}], []),
(
'ov',
'bi',
[{'original_version': 'ov', 'blocks': {'bi': {'edit_info': {'update_version': 'foo'}}}}],
[],
[{'original_version': 'ov', 'blocks': {'bi': {'edit_info': {'update_version': 'bar'}}}}],
[]
),
)
@ddt.unpack
def test_find_ancestor_structures(self, original_version, block_id, active_match, active_unmatch, db_match, db_unmatch):
for structure in active_match + active_unmatch + db_match + db_unmatch:
structure.setdefault('_id', ObjectId())
for n, structure in enumerate(active_match + active_unmatch):
course_key = CourseLocator('org', 'course', 'run{}'.format(n))
self.bulk._begin_bulk_operation(course_key)
self.bulk.update_structure(course_key, structure)
self.conn.find_ancestor_structures.return_value = db_match + db_unmatch
results = self.bulk.find_ancestor_structures(original_version, block_id)
self.conn.find_ancestor_structures.assert_called_once_with(original_version, block_id)
self.assertItemsEqual(active_match + db_match, results)
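# Illustrative sketch (not part of the original module): the buffered write pattern
# these tests exercise. Inside a bulk operation, update_structure only caches the
# document; the database connection is hit when the operation ends.
def _example_bulk_write(bulk, course_key, structure):
    bulk._begin_bulk_operation(course_key)
    bulk.update_structure(course_key, structure)
    bulk._end_bulk_operation(course_key)  # flushes via insert_structure(structure)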
@ddt.ddt
class TestBulkWriteMixinOpen(TestBulkWriteMixin):
"""
Tests of the bulk write mixin when bulk write operations are open
"""
def setUp(self):
super(TestBulkWriteMixinOpen, self).setUp()
self.bulk._begin_bulk_operation(self.course_key)
@ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
def test_read_structure_without_write_from_db(self, version_guid):
# Reading a structure before it's been written (while in bulk operation mode)
# returns the structure from the database
result = self.bulk.get_structure(self.course_key, version_guid)
self.assertEquals(self.conn.get_structure.call_count, 1)
self.assertEqual(result, self.conn.get_structure.return_value)
self.assertCacheNotCleared()
@ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
def test_read_structure_without_write_only_reads_once(self, version_guid):
# Reading the same structure multiple times shouldn't hit the database
# more than once
for _ in xrange(2):
result = self.bulk.get_structure(self.course_key, version_guid)
self.assertEquals(self.conn.get_structure.call_count, 1)
self.assertEqual(result, self.conn.get_structure.return_value)
self.assertCacheNotCleared()
@ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
def test_read_structure_after_write_no_db(self, version_guid):
# Reading a structure that's already been written shouldn't hit the db at all
self.structure['_id'] = version_guid
self.bulk.update_structure(self.course_key, self.structure)
result = self.bulk.get_structure(self.course_key, version_guid)
self.assertEquals(self.conn.get_structure.call_count, 0)
self.assertEqual(result, self.structure)
@ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
def test_read_structure_after_write_after_read(self, version_guid):
# Reading a structure that's been updated after being pulled from the db should
# still get the updated value
self.structure['_id'] = version_guid
self.bulk.get_structure(self.course_key, version_guid)
self.bulk.update_structure(self.course_key, self.structure)
result = self.bulk.get_structure(self.course_key, version_guid)
self.assertEquals(self.conn.get_structure.call_count, 1)
self.assertEqual(result, self.structure)
@ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
def test_read_definition_without_write_from_db(self, version_guid):
# Reading a definition before it's been written (while in bulk operation mode)
# returns the definition from the database
result = self.bulk.get_definition(self.course_key, version_guid)
self.assertEquals(self.conn.get_definition.call_count, 1)
self.assertEqual(result, self.conn.get_definition.return_value)
self.assertCacheNotCleared()
@ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
def test_read_definition_without_write_only_reads_once(self, version_guid):
# Reading the same definition multiple times shouldn't hit the database
# more than once
for _ in xrange(2):
result = self.bulk.get_definition(self.course_key, version_guid)
self.assertEquals(self.conn.get_definition.call_count, 1)
self.assertEqual(result, self.conn.get_definition.return_value)
self.assertCacheNotCleared()
@ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
def test_read_definition_after_write_no_db(self, version_guid):
# Reading a definition that's already been written shouldn't hit the db at all
self.definition['_id'] = version_guid
self.bulk.update_definition(self.course_key, self.definition)
result = self.bulk.get_definition(self.course_key, version_guid)
self.assertEquals(self.conn.get_definition.call_count, 0)
self.assertEqual(result, self.definition)
@ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
def test_read_definition_after_write_after_read(self, version_guid):
# Reading a definition that's been updated after being pulled from the db should
# still get the updated value
self.definition['_id'] = version_guid
self.bulk.get_definition(self.course_key, version_guid)
self.bulk.update_definition(self.course_key, self.definition)
result = self.bulk.get_definition(self.course_key, version_guid)
self.assertEquals(self.conn.get_definition.call_count, 1)
self.assertEqual(result, self.definition)
@ddt.data(True, False)
def test_read_index_without_write_from_db(self, ignore_case):
# Reading the index without writing to it should pull from the database
result = self.bulk.get_course_index(self.course_key, ignore_case=ignore_case)
self.assertEquals(self.conn.get_course_index.call_count, 1)
self.assertEquals(self.conn.get_course_index.return_value, result)
@ddt.data(True, False)
def test_read_index_without_write_only_reads_once(self, ignore_case):
# Reading the index multiple times should only result in one read from
# the database
for _ in xrange(2):
result = self.bulk.get_course_index(self.course_key, ignore_case=ignore_case)
self.assertEquals(self.conn.get_course_index.call_count, 1)
self.assertEquals(self.conn.get_course_index.return_value, result)
@ddt.data(True, False)
def test_read_index_after_write(self, ignore_case):
# Reading the index after a write still should hit the database once to fetch the
# initial index, and should return the written index_entry
self.bulk.insert_course_index(self.course_key, self.index_entry)
result = self.bulk.get_course_index(self.course_key, ignore_case=ignore_case)
self.assertEquals(self.conn.get_course_index.call_count, 1)
self.assertEquals(self.index_entry, result)
def test_read_index_ignore_case(self):
# Reading using ignore case should find an already written entry with a different case
self.bulk.insert_course_index(self.course_key, self.index_entry)
result = self.bulk.get_course_index(
self.course_key.replace(
org=self.course_key.org.upper(),
course=self.course_key.course.title(),
run=self.course_key.run.upper()
),
ignore_case=True
)
self.assertEquals(self.conn.get_course_index.call_count, 1)
self.assertEquals(self.index_entry, result)
def test_version_structure_creates_new_version_before_read(self):
self.assertNotEquals(
self.bulk.version_structure(self.course_key, self.structure, 'user_id')['_id'],
self.structure['_id']
)
def test_version_structure_creates_new_version_after_read(self):
self.conn.get_structure.return_value = copy.deepcopy(self.structure)
self.bulk.get_structure(self.course_key, self.structure['_id'])
self.assertNotEquals(
self.bulk.version_structure(self.course_key, self.structure, 'user_id')['_id'],
self.structure['_id']
)
def test_copy_branch_versions(self):
# Directly updating an index so that the draft branch points to the published index
# version should work, and should only persist a single structure
self.maxDiff = None
published_structure = {'published': 'structure', '_id': ObjectId()}
self.bulk.update_structure(self.course_key, published_structure)
index = {'versions': {'published': published_structure['_id']}}
self.bulk.insert_course_index(self.course_key, index)
index_copy = copy.deepcopy(index)
index_copy['versions']['draft'] = index['versions']['published']
self.bulk.update_course_index(self.course_key, index_copy)
self.bulk._end_bulk_operation(self.course_key)
self.conn.insert_structure.assert_called_once_with(published_structure)
self.conn.update_course_index.assert_called_once_with(index_copy, from_index=self.conn.get_course_index.return_value)
self.conn.get_course_index.assert_called_once_with(self.course_key)
class TestBulkWriteMixinOpenAfterPrevTransaction(TestBulkWriteMixinOpen, TestBulkWriteMixinPreviousTransaction):
"""
    Test that operations with an open transaction aren't affected by a previously executed transaction
"""
pass
| agpl-3.0 |
sandeepdsouza93/TensorFlow-15712 | tensorflow/python/client/timeline.py | 19 | 23737 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Timeline visualization for TensorFlow using Chrome Trace Format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
# The timeline target is usually imported as part of BUILD target
# "platform_test", which also includes the "platform"
# dependency. This is why the logging import here is okay.
from tensorflow.python.platform import tf_logging as logging
class AllocationMaximum(collections.namedtuple(
    'AllocationMaximum', ('timestamp', 'num_bytes', 'tensors'))):
  """Stores the maximum allocation for a given allocator within the timeline.
Parameters:
timestamp: `tensorflow::Env::NowMicros()` when this maximum was reached.
num_bytes: the total memory used at this time.
tensors: the set of tensors allocated at this time.
"""
pass
class StepStatsAnalysis(collections.namedtuple(
'StepStatsAnalysis', ('chrome_trace', 'allocator_maximums'))):
"""Stores the step stats analysis output.
Parameters:
chrome_trace: A dict containing the chrome trace analysis.
allocator_maximums: A dict mapping allocator names to AllocationMaximum.
"""
pass
class _ChromeTraceFormatter(object):
"""A helper class for generating traces in Chrome Trace Format."""
def __init__(self, show_memory=False):
"""Constructs a new Chrome Trace formatter."""
self._show_memory = show_memory
self._events = []
self._metadata = []
def _create_event(self, ph, category, name, pid, tid, timestamp):
"""Creates a new Chrome Trace event.
For details of the file format, see:
https://github.com/catapult-project/catapult/blob/master/tracing/README.md
Args:
ph: The type of event - usually a single character.
category: The event category as a string.
name: The event name as a string.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
timestamp: The timestamp of this event as a long integer.
Returns:
A JSON compatible event object.
"""
event = {}
event['ph'] = ph
event['cat'] = category
event['name'] = name
event['pid'] = pid
event['tid'] = tid
event['ts'] = timestamp
return event
def emit_pid(self, name, pid):
"""Adds a process metadata event to the trace.
Args:
name: The process name as a string.
pid: Identifier of the process as an integer.
"""
event = {}
event['name'] = 'process_name'
event['ph'] = 'M'
event['pid'] = pid
event['args'] = {'name': name}
self._metadata.append(event)
def emit_tid(self, name, pid, tid):
"""Adds a thread metadata event to the trace.
Args:
name: The thread name as a string.
pid: Identifier of the process as an integer.
tid: Identifier of the thread as an integer.
"""
event = {}
event['name'] = 'thread_name'
event['ph'] = 'M'
event['pid'] = pid
event['tid'] = tid
event['args'] = {'name': name}
self._metadata.append(event)
def emit_region(self, timestamp, duration, pid, tid, category, name, args):
"""Adds a region event to the trace.
Args:
timestamp: The start timestamp of this region as a long integer.
duration: The duration of this region as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
category: The event category as a string.
name: The event name as a string.
args: A JSON-compatible dictionary of event arguments.
"""
event = self._create_event('X', category, name, pid, tid, timestamp)
event['dur'] = duration
event['args'] = args
self._events.append(event)
def emit_obj_create(self, category, name, timestamp, pid, tid, object_id):
"""Adds an object creation event to the trace.
Args:
category: The event category as a string.
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
object_id: Identifier of the object as an integer.
"""
event = self._create_event('N', category, name, pid, tid, timestamp)
event['id'] = object_id
self._events.append(event)
def emit_obj_delete(self, category, name, timestamp, pid, tid, object_id):
"""Adds an object deletion event to the trace.
Args:
category: The event category as a string.
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
object_id: Identifier of the object as an integer.
"""
event = self._create_event('D', category, name, pid, tid, timestamp)
event['id'] = object_id
self._events.append(event)
def emit_obj_snapshot(self, category, name, timestamp, pid, tid, object_id,
snapshot):
"""Adds an object snapshot event to the trace.
Args:
category: The event category as a string.
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
object_id: Identifier of the object as an integer.
snapshot: A JSON-compatible representation of the object.
"""
event = self._create_event('O', category, name, pid, tid, timestamp)
event['id'] = object_id
event['args'] = {'snapshot': snapshot}
self._events.append(event)
def emit_flow_start(self, name, timestamp, pid, tid, flow_id):
"""Adds a flow start event to the trace.
When matched with a flow end event (with the same 'flow_id') this will
cause the trace viewer to draw an arrow between the start and end events.
Args:
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
flow_id: Identifier of the flow as an integer.
"""
event = self._create_event('s', 'DataFlow', name, pid, tid, timestamp)
event['id'] = flow_id
self._events.append(event)
def emit_flow_end(self, name, timestamp, pid, tid, flow_id):
"""Adds a flow end event to the trace.
When matched with a flow start event (with the same 'flow_id') this will
cause the trace viewer to draw an arrow between the start and end events.
Args:
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
flow_id: Identifier of the flow as an integer.
"""
event = self._create_event('t', 'DataFlow', name, pid, tid, timestamp)
event['id'] = flow_id
self._events.append(event)
def emit_counter(self, category, name, pid, timestamp, counter, value):
"""Emits a record for a single counter.
Args:
category: The event category as a string.
name: The event name as a string.
pid: Identifier of the process generating this event as an integer.
timestamp: The timestamp of this event as a long integer.
counter: Name of the counter as a string.
value: Value of the counter as an integer.
"""
event = self._create_event('C', category, name, pid, 0, timestamp)
event['args'] = {counter: value}
self._events.append(event)
def emit_counters(self, category, name, pid, timestamp, counters):
"""Emits a counter record for the dictionary 'counters'.
Args:
category: The event category as a string.
name: The event name as a string.
pid: Identifier of the process generating this event as an integer.
timestamp: The timestamp of this event as a long integer.
counters: Dictionary of counter values.
"""
event = self._create_event('C', category, name, pid, 0, timestamp)
event['args'] = counters.copy()
self._events.append(event)
def format_to_string(self, pretty=False):
"""Formats the chrome trace to a string.
Args:
pretty: (Optional.) If True, produce human-readable JSON output.
Returns:
A JSON-formatted string in Chrome Trace format.
"""
trace = {}
trace['traceEvents'] = self._metadata + self._events
if pretty:
return json.dumps(trace, indent=4, separators=(',', ': '))
else:
return json.dumps(trace, separators=(',', ':'))
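# Illustrative usage sketch (not part of the original module): build a tiny trace by
# hand and dump it as Chrome Trace Format JSON. Timestamps and durations are in
# microseconds; every value below is made up.
def _example_chrome_trace():
  trace = _ChromeTraceFormatter()
  trace.emit_pid('/job:localhost/replica:0/task:0/cpu:0 Compute', 0)
  trace.emit_region(1000, 250, 0, 0, 'Op', 'MatMul',
                    {'name': 'dense/MatMul', 'op': 'MatMul'})
  trace.emit_counter('Memory', 'cpu_allocator', 0, 1250, 'bytes_in_use', 4096)
  return trace.format_to_string(pretty=True)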
class _TensorTracker(object):
"""An internal class to track the lifetime of a Tensor."""
def __init__(self, name, object_id, timestamp, pid, allocator, num_bytes):
"""Creates an object to track tensor references.
This class is not thread safe and is intended only for internal use by
the 'Timeline' class in this file.
Args:
name: The name of the Tensor as a string.
object_id: Chrome Trace object identifier assigned for this Tensor.
timestamp: The creation timestamp of this event as a long integer.
      pid: Process identifier of the associated device, as an integer.
allocator: Name of the allocator used to create the Tensor.
num_bytes: Number of bytes allocated (long integer).
Returns:
A 'TensorTracker' object.
"""
self._name = name
self._pid = pid
self._object_id = object_id
self._create_time = timestamp
self._allocator = allocator
self._num_bytes = num_bytes
self._ref_times = []
self._unref_times = []
@property
def name(self):
"""Name of this tensor."""
return self._name
@property
def pid(self):
"""ID of the process which created this tensor (an integer)."""
return self._pid
@property
def create_time(self):
"""Timestamp when this tensor was created (long integer)."""
return self._create_time
@property
def object_id(self):
"""Returns the object identifier of this tensor (integer)."""
return self._object_id
@property
def num_bytes(self):
"""Size of this tensor in bytes (long integer)."""
return self._num_bytes
@property
def allocator(self):
"""Name of the allocator used to create this tensor (string)."""
return self._allocator
@property
def last_unref(self):
"""Last unreference timestamp of this tensor (long integer)."""
return max(self._unref_times)
def add_ref(self, timestamp):
"""Adds a reference to this tensor with the specified timestamp.
Args:
timestamp: Timestamp of object reference as an integer.
"""
self._ref_times.append(timestamp)
def add_unref(self, timestamp):
"""Adds an unref to this tensor with the specified timestamp.
Args:
timestamp: Timestamp of object unreference as an integer.
"""
self._unref_times.append(timestamp)
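# Illustrative sketch (not part of the original module): the tracker records every
# reference/unreference timestamp, and last_unref marks the tensor's end of life.
# All argument values below are made up.
def _example_tensor_lifetime():
  tracker = _TensorTracker('dense/MatMul:0', 7, 1000, 0, 'cpu_allocator', 4096)
  tracker.add_ref(1010)
  tracker.add_unref(1050)
  tracker.add_unref(1200)
  return tracker.last_unref  # 1200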
class Timeline(object):
"""A class for visualizing execution timelines of TensorFlow steps."""
def __init__(self, step_stats, graph=None):
"""Constructs a new Timeline.
A 'Timeline' is used for visualizing the execution of a TensorFlow
computation. It shows the timings and concurrency of execution at
the granularity of TensorFlow Ops.
This class is not thread safe.
Args:
step_stats: The 'StepStats' proto recording execution times.
graph: (Optional) The 'Graph' that was executed.
"""
self._step_stats = step_stats
self._graph = graph
self._chrome_trace = _ChromeTraceFormatter()
self._next_pid = 0
self._device_pids = {} # device name -> pid for compute activity.
self._tensor_pids = {} # device name -> pid for tensors.
self._tensors = {} # tensor_name -> TensorTracker
self._next_flow_id = 0
self._flow_starts = {} # tensor_name -> (timestamp, pid, tid)
self._alloc_times = {} # tensor_name -> ( time, allocator, size )
self._allocator_maximums = {} # allocator name => maximum bytes long
def _alloc_pid(self):
"""Allocate a process Id."""
pid = self._next_pid
self._next_pid += 1
return pid
def _alloc_flow_id(self):
"""Allocate a flow Id."""
flow_id = self._next_flow_id
self._next_flow_id += 1
return flow_id
def _parse_op_label(self, label):
"""Parses the fields in a node timeline label."""
nn, rest = label.split(' = ')
op, rest = rest.split('(')
if rest == ')':
inputs = []
else:
inputs = rest[:-1].split(', ')
return nn, op, inputs
def _assign_lanes(self):
"""Assigns non-overlapping lanes for the activities on each device."""
for device_stats in self._step_stats.dev_stats:
# TODO(pbar): Genuine thread IDs in NodeExecStats might be helpful.
lanes = [0]
for ns in device_stats.node_stats:
l = -1
for (i, lts) in enumerate(lanes):
if ns.all_start_micros > lts:
l = i
lanes[l] = ns.all_start_micros + ns.all_end_rel_micros
break
if l < 0:
l = len(lanes)
lanes.append(ns.all_start_micros + ns.all_end_rel_micros)
ns.thread_id = l
def _emit_op(self, nodestats, pid, is_gputrace):
"""Generates a Chrome Trace event to show Op execution.
Args:
nodestats: The 'NodeExecStats' proto recording op execution.
pid: The pid assigned for the device where this op ran.
is_gputrace: If True then this op came from the GPUTracer.
"""
node_name = nodestats.node_name
start = nodestats.all_start_micros
duration = nodestats.all_end_rel_micros
tid = nodestats.thread_id
if is_gputrace:
# Node names should always have the form 'name:op'.
fields = node_name.split(':') + ['unknown']
node_name, op = fields[:2]
inputs = []
else:
_, op, inputs = self._parse_op_label(nodestats.timeline_label)
args = {'name': node_name, 'op': op}
for i, iname in enumerate(inputs):
args['input%d' % i] = iname
self._chrome_trace.emit_region(start, duration, pid, tid, 'Op', op, args)
def _emit_tensor_snapshot(self, tensor, timestamp, pid, tid, value):
"""Generate Chrome Trace snapshot event for a computed Tensor.
Args:
tensor: A 'TensorTracker' object.
timestamp: The timestamp of this snapshot as a long integer.
pid: The pid assigned for showing the device where this op ran.
tid: The tid of the thread computing the tensor snapshot.
value: A JSON-compliant snapshot of the object.
"""
desc = str(value.tensor_description).replace('"', '')
snapshot = {'tensor_description': desc}
self._chrome_trace.emit_obj_snapshot('Tensor', tensor.name, timestamp, pid,
tid, tensor.object_id, snapshot)
def _produce_tensor(self, name, timestamp, tensors_pid, allocator, num_bytes):
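    """Creates a _TensorTracker for the named tensor and registers it."""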
object_id = len(self._tensors)
tensor = _TensorTracker(name, object_id, timestamp, tensors_pid, allocator,
num_bytes)
self._tensors[name] = tensor
return tensor
def _is_gputrace_device(self, device_name):
"""Returns true if this device is part of the GPUTracer logging."""
return '/stream:' in device_name or '/memcpy' in device_name
def _allocate_pids(self):
"""Allocate fake process ids for each device in the StepStats."""
self._allocators_pid = self._alloc_pid()
self._chrome_trace.emit_pid('Allocators', self._allocators_pid)
# Add processes in the Chrome trace to show compute and data activity.
for dev_stats in self._step_stats.dev_stats:
device_pid = self._alloc_pid()
self._device_pids[dev_stats.device] = device_pid
tensors_pid = self._alloc_pid()
self._tensor_pids[dev_stats.device] = tensors_pid
self._chrome_trace.emit_pid(dev_stats.device + ' Compute', device_pid)
self._chrome_trace.emit_pid(dev_stats.device + ' Tensors', tensors_pid)
def _analyze_tensors(self, show_memory):
"""Analyze tensor references to track dataflow."""
for dev_stats in self._step_stats.dev_stats:
device_pid = self._device_pids[dev_stats.device]
tensors_pid = self._tensor_pids[dev_stats.device]
for node_stats in dev_stats.node_stats:
tid = node_stats.thread_id
node_name = node_stats.node_name
start_time = node_stats.all_start_micros
end_time = node_stats.all_start_micros + node_stats.all_end_rel_micros
for index, output in enumerate(node_stats.output):
if index:
output_name = '%s:%d' % (node_name, index)
else:
output_name = node_name
allocation = output.tensor_description.allocation_description
num_bytes = allocation.requested_bytes
allocator_name = allocation.allocator_name
tensor = self._produce_tensor(output_name, start_time, tensors_pid,
allocator_name, num_bytes)
tensor.add_ref(start_time)
tensor.add_unref(end_time)
self._flow_starts[output_name] = (end_time, device_pid, tid)
if show_memory:
self._chrome_trace.emit_obj_create('Tensor', output_name,
start_time, tensors_pid, tid,
tensor.object_id)
self._emit_tensor_snapshot(tensor, end_time - 1, tensors_pid, tid,
output)
def _show_compute(self, show_dataflow):
"""Visualize the computation activity."""
for dev_stats in self._step_stats.dev_stats:
device_name = dev_stats.device
device_pid = self._device_pids[device_name]
is_gputrace = self._is_gputrace_device(device_name)
for node_stats in dev_stats.node_stats:
tid = node_stats.thread_id
start_time = node_stats.all_start_micros
end_time = node_stats.all_start_micros + node_stats.all_end_rel_micros
self._emit_op(node_stats, device_pid, is_gputrace)
if is_gputrace:
continue
_, _, inputs = self._parse_op_label(node_stats.timeline_label)
for input_name in inputs:
if input_name not in self._tensors:
# This can happen when partitioning has inserted a Send/Recv.
# We remove the numeric suffix so that the dataflow appears to
# come from the original node. Ideally, the StepStats would
# contain logging for the Send and Recv nodes.
index = input_name.rfind('/_')
if index > 0:
input_name = input_name[:index]
if input_name in self._tensors:
tensor = self._tensors[input_name]
tensor.add_ref(start_time)
tensor.add_unref(end_time - 1)
if show_dataflow:
# We use a different flow ID for every graph edge.
create_time, create_pid, create_tid = self._flow_starts[
input_name]
# Don't add flows when producer and consumer ops are on the same
# pid/tid since the horizontal arrows clutter the visualization.
if create_pid != device_pid or create_tid != tid:
flow_id = self._alloc_flow_id()
self._chrome_trace.emit_flow_start(input_name, create_time,
create_pid, create_tid,
flow_id)
self._chrome_trace.emit_flow_end(input_name, start_time,
device_pid, tid, flow_id)
else:
logging.vlog(1, 'Can\'t find tensor %s - removed by CSE?',
input_name)
def _show_memory_counters(self):
"""Produce a counter series for each memory allocator."""
# Iterate over all tensor trackers to build a list of allocations and
# frees for each allocator. Then sort the lists and emit a cumulative
# counter series for each allocator.
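    # For example, events [(t0, +64, 'a'), (t1, +32, 'b'), (t2, -64, 'a')] for
    # one allocator produce running totals of 64, 96 and 32 bytes.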
allocations = {}
for name in self._tensors:
tensor = self._tensors[name]
self._chrome_trace.emit_obj_delete('Tensor', name, tensor.last_unref,
tensor.pid, 0, tensor.object_id)
allocator = tensor.allocator
if allocator not in allocations:
allocations[allocator] = []
num_bytes = tensor.num_bytes
allocations[allocator].append((tensor.create_time, num_bytes, name))
allocations[allocator].append((tensor.last_unref, -num_bytes, name))
alloc_maxes = {}
# Generate a counter series showing total allocations for each allocator.
for allocator in allocations:
alloc_list = allocations[allocator]
alloc_list.sort()
total_bytes = 0
alloc_tensor_set = set()
alloc_maxes[allocator] = AllocationMaximum(
timestamp=0, num_bytes=0, tensors=set())
for time, num_bytes, name in alloc_list:
total_bytes += num_bytes
if num_bytes < 0:
alloc_tensor_set.discard(name)
else:
alloc_tensor_set.add(name)
if total_bytes > alloc_maxes[allocator].num_bytes:
alloc_maxes[allocator] = AllocationMaximum(
timestamp=time,
num_bytes=total_bytes,
tensors=copy.deepcopy(alloc_tensor_set))
self._chrome_trace.emit_counter('Memory', allocator,
self._allocators_pid, time, allocator,
total_bytes)
self._allocator_maximums = alloc_maxes
def analyze_step_stats(self, show_dataflow=True, show_memory=True):
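    """Analyzes the step stats, returning a 'StepStatsAnalysis' with the
    Chrome trace and the maximum memory usage per allocator."""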
self._allocate_pids()
self._assign_lanes()
self._analyze_tensors(show_memory)
self._show_compute(show_dataflow)
if show_memory:
self._show_memory_counters()
return StepStatsAnalysis(
chrome_trace=self._chrome_trace,
allocator_maximums=self._allocator_maximums)
def generate_chrome_trace_format(self, show_dataflow=True, show_memory=False):
"""Produces a trace in Chrome Trace Format.
Args:
show_dataflow: (Optional.) If True, add flow events to the trace
connecting producers and consumers of tensors.
show_memory: (Optional.) If True, add object snapshot events to the trace
showing the sizes and lifetimes of tensors.
Returns:
A JSON formatted string in Chrome Trace format.
"""
step_stats_analysis = self.analyze_step_stats(
show_dataflow=show_dataflow, show_memory=show_memory)
return step_stats_analysis.chrome_trace.format_to_string(pretty=True)
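# A minimal usage sketch (not part of this module), assuming the TensorFlow
# 1.x session API and a 'RunMetadata' proto collected with full tracing:
#
#   run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
#   run_metadata = tf.RunMetadata()
#   sess.run(fetches, options=run_options, run_metadata=run_metadata)
#   trace = Timeline(run_metadata.step_stats, graph=sess.graph)
#   with open('timeline.json', 'w') as f:
#     f.write(trace.generate_chrome_trace_format(show_memory=True))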
| apache-2.0 |
lokirius/python-for-android | python3-alpha/python3-src/Lib/plat-freebsd6/IN.py | 172 | 12416 | # Generated by h2py from /usr/include/netinet/in.h
# Included from sys/cdefs.h
__GNUCLIKE_ASM = 3
__GNUCLIKE_ASM = 2
__GNUCLIKE___TYPEOF = 1
__GNUCLIKE___OFFSETOF = 1
__GNUCLIKE___SECTION = 1
__GNUCLIKE_ATTRIBUTE_MODE_DI = 1
__GNUCLIKE_CTOR_SECTION_HANDLING = 1
__GNUCLIKE_BUILTIN_CONSTANT_P = 1
__GNUCLIKE_BUILTIN_VARARGS = 1
__GNUCLIKE_BUILTIN_STDARG = 1
__GNUCLIKE_BUILTIN_VAALIST = 1
__GNUC_VA_LIST_COMPATIBILITY = 1
__GNUCLIKE_BUILTIN_NEXT_ARG = 1
__GNUCLIKE_BUILTIN_MEMCPY = 1
__CC_SUPPORTS_INLINE = 1
__CC_SUPPORTS___INLINE = 1
__CC_SUPPORTS___INLINE__ = 1
__CC_SUPPORTS___FUNC__ = 1
__CC_SUPPORTS_WARNING = 1
__CC_SUPPORTS_VARADIC_XXX = 1
__CC_SUPPORTS_DYNAMIC_ARRAY_INIT = 1
__CC_INT_IS_32BIT = 1
def __P(protos): return protos
def __STRING(x): return #x
def __XSTRING(x): return __STRING(x)
def __P(protos): return ()
def __STRING(x): return "x"
def __aligned(x): return __attribute__((__aligned__(x)))
def __section(x): return __attribute__((__section__(x)))
def __aligned(x): return __attribute__((__aligned__(x)))
def __section(x): return __attribute__((__section__(x)))
def __nonnull(x): return __attribute__((__nonnull__(x)))
def __predict_true(exp): return __builtin_expect((exp), 1)
def __predict_false(exp): return __builtin_expect((exp), 0)
def __predict_true(exp): return (exp)
def __predict_false(exp): return (exp)
def __format_arg(fmtarg): return __attribute__((__format_arg__ (fmtarg)))
def __FBSDID(s): return __IDSTRING(__CONCAT(__rcsid_,__LINE__),s)
def __RCSID(s): return __IDSTRING(__CONCAT(__rcsid_,__LINE__),s)
def __RCSID_SOURCE(s): return __IDSTRING(__CONCAT(__rcsid_source_,__LINE__),s)
def __SCCSID(s): return __IDSTRING(__CONCAT(__sccsid_,__LINE__),s)
def __COPYRIGHT(s): return __IDSTRING(__CONCAT(__copyright_,__LINE__),s)
_POSIX_C_SOURCE = 199009
_POSIX_C_SOURCE = 199209
__XSI_VISIBLE = 600
_POSIX_C_SOURCE = 200112
__XSI_VISIBLE = 500
_POSIX_C_SOURCE = 199506
_POSIX_C_SOURCE = 198808
__POSIX_VISIBLE = 200112
__ISO_C_VISIBLE = 1999
__POSIX_VISIBLE = 199506
__ISO_C_VISIBLE = 1990
__POSIX_VISIBLE = 199309
__ISO_C_VISIBLE = 1990
__POSIX_VISIBLE = 199209
__ISO_C_VISIBLE = 1990
__POSIX_VISIBLE = 199009
__ISO_C_VISIBLE = 1990
__POSIX_VISIBLE = 198808
__ISO_C_VISIBLE = 0
__POSIX_VISIBLE = 0
__XSI_VISIBLE = 0
__BSD_VISIBLE = 0
__ISO_C_VISIBLE = 1990
__POSIX_VISIBLE = 0
__XSI_VISIBLE = 0
__BSD_VISIBLE = 0
__ISO_C_VISIBLE = 1999
__POSIX_VISIBLE = 200112
__XSI_VISIBLE = 600
__BSD_VISIBLE = 1
__ISO_C_VISIBLE = 1999
# Included from sys/_types.h
# Included from machine/_types.h
# Included from machine/endian.h
_QUAD_HIGHWORD = 1
_QUAD_LOWWORD = 0
_LITTLE_ENDIAN = 1234
_BIG_ENDIAN = 4321
_PDP_ENDIAN = 3412
_BYTE_ORDER = _LITTLE_ENDIAN
LITTLE_ENDIAN = _LITTLE_ENDIAN
BIG_ENDIAN = _BIG_ENDIAN
PDP_ENDIAN = _PDP_ENDIAN
BYTE_ORDER = _BYTE_ORDER
def __word_swap_int_var(x): return \
def __word_swap_int_const(x): return \
def __word_swap_int(x): return __word_swap_int_var(x)
def __byte_swap_int_var(x): return \
def __byte_swap_int_const(x): return \
def __byte_swap_int(x): return __byte_swap_int_var(x)
def __byte_swap_long_var(x): return \
def __byte_swap_long_const(x): return \
def __byte_swap_long(x): return __byte_swap_long_var(x)
def __byte_swap_word_var(x): return \
def __byte_swap_word_const(x): return \
def __byte_swap_word(x): return __byte_swap_word_var(x)
def __htonl(x): return __bswap32(x)
def __htons(x): return __bswap16(x)
def __ntohl(x): return __bswap32(x)
def __ntohs(x): return __bswap16(x)
IPPROTO_IP = 0
IPPROTO_ICMP = 1
IPPROTO_TCP = 6
IPPROTO_UDP = 17
def htonl(x): return __htonl(x)
def htons(x): return __htons(x)
def ntohl(x): return __ntohl(x)
def ntohs(x): return __ntohs(x)
IPPROTO_RAW = 255
INET_ADDRSTRLEN = 16
IPPROTO_HOPOPTS = 0
IPPROTO_IGMP = 2
IPPROTO_GGP = 3
IPPROTO_IPV4 = 4
IPPROTO_IPIP = IPPROTO_IPV4
IPPROTO_ST = 7
IPPROTO_EGP = 8
IPPROTO_PIGP = 9
IPPROTO_RCCMON = 10
IPPROTO_NVPII = 11
IPPROTO_PUP = 12
IPPROTO_ARGUS = 13
IPPROTO_EMCON = 14
IPPROTO_XNET = 15
IPPROTO_CHAOS = 16
IPPROTO_MUX = 18
IPPROTO_MEAS = 19
IPPROTO_HMP = 20
IPPROTO_PRM = 21
IPPROTO_IDP = 22
IPPROTO_TRUNK1 = 23
IPPROTO_TRUNK2 = 24
IPPROTO_LEAF1 = 25
IPPROTO_LEAF2 = 26
IPPROTO_RDP = 27
IPPROTO_IRTP = 28
IPPROTO_TP = 29
IPPROTO_BLT = 30
IPPROTO_NSP = 31
IPPROTO_INP = 32
IPPROTO_SEP = 33
IPPROTO_3PC = 34
IPPROTO_IDPR = 35
IPPROTO_XTP = 36
IPPROTO_DDP = 37
IPPROTO_CMTP = 38
IPPROTO_TPXX = 39
IPPROTO_IL = 40
IPPROTO_IPV6 = 41
IPPROTO_SDRP = 42
IPPROTO_ROUTING = 43
IPPROTO_FRAGMENT = 44
IPPROTO_IDRP = 45
IPPROTO_RSVP = 46
IPPROTO_GRE = 47
IPPROTO_MHRP = 48
IPPROTO_BHA = 49
IPPROTO_ESP = 50
IPPROTO_AH = 51
IPPROTO_INLSP = 52
IPPROTO_SWIPE = 53
IPPROTO_NHRP = 54
IPPROTO_MOBILE = 55
IPPROTO_TLSP = 56
IPPROTO_SKIP = 57
IPPROTO_ICMPV6 = 58
IPPROTO_NONE = 59
IPPROTO_DSTOPTS = 60
IPPROTO_AHIP = 61
IPPROTO_CFTP = 62
IPPROTO_HELLO = 63
IPPROTO_SATEXPAK = 64
IPPROTO_KRYPTOLAN = 65
IPPROTO_RVD = 66
IPPROTO_IPPC = 67
IPPROTO_ADFS = 68
IPPROTO_SATMON = 69
IPPROTO_VISA = 70
IPPROTO_IPCV = 71
IPPROTO_CPNX = 72
IPPROTO_CPHB = 73
IPPROTO_WSN = 74
IPPROTO_PVP = 75
IPPROTO_BRSATMON = 76
IPPROTO_ND = 77
IPPROTO_WBMON = 78
IPPROTO_WBEXPAK = 79
IPPROTO_EON = 80
IPPROTO_VMTP = 81
IPPROTO_SVMTP = 82
IPPROTO_VINES = 83
IPPROTO_TTP = 84
IPPROTO_IGP = 85
IPPROTO_DGP = 86
IPPROTO_TCF = 87
IPPROTO_IGRP = 88
IPPROTO_OSPFIGP = 89
IPPROTO_SRPC = 90
IPPROTO_LARP = 91
IPPROTO_MTP = 92
IPPROTO_AX25 = 93
IPPROTO_IPEIP = 94
IPPROTO_MICP = 95
IPPROTO_SCCSP = 96
IPPROTO_ETHERIP = 97
IPPROTO_ENCAP = 98
IPPROTO_APES = 99
IPPROTO_GMTP = 100
IPPROTO_IPCOMP = 108
IPPROTO_SCTP = 132
IPPROTO_PIM = 103
IPPROTO_CARP = 112
IPPROTO_PGM = 113
IPPROTO_PFSYNC = 240
IPPROTO_OLD_DIVERT = 254
IPPROTO_MAX = 256
IPPROTO_DONE = 257
IPPROTO_DIVERT = 258
IPPROTO_SPACER = 32767
IPPORT_RESERVED = 1024
IPPORT_HIFIRSTAUTO = 49152
IPPORT_HILASTAUTO = 65535
IPPORT_RESERVEDSTART = 600
IPPORT_MAX = 65535
def IN_CLASSA(i): return (((u_int32_t)(i) & 0x80000000) == 0)
IN_CLASSA_NET = 0xff000000
IN_CLASSA_NSHIFT = 24
IN_CLASSA_HOST = 0x00ffffff
IN_CLASSA_MAX = 128
def IN_CLASSB(i): return (((u_int32_t)(i) & 0xc0000000) == 0x80000000)
IN_CLASSB_NET = 0xffff0000
IN_CLASSB_NSHIFT = 16
IN_CLASSB_HOST = 0x0000ffff
IN_CLASSB_MAX = 65536
def IN_CLASSC(i): return (((u_int32_t)(i) & 0xe0000000) == 0xc0000000)
IN_CLASSC_NET = 0xffffff00
IN_CLASSC_NSHIFT = 8
IN_CLASSC_HOST = 0x000000ff
def IN_CLASSD(i): return (((u_int32_t)(i) & 0xf0000000) == 0xe0000000)
IN_CLASSD_NET = 0xf0000000
IN_CLASSD_NSHIFT = 28
IN_CLASSD_HOST = 0x0fffffff
def IN_MULTICAST(i): return IN_CLASSD(i)
def IN_EXPERIMENTAL(i): return (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)
def IN_BADCLASS(i): return (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)
INADDR_NONE = 0xffffffff
IN_LOOPBACKNET = 127
IP_OPTIONS = 1
IP_HDRINCL = 2
IP_TOS = 3
IP_TTL = 4
IP_RECVOPTS = 5
IP_RECVRETOPTS = 6
IP_RECVDSTADDR = 7
IP_SENDSRCADDR = IP_RECVDSTADDR
IP_RETOPTS = 8
IP_MULTICAST_IF = 9
IP_MULTICAST_TTL = 10
IP_MULTICAST_LOOP = 11
IP_ADD_MEMBERSHIP = 12
IP_DROP_MEMBERSHIP = 13
IP_MULTICAST_VIF = 14
IP_RSVP_ON = 15
IP_RSVP_OFF = 16
IP_RSVP_VIF_ON = 17
IP_RSVP_VIF_OFF = 18
IP_PORTRANGE = 19
IP_RECVIF = 20
IP_IPSEC_POLICY = 21
IP_FAITH = 22
IP_ONESBCAST = 23
IP_FW_TABLE_ADD = 40
IP_FW_TABLE_DEL = 41
IP_FW_TABLE_FLUSH = 42
IP_FW_TABLE_GETSIZE = 43
IP_FW_TABLE_LIST = 44
IP_FW_ADD = 50
IP_FW_DEL = 51
IP_FW_FLUSH = 52
IP_FW_ZERO = 53
IP_FW_GET = 54
IP_FW_RESETLOG = 55
IP_DUMMYNET_CONFIGURE = 60
IP_DUMMYNET_DEL = 61
IP_DUMMYNET_FLUSH = 62
IP_DUMMYNET_GET = 64
IP_RECVTTL = 65
IP_MINTTL = 66
IP_DONTFRAG = 67
IP_DEFAULT_MULTICAST_TTL = 1
IP_DEFAULT_MULTICAST_LOOP = 1
IP_MAX_MEMBERSHIPS = 20
IP_PORTRANGE_DEFAULT = 0
IP_PORTRANGE_HIGH = 1
IP_PORTRANGE_LOW = 2
IPPROTO_MAXID = (IPPROTO_AH + 1)
IPCTL_FORWARDING = 1
IPCTL_SENDREDIRECTS = 2
IPCTL_DEFTTL = 3
IPCTL_DEFMTU = 4
IPCTL_RTEXPIRE = 5
IPCTL_RTMINEXPIRE = 6
IPCTL_RTMAXCACHE = 7
IPCTL_SOURCEROUTE = 8
IPCTL_DIRECTEDBROADCAST = 9
IPCTL_INTRQMAXLEN = 10
IPCTL_INTRQDROPS = 11
IPCTL_STATS = 12
IPCTL_ACCEPTSOURCEROUTE = 13
IPCTL_FASTFORWARDING = 14
IPCTL_KEEPFAITH = 15
IPCTL_GIF_TTL = 16
IPCTL_MAXID = 17
def in_nullhost(x): return ((x).s_addr == INADDR_ANY)
# Included from netinet6/in6.h
__KAME_VERSION = "FreeBSD"
IPV6PORT_RESERVED = 1024
IPV6PORT_ANONMIN = 49152
IPV6PORT_ANONMAX = 65535
IPV6PORT_RESERVEDMIN = 600
IPV6PORT_RESERVEDMAX = (IPV6PORT_RESERVED-1)
INET6_ADDRSTRLEN = 46
IPV6_ADDR_INT32_ONE = 1
IPV6_ADDR_INT32_TWO = 2
IPV6_ADDR_INT32_MNL = 0xff010000
IPV6_ADDR_INT32_MLL = 0xff020000
IPV6_ADDR_INT32_SMP = 0x0000ffff
IPV6_ADDR_INT16_ULL = 0xfe80
IPV6_ADDR_INT16_USL = 0xfec0
IPV6_ADDR_INT16_MLL = 0xff02
IPV6_ADDR_INT32_ONE = 0x01000000
IPV6_ADDR_INT32_TWO = 0x02000000
IPV6_ADDR_INT32_MNL = 0x000001ff
IPV6_ADDR_INT32_MLL = 0x000002ff
IPV6_ADDR_INT32_SMP = 0xffff0000
IPV6_ADDR_INT16_ULL = 0x80fe
IPV6_ADDR_INT16_USL = 0xc0fe
IPV6_ADDR_INT16_MLL = 0x02ff
def IN6_IS_ADDR_UNSPECIFIED(a): return \
def IN6_IS_ADDR_LOOPBACK(a): return \
def IN6_IS_ADDR_V4COMPAT(a): return \
def IN6_IS_ADDR_V4MAPPED(a): return \
IPV6_ADDR_SCOPE_NODELOCAL = 0x01
IPV6_ADDR_SCOPE_INTFACELOCAL = 0x01
IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
IPV6_ADDR_SCOPE_SITELOCAL = 0x05
IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
IPV6_ADDR_SCOPE_GLOBAL = 0x0e
__IPV6_ADDR_SCOPE_NODELOCAL = 0x01
__IPV6_ADDR_SCOPE_INTFACELOCAL = 0x01
__IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
__IPV6_ADDR_SCOPE_SITELOCAL = 0x05
__IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
__IPV6_ADDR_SCOPE_GLOBAL = 0x0e
def IN6_IS_ADDR_LINKLOCAL(a): return \
def IN6_IS_ADDR_SITELOCAL(a): return \
def IN6_IS_ADDR_MC_NODELOCAL(a): return \
def IN6_IS_ADDR_MC_INTFACELOCAL(a): return \
def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
def IN6_IS_ADDR_MC_SITELOCAL(a): return \
def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
def IN6_IS_ADDR_MC_GLOBAL(a): return \
def IN6_IS_ADDR_MC_NODELOCAL(a): return \
def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
def IN6_IS_ADDR_MC_SITELOCAL(a): return \
def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
def IN6_IS_ADDR_MC_GLOBAL(a): return \
def IN6_IS_SCOPE_LINKLOCAL(a): return \
def IFA6_IS_DEPRECATED(a): return \
def IFA6_IS_INVALID(a): return \
IPV6_OPTIONS = 1
IPV6_RECVOPTS = 5
IPV6_RECVRETOPTS = 6
IPV6_RECVDSTADDR = 7
IPV6_RETOPTS = 8
IPV6_SOCKOPT_RESERVED1 = 3
IPV6_UNICAST_HOPS = 4
IPV6_MULTICAST_IF = 9
IPV6_MULTICAST_HOPS = 10
IPV6_MULTICAST_LOOP = 11
IPV6_JOIN_GROUP = 12
IPV6_LEAVE_GROUP = 13
IPV6_PORTRANGE = 14
ICMP6_FILTER = 18
IPV6_2292PKTINFO = 19
IPV6_2292HOPLIMIT = 20
IPV6_2292NEXTHOP = 21
IPV6_2292HOPOPTS = 22
IPV6_2292DSTOPTS = 23
IPV6_2292RTHDR = 24
IPV6_2292PKTOPTIONS = 25
IPV6_CHECKSUM = 26
IPV6_V6ONLY = 27
IPV6_BINDV6ONLY = IPV6_V6ONLY
IPV6_IPSEC_POLICY = 28
IPV6_FAITH = 29
IPV6_FW_ADD = 30
IPV6_FW_DEL = 31
IPV6_FW_FLUSH = 32
IPV6_FW_ZERO = 33
IPV6_FW_GET = 34
IPV6_RTHDRDSTOPTS = 35
IPV6_RECVPKTINFO = 36
IPV6_RECVHOPLIMIT = 37
IPV6_RECVRTHDR = 38
IPV6_RECVHOPOPTS = 39
IPV6_RECVDSTOPTS = 40
IPV6_RECVRTHDRDSTOPTS = 41
IPV6_USE_MIN_MTU = 42
IPV6_RECVPATHMTU = 43
IPV6_PATHMTU = 44
IPV6_REACHCONF = 45
IPV6_PKTINFO = 46
IPV6_HOPLIMIT = 47
IPV6_NEXTHOP = 48
IPV6_HOPOPTS = 49
IPV6_DSTOPTS = 50
IPV6_RTHDR = 51
IPV6_PKTOPTIONS = 52
IPV6_RECVTCLASS = 57
IPV6_AUTOFLOWLABEL = 59
IPV6_TCLASS = 61
IPV6_DONTFRAG = 62
IPV6_PREFER_TEMPADDR = 63
IPV6_RTHDR_LOOSE = 0
IPV6_RTHDR_STRICT = 1
IPV6_RTHDR_TYPE_0 = 0
IPV6_DEFAULT_MULTICAST_HOPS = 1
IPV6_DEFAULT_MULTICAST_LOOP = 1
IPV6_PORTRANGE_DEFAULT = 0
IPV6_PORTRANGE_HIGH = 1
IPV6_PORTRANGE_LOW = 2
IPV6PROTO_MAXID = (IPPROTO_PIM + 1)
IPV6CTL_FORWARDING = 1
IPV6CTL_SENDREDIRECTS = 2
IPV6CTL_DEFHLIM = 3
IPV6CTL_DEFMTU = 4
IPV6CTL_FORWSRCRT = 5
IPV6CTL_STATS = 6
IPV6CTL_MRTSTATS = 7
IPV6CTL_MRTPROTO = 8
IPV6CTL_MAXFRAGPACKETS = 9
IPV6CTL_SOURCECHECK = 10
IPV6CTL_SOURCECHECK_LOGINT = 11
IPV6CTL_ACCEPT_RTADV = 12
IPV6CTL_KEEPFAITH = 13
IPV6CTL_LOG_INTERVAL = 14
IPV6CTL_HDRNESTLIMIT = 15
IPV6CTL_DAD_COUNT = 16
IPV6CTL_AUTO_FLOWLABEL = 17
IPV6CTL_DEFMCASTHLIM = 18
IPV6CTL_GIF_HLIM = 19
IPV6CTL_KAME_VERSION = 20
IPV6CTL_USE_DEPRECATED = 21
IPV6CTL_RR_PRUNE = 22
IPV6CTL_MAPPED_ADDR = 23
IPV6CTL_V6ONLY = 24
IPV6CTL_RTEXPIRE = 25
IPV6CTL_RTMINEXPIRE = 26
IPV6CTL_RTMAXCACHE = 27
IPV6CTL_USETEMPADDR = 32
IPV6CTL_TEMPPLTIME = 33
IPV6CTL_TEMPVLTIME = 34
IPV6CTL_AUTO_LINKLOCAL = 35
IPV6CTL_RIP6STATS = 36
IPV6CTL_PREFER_TEMPADDR = 37
IPV6CTL_ADDRCTLPOLICY = 38
IPV6CTL_USE_DEFAULTZONE = 39
IPV6CTL_MAXFRAGS = 41
IPV6CTL_IFQ = 42
IPV6CTL_ISATAPRTR = 43
IPV6CTL_MCAST_PMTU = 44
IPV6CTL_STEALTH = 45
IPV6CTL_MAXID = 46
| apache-2.0 |
nikolas/lettuce | tests/integration/lib/Django-1.3/django/db/backends/sqlite3/creation.py | 230 | 3239 | import os
import sys
from django.db.backends.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
# SQLite doesn't actually support most of these types, but it "does the right
# thing" given more verbose field definitions, so leave them as is so that
# schema inspection is more useful.
data_types = {
'AutoField': 'integer',
'BooleanField': 'bool',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'datetime',
'DecimalField': 'decimal',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'real',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'char(15)',
'NullBooleanField': 'bool',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer unsigned',
'PositiveSmallIntegerField': 'smallint unsigned',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
}
def sql_for_pending_references(self, model, style, pending_references):
"SQLite3 doesn't support constraints"
return []
def sql_remove_table_constraints(self, model, references_to_delete, style):
"SQLite3 doesn't support constraints"
return []
def _get_test_db_name(self):
test_database_name = self.connection.settings_dict['TEST_NAME']
if test_database_name and test_database_name != ':memory:':
return test_database_name
return ':memory:'
def _create_test_db(self, verbosity, autoclobber):
test_database_name = self._get_test_db_name()
if test_database_name != ':memory:':
# Erase the old test database
if verbosity >= 1:
print "Destroying old test database '%s'..." % self.connection.alias
if os.access(test_database_name, os.F_OK):
if not autoclobber:
confirm = raw_input("Type 'yes' if you would like to try deleting the test database '%s', or 'no' to cancel: " % test_database_name)
if autoclobber or confirm == 'yes':
try:
os.remove(test_database_name)
except Exception, e:
sys.stderr.write("Got an error deleting the old test database: %s\n" % e)
sys.exit(2)
else:
print "Tests cancelled."
sys.exit(1)
return test_database_name
def _destroy_test_db(self, test_database_name, verbosity):
if test_database_name and test_database_name != ":memory:":
# Remove the SQLite database file
os.remove(test_database_name)
| gpl-3.0 |
dex4er/django | tests/servers/tests.py | 49 | 5773 | # -*- encoding: utf-8 -*-
"""
Tests for django.core.servers.
"""
from __future__ import unicode_literals
import os
import socket
from django.core.exceptions import ImproperlyConfigured
from django.test import LiveServerTestCase
from django.test.utils import override_settings
from django.utils.http import urlencode
from django.utils.six.moves.urllib.error import HTTPError
from django.utils.six.moves.urllib.request import urlopen
from django.utils._os import upath
from .models import Person
TEST_ROOT = os.path.dirname(upath(__file__))
TEST_SETTINGS = {
'MEDIA_URL': '/media/',
'MEDIA_ROOT': os.path.join(TEST_ROOT, 'media'),
'STATIC_URL': '/static/',
'STATIC_ROOT': os.path.join(TEST_ROOT, 'static'),
}
class LiveServerBase(LiveServerTestCase):
available_apps = [
'servers',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
]
fixtures = ['testdata.json']
urls = 'servers.urls'
@classmethod
def setUpClass(cls):
# Override settings
cls.settings_override = override_settings(**TEST_SETTINGS)
cls.settings_override.enable()
super(LiveServerBase, cls).setUpClass()
@classmethod
def tearDownClass(cls):
# Restore original settings
cls.settings_override.disable()
super(LiveServerBase, cls).tearDownClass()
def urlopen(self, url):
return urlopen(self.live_server_url + url)
class LiveServerAddress(LiveServerBase):
"""
Ensure that the address set in the environment variable is valid.
Refs #2879.
"""
@classmethod
def setUpClass(cls):
# Backup original environment variable
address_predefined = 'DJANGO_LIVE_TEST_SERVER_ADDRESS' in os.environ
old_address = os.environ.get('DJANGO_LIVE_TEST_SERVER_ADDRESS')
# Just the host is not accepted
cls.raises_exception('localhost', ImproperlyConfigured)
# The host must be valid
cls.raises_exception('blahblahblah:8081', socket.error)
# The list of ports must be in a valid format
cls.raises_exception('localhost:8081,', ImproperlyConfigured)
cls.raises_exception('localhost:8081,blah', ImproperlyConfigured)
cls.raises_exception('localhost:8081-', ImproperlyConfigured)
cls.raises_exception('localhost:8081-blah', ImproperlyConfigured)
cls.raises_exception('localhost:8081-8082-8083', ImproperlyConfigured)
# If contrib.staticfiles isn't configured properly, the exception
# should bubble up to the main thread.
old_STATIC_URL = TEST_SETTINGS['STATIC_URL']
TEST_SETTINGS['STATIC_URL'] = None
cls.raises_exception('localhost:8081', ImproperlyConfigured)
TEST_SETTINGS['STATIC_URL'] = old_STATIC_URL
# Restore original environment variable
if address_predefined:
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = old_address
else:
del os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS']
@classmethod
def tearDownClass(cls):
# skip it, as setUpClass doesn't call its parent either
pass
@classmethod
def raises_exception(cls, address, exception):
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = address
try:
super(LiveServerAddress, cls).setUpClass()
raise Exception("The line above should have raised an exception")
except exception:
pass
finally:
super(LiveServerAddress, cls).tearDownClass()
def test_test_test(self):
# Intentionally empty method so that the test is picked up by the
# test runner and the overriden setUpClass() method is executed.
pass
class LiveServerViews(LiveServerBase):
def test_404(self):
"""
Ensure that the LiveServerTestCase serves 404s.
Refs #2879.
"""
try:
self.urlopen('/')
except HTTPError as err:
self.assertEqual(err.code, 404, 'Expected 404 response')
else:
self.fail('Expected 404 response')
def test_view(self):
"""
Ensure that the LiveServerTestCase serves views.
Refs #2879.
"""
f = self.urlopen('/example_view/')
self.assertEqual(f.read(), b'example view')
def test_static_files(self):
"""
Ensure that the LiveServerTestCase serves static files.
Refs #2879.
"""
f = self.urlopen('/static/example_static_file.txt')
self.assertEqual(f.read().rstrip(b'\r\n'), b'example static file')
def test_media_files(self):
"""
Ensure that the LiveServerTestCase serves media files.
Refs #2879.
"""
f = self.urlopen('/media/example_media_file.txt')
self.assertEqual(f.read().rstrip(b'\r\n'), b'example media file')
def test_environ(self):
f = self.urlopen('/environ_view/?%s' % urlencode({'q': 'тест'}))
self.assertIn(b"QUERY_STRING: 'q=%D1%82%D0%B5%D1%81%D1%82'", f.read())
class LiveServerDatabase(LiveServerBase):
def test_fixtures_loaded(self):
"""
Ensure that fixtures are properly loaded and visible to the
live server thread.
Refs #2879.
"""
f = self.urlopen('/model_view/')
self.assertEqual(f.read().splitlines(), [b'jane', b'robert'])
def test_database_writes(self):
"""
Ensure that data written to the database by a view can be read.
Refs #2879.
"""
self.urlopen('/create_model_instance/')
self.assertQuerysetEqual(
Person.objects.all().order_by('pk'),
['jane', 'robert', 'emily'],
lambda b: b.name
)
| bsd-3-clause |
engdan77/edoAutoHomeMobile | twisted/internet/test/test_default.py | 24 | 3539 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.default}.
"""
from __future__ import division, absolute_import
import select, sys
from twisted.trial.unittest import SynchronousTestCase
from twisted.python.runtime import Platform
from twisted.python.reflect import requireModule
from twisted.internet import default
from twisted.internet.default import _getInstallFunction, install
from twisted.internet.test.test_main import NoReactor
from twisted.internet.interfaces import IReactorCore
unix = Platform('posix', 'other')
linux = Platform('posix', 'linux2')
windows = Platform('nt', 'win32')
osx = Platform('posix', 'darwin')
class PollReactorTests(SynchronousTestCase):
"""
Tests for the cases of L{twisted.internet.default._getInstallFunction}
in which it picks the poll(2) or epoll(7)-based reactors.
"""
def assertIsPoll(self, install):
"""
Assert the given function will install the poll() reactor, or select()
if poll() is unavailable.
"""
if hasattr(select, "poll"):
self.assertEqual(
install.__module__, 'twisted.internet.pollreactor')
else:
self.assertEqual(
install.__module__, 'twisted.internet.selectreactor')
def test_unix(self):
"""
L{_getInstallFunction} chooses the poll reactor on arbitrary Unix
platforms, falling back to select(2) if it is unavailable.
"""
install = _getInstallFunction(unix)
self.assertIsPoll(install)
def test_linux(self):
"""
L{_getInstallFunction} chooses the epoll reactor on Linux, or poll if
epoll is unavailable.
"""
install = _getInstallFunction(linux)
if requireModule('twisted.internet.epollreactor') is None:
self.assertIsPoll(install)
else:
self.assertEqual(
install.__module__, 'twisted.internet.epollreactor')
class SelectReactorTests(SynchronousTestCase):
"""
Tests for the cases of L{twisted.internet.default._getInstallFunction}
in which it picks the select(2)-based reactor.
"""
def test_osx(self):
"""
L{_getInstallFunction} chooses the select reactor on OS X.
"""
install = _getInstallFunction(osx)
self.assertEqual(
install.__module__, 'twisted.internet.selectreactor')
def test_windows(self):
"""
L{_getInstallFunction} chooses the select reactor on Windows.
"""
install = _getInstallFunction(windows)
self.assertEqual(
install.__module__, 'twisted.internet.selectreactor')
class InstallationTests(SynchronousTestCase):
"""
Tests for actual installation of the reactor.
"""
def test_install(self):
"""
L{install} installs a reactor.
"""
with NoReactor():
install()
self.assertIn("twisted.internet.reactor", sys.modules)
def test_reactor(self):
"""
Importing L{twisted.internet.reactor} installs the default reactor if
none is installed.
"""
installed = []
def installer():
installed.append(True)
return install()
self.patch(default, "install", installer)
with NoReactor():
from twisted.internet import reactor
self.assertTrue(IReactorCore.providedBy(reactor))
self.assertEqual(installed, [True])
| mit |
lshain-android-source/external-chromium_org | chrome/common/extensions/docs/server2/github_file_system_test.py | 23 | 1750 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import sys
import unittest
from appengine_blobstore import AppEngineBlobstore
from appengine_url_fetcher import AppEngineUrlFetcher
from appengine_wrappers import files
from fake_fetchers import ConfigureFakeFetchers
from github_file_system import GithubFileSystem
from object_store_creator import ObjectStoreCreator
import url_constants
class GithubFileSystemTest(unittest.TestCase):
def setUp(self):
ConfigureFakeFetchers()
self._base_path = os.path.join(sys.path[0],
'test_data',
'github_file_system')
self._file_system = GithubFileSystem.Create(ObjectStoreCreator.ForTest())
def _ReadLocalFile(self, filename):
with open(os.path.join(self._base_path, filename), 'r') as f:
return f.read()
def testList(self):
self.assertEqual(json.loads(self._ReadLocalFile('expected_list.json')),
self._file_system.Read(['/']).Get())
def testRead(self):
self.assertEqual(self._ReadLocalFile('expected_read.txt'),
self._file_system.ReadSingle('/analytics/launch.js'))
def testStat(self):
self.assertEqual(0, self._file_system.Stat('zipball').version)
def testKeyGeneration(self):
self.assertEqual(0, len(files.GetBlobKeys()))
self._file_system.ReadSingle('/analytics/launch.js')
self.assertEqual(1, len(files.GetBlobKeys()))
self._file_system.ReadSingle('/analytics/main.css')
self.assertEqual(1, len(files.GetBlobKeys()))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
lancezlin/ml_template_py | lib/python2.7/site-packages/pip/_vendor/pyparsing.py | 417 | 224171 | # module pyparsing.py
#
# Copyright (c) 2003-2016 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form
C{"<salutation>, <addressee>!"}), built up using L{Word}, L{Literal}, and L{And} elements
(L{'+'<ParserElement.__add__>} operator gives L{And} expressions, strings are auto-converted to
L{Literal} expressions)::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString(hello))
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.
The L{ParseResults} object returned from L{ParserElement.parseString<ParserElement.parseString>} can be accessed as a nested list, a dictionary, or an
object with named attributes.
The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
"""
__version__ = "2.1.10"
__versionTime__ = "07 Oct 2016 01:31 UTC"
__author__ = "Paul McGuire <[email protected]>"
import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
import collections
import pprint
import traceback
import types
from datetime import datetime
try:
from _thread import RLock
except ImportError:
from threading import RLock
try:
from collections import OrderedDict as _OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict as _OrderedDict
except ImportError:
_OrderedDict = None
#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
__all__ = [
'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter',
'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass',
'CloseMatch', 'tokenMap', 'pyparsing_common',
]
system_version = tuple(sys.version_info)[:3]
PY_3 = system_version[0] == 3
if PY_3:
_MAX_INT = sys.maxsize
basestring = str
unichr = chr
_ustr = str
# build list of single arg builtins, that can be used as parse actions
singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
else:
_MAX_INT = sys.maxint
range = xrange
def _ustr(obj):
"""Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
then < returns the unicode object | encodes it with the default encoding | ... >.
"""
if isinstance(obj,unicode):
return obj
try:
# If this works, then _ustr(obj) has the same behaviour as str(obj), so
# it won't break any existing code.
return str(obj)
except UnicodeEncodeError:
# Else encode it
ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
xmlcharref = Regex('&#\d+;')
xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
return xmlcharref.transformString(ret)
# build list of single arg builtins, tolerant of Python version, that can be used as parse actions
singleArgBuiltins = []
import __builtin__
for fname in "sum len sorted reversed list tuple set any all min max".split():
try:
singleArgBuiltins.append(getattr(__builtin__,fname))
except AttributeError:
continue
_generatorType = type((y for y in range(1)))
def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
from_symbols = '&><"\''
to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
for from_,to_ in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data
class _Constants(object):
pass
alphas = string.ascii_uppercase + string.ascii_lowercase
nums = "0123456789"
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
_bslash = chr(92)
printables = "".join(c for c in string.printable if c not in string.whitespace)
class ParseBaseException(Exception):
"""base exception class for all parsing runtime exceptions"""
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, pstr, loc=0, msg=None, elem=None ):
self.loc = loc
if msg is None:
self.msg = pstr
self.pstr = ""
else:
self.msg = msg
self.pstr = pstr
self.parserElement = elem
self.args = (pstr, loc, msg)
@classmethod
def _from_exception(cls, pe):
"""
internal factory method to simplify creating one type of ParseException
from another - avoids having __init__ signature conflicts among subclasses
"""
return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
def __getattr__( self, aname ):
"""supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
"""
if( aname == "lineno" ):
return lineno( self.loc, self.pstr )
elif( aname in ("col", "column") ):
return col( self.loc, self.pstr )
elif( aname == "line" ):
return line( self.loc, self.pstr )
else:
raise AttributeError(aname)
def __str__( self ):
return "%s (at char %d), (line:%d, col:%d)" % \
( self.msg, self.loc, self.lineno, self.column )
def __repr__( self ):
return _ustr(self)
def markInputline( self, markerString = ">!<" ):
"""Extracts the exception line from the input string, and marks
the location of the exception with a special symbol.
"""
line_str = self.line
line_column = self.column - 1
if markerString:
line_str = "".join((line_str[:line_column],
markerString, line_str[line_column:]))
return line_str.strip()
def __dir__(self):
return "lineno col line".split() + dir(type(self))
class ParseException(ParseBaseException):
"""
Exception thrown when parse expressions don't match class;
supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
Example::
try:
Word(nums).setName("integer").parseString("ABC")
except ParseException as pe:
print(pe)
print("column: {}".format(pe.col))
prints::
Expected integer (at char 0), (line:1, col:1)
column: 1
"""
pass
class ParseFatalException(ParseBaseException):
"""user-throwable exception thrown when inconsistent parse content
is found; stops all parsing immediately"""
pass
class ParseSyntaxException(ParseFatalException):
"""just like L{ParseFatalException}, but thrown internally when an
L{ErrorStop<And._ErrorStop>} ('-' operator) indicates that parsing is to stop
immediately because an unbacktrackable syntax error has been found"""
pass
#~ class ReparseException(ParseBaseException):
#~ """Experimental class - parse actions can raise this exception to cause
#~ pyparsing to reparse the input string:
#~ - with a modified input string, and/or
#~ - with a modified start location
#~ Set the values of the ReparseException in the constructor, and raise the
#~ exception in a parse action to cause pyparsing to use the new string/location.
#~ Setting the values as None causes no change to be made.
#~ """
#~ def __init_( self, newstring, restartLoc ):
#~ self.newParseText = newstring
#~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
"""exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive"""
def __init__( self, parseElementList ):
self.parseElementTrace = parseElementList
def __str__( self ):
return "RecursiveGrammarException: %s" % self.parseElementTrace
class _ParseResultsWithOffset(object):
def __init__(self,p1,p2):
self.tup = (p1,p2)
def __getitem__(self,i):
return self.tup[i]
def __repr__(self):
return repr(self.tup[0])
def setOffset(self,i):
self.tup = (self.tup[0],i)
class ParseResults(object):
"""
Structured parse results, to provide multiple means of access to the parsed data:
- as a list (C{len(results)})
- by list index (C{results[0], results[1]}, etc.)
- by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName})
Example::
integer = Word(nums)
date_str = (integer.setResultsName("year") + '/'
+ integer.setResultsName("month") + '/'
+ integer.setResultsName("day"))
# equivalent form:
# date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
# parseString returns a ParseResults object
result = date_str.parseString("1999/12/31")
def test(s, fn=repr):
print("%s -> %s" % (s, fn(eval(s))))
test("list(result)")
test("result[0]")
test("result['month']")
test("result.day")
test("'month' in result")
test("'minutes' in result")
test("result.dump()", str)
prints::
list(result) -> ['1999', '/', '12', '/', '31']
result[0] -> '1999'
result['month'] -> '12'
result.day -> '31'
'month' in result -> True
'minutes' in result -> False
result.dump() -> ['1999', '/', '12', '/', '31']
- day: 31
- month: 12
- year: 1999
"""
def __new__(cls, toklist=None, name=None, asList=True, modal=True ):
if isinstance(toklist, cls):
return toklist
retobj = object.__new__(cls)
retobj.__doinit = True
return retobj
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ):
if self.__doinit:
self.__doinit = False
self.__name = None
self.__parent = None
self.__accumNames = {}
self.__asList = asList
self.__modal = modal
if toklist is None:
toklist = []
if isinstance(toklist, list):
self.__toklist = toklist[:]
elif isinstance(toklist, _generatorType):
self.__toklist = list(toklist)
else:
self.__toklist = [toklist]
self.__tokdict = dict()
if name is not None and name:
if not modal:
self.__accumNames[name] = 0
if isinstance(name,int):
name = _ustr(name) # will always return a str, but use _ustr for consistency
self.__name = name
if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])):
if isinstance(toklist,basestring):
toklist = [ toklist ]
if asList:
if isinstance(toklist,ParseResults):
self[name] = _ParseResultsWithOffset(toklist.copy(),0)
else:
self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
self[name].__name = name
else:
try:
self[name] = toklist[0]
except (KeyError,TypeError,IndexError):
self[name] = toklist
def __getitem__( self, i ):
if isinstance( i, (int,slice) ):
return self.__toklist[i]
else:
if i not in self.__accumNames:
return self.__tokdict[i][-1][0]
else:
return ParseResults([ v[0] for v in self.__tokdict[i] ])
def __setitem__( self, k, v, isinstance=isinstance ):
if isinstance(v,_ParseResultsWithOffset):
self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
sub = v[0]
elif isinstance(k,(int,slice)):
self.__toklist[k] = v
sub = v
else:
self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
sub = v
if isinstance(sub,ParseResults):
sub.__parent = wkref(self)
def __delitem__( self, i ):
if isinstance(i,(int,slice)):
mylen = len( self.__toklist )
del self.__toklist[i]
# convert int to slice
if isinstance(i, int):
if i < 0:
i += mylen
i = slice(i, i+1)
# get removed indices
removed = list(range(*i.indices(mylen)))
removed.reverse()
# fixup indices in token dictionary
for name,occurrences in self.__tokdict.items():
for j in removed:
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
else:
del self.__tokdict[i]
def __contains__( self, k ):
return k in self.__tokdict
def __len__( self ): return len( self.__toklist )
def __bool__(self): return ( not not self.__toklist )
__nonzero__ = __bool__
def __iter__( self ): return iter( self.__toklist )
def __reversed__( self ): return iter( self.__toklist[::-1] )
def _iterkeys( self ):
if hasattr(self.__tokdict, "iterkeys"):
return self.__tokdict.iterkeys()
else:
return iter(self.__tokdict)
def _itervalues( self ):
return (self[k] for k in self._iterkeys())
def _iteritems( self ):
return ((k, self[k]) for k in self._iterkeys())
if PY_3:
keys = _iterkeys
"""Returns an iterator of all named result keys (Python 3.x only)."""
values = _itervalues
"""Returns an iterator of all named result values (Python 3.x only)."""
items = _iteritems
"""Returns an iterator of all named result key-value tuples (Python 3.x only)."""
else:
iterkeys = _iterkeys
"""Returns an iterator of all named result keys (Python 2.x only)."""
itervalues = _itervalues
"""Returns an iterator of all named result values (Python 2.x only)."""
iteritems = _iteritems
"""Returns an iterator of all named result key-value tuples (Python 2.x only)."""
def keys( self ):
"""Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
return list(self.iterkeys())
def values( self ):
"""Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
return list(self.itervalues())
def items( self ):
"""Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
return list(self.iteritems())
def haskeys( self ):
"""Since keys() returns an iterator, this method is helpful in bypassing
code that looks for the existence of any defined results names."""
return bool(self.__tokdict)
def pop( self, *args, **kwargs):
"""
Removes and returns item at specified index (default=C{last}).
Supports both C{list} and C{dict} semantics for C{pop()}. If passed no
argument or an integer argument, it will use C{list} semantics
and pop tokens from the list of parsed tokens. If passed a
non-integer argument (most likely a string), it will use C{dict}
semantics and pop the corresponding value from any defined
results names. A second default return value argument is
supported, just as in C{dict.pop()}.
Example::
def remove_first(tokens):
tokens.pop(0)
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']
label = Word(alphas)
patt = label("LABEL") + OneOrMore(Word(nums))
print(patt.parseString("AAB 123 321").dump())
# Use pop() in a parse action to remove named result (note that corresponding value is not
# removed from list form of results)
def remove_LABEL(tokens):
tokens.pop("LABEL")
return tokens
patt.addParseAction(remove_LABEL)
print(patt.parseString("AAB 123 321").dump())
prints::
['AAB', '123', '321']
- LABEL: AAB
['AAB', '123', '321']
"""
if not args:
args = [-1]
for k,v in kwargs.items():
if k == 'default':
args = (args[0], v)
else:
raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
if (isinstance(args[0], int) or
len(args) == 1 or
args[0] in self):
index = args[0]
ret = self[index]
del self[index]
return ret
else:
defaultvalue = args[1]
return defaultvalue
def get(self, key, defaultValue=None):
"""
Returns named result matching the given key, or if there is no
such name, then returns the given C{defaultValue} or C{None} if no
C{defaultValue} is specified.
Similar to C{dict.get()}.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString("1999/12/31")
print(result.get("year")) # -> '1999'
print(result.get("hour", "not specified")) # -> 'not specified'
print(result.get("hour")) # -> None
"""
if key in self:
return self[key]
else:
return defaultValue
def insert( self, index, insStr ):
"""
Inserts new element at location index in the list of parsed tokens.
Similar to C{list.insert()}.
Example::
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
# use a parse action to insert the parse location in the front of the parsed results
def insert_locn(locn, tokens):
tokens.insert(0, locn)
print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
"""
self.__toklist.insert(index, insStr)
# fixup indices in token dictionary
for name,occurrences in self.__tokdict.items():
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
def append( self, item ):
"""
Add single element to end of ParseResults list of elements.
Example::
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
# use a parse action to compute the sum of the parsed integers, and add it to the end
def append_sum(tokens):
tokens.append(sum(map(int, tokens)))
print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
"""
self.__toklist.append(item)
def extend( self, itemseq ):
"""
Add sequence of elements to end of ParseResults list of elements.
Example::
patt = OneOrMore(Word(alphas))
# use a parse action to append the reverse of the matched strings, to make a palindrome
def make_palindrome(tokens):
tokens.extend(reversed([t[::-1] for t in tokens]))
return ''.join(tokens)
print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
"""
if isinstance(itemseq, ParseResults):
self += itemseq
else:
self.__toklist.extend(itemseq)
def clear( self ):
"""
Clear all elements and results names.
"""
del self.__toklist[:]
self.__tokdict.clear()
def __getattr__( self, name ):
try:
return self[name]
except KeyError:
return ""
if name in self.__tokdict:
if name not in self.__accumNames:
return self.__tokdict[name][-1][0]
else:
return ParseResults([ v[0] for v in self.__tokdict[name] ])
else:
return ""
def __add__( self, other ):
ret = self.copy()
ret += other
return ret
def __iadd__( self, other ):
if other.__tokdict:
offset = len(self.__toklist)
addoffset = lambda a: offset if a<0 else a+offset
otheritems = other.__tokdict.items()
otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
for (k,vlist) in otheritems for v in vlist]
for k,v in otherdictitems:
self[k] = v
if isinstance(v[0],ParseResults):
v[0].__parent = wkref(self)
self.__toklist += other.__toklist
self.__accumNames.update( other.__accumNames )
return self
def __radd__(self, other):
if isinstance(other,int) and other == 0:
# useful for merging many ParseResults using sum() builtin
return self.copy()
else:
# this may raise a TypeError - so be it
return other + self
def __repr__( self ):
return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
def __str__( self ):
return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'
def _asStringList( self, sep='' ):
out = []
for item in self.__toklist:
if out and sep:
out.append(sep)
if isinstance( item, ParseResults ):
out += item._asStringList()
else:
out.append( _ustr(item) )
return out
def asList( self ):
"""
Returns the parse results as a nested list of matching tokens, all converted to strings.
Example::
patt = OneOrMore(Word(alphas))
result = patt.parseString("sldkj lsdkj sldkj")
# even though the result prints in string-like form, it is actually a pyparsing ParseResults
print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
# Use asList() to create an actual list
result_list = result.asList()
print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
"""
return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]
def asDict( self ):
"""
Returns the named parse results as a nested dictionary.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString('12/31/1999')
print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
result_dict = result.asDict()
print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
# even though a ParseResults supports dict-like access, sometime you just need to have a dict
import json
print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
"""
if PY_3:
item_fn = self.items
else:
item_fn = self.iteritems
def toItem(obj):
if isinstance(obj, ParseResults):
if obj.haskeys():
return obj.asDict()
else:
return [toItem(v) for v in obj]
else:
return obj
return dict((k,toItem(v)) for k,v in item_fn())
def copy( self ):
"""
Returns a new copy of a C{ParseResults} object.
"""
ret = ParseResults( self.__toklist )
ret.__tokdict = self.__tokdict.copy()
ret.__parent = self.__parent
ret.__accumNames.update( self.__accumNames )
ret.__name = self.__name
return ret
def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
"""
(Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
"""
nl = "\n"
out = []
namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
for v in vlist)
nextLevelIndent = indent + " "
# collapse out indents if formatting is not desired
if not formatted:
indent = ""
nextLevelIndent = ""
nl = ""
selfTag = None
if doctag is not None:
selfTag = doctag
else:
if self.__name:
selfTag = self.__name
if not selfTag:
if namedItemsOnly:
return ""
else:
selfTag = "ITEM"
out += [ nl, indent, "<", selfTag, ">" ]
for i,res in enumerate(self.__toklist):
if isinstance(res,ParseResults):
if i in namedItems:
out += [ res.asXML(namedItems[i],
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
out += [ res.asXML(None,
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
# individual token, see if there is a name for it
resTag = None
if i in namedItems:
resTag = namedItems[i]
if not resTag:
if namedItemsOnly:
continue
else:
resTag = "ITEM"
xmlBodyText = _xml_escape(_ustr(res))
out += [ nl, nextLevelIndent, "<", resTag, ">",
xmlBodyText,
"</", resTag, ">" ]
out += [ nl, indent, "</", selfTag, ">" ]
return "".join(out)
def __lookup(self,sub):
for k,vlist in self.__tokdict.items():
for v,loc in vlist:
if sub is v:
return k
return None
def getName(self):
"""
Returns the results name for this token expression. Useful when several
different expressions might match at a particular location.
Example::
integer = Word(nums)
ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
house_number_expr = Suppress('#') + Word(nums, alphanums)
user_data = (Group(house_number_expr)("house_number")
| Group(ssn_expr)("ssn")
| Group(integer)("age"))
user_info = OneOrMore(user_data)
result = user_info.parseString("22 111-22-3333 #221B")
for item in result:
print(item.getName(), ':', item[0])
prints::
age : 22
ssn : 111-22-3333
house_number : 221B
"""
if self.__name:
return self.__name
elif self.__parent:
par = self.__parent()
if par:
return par.__lookup(self)
else:
return None
elif (len(self) == 1 and
len(self.__tokdict) == 1 and
next(iter(self.__tokdict.values()))[0][1] in (0,-1)):
return next(iter(self.__tokdict.keys()))
else:
return None
def dump(self, indent='', depth=0, full=True):
"""
Diagnostic method for listing out the contents of a C{ParseResults}.
Accepts an optional C{indent} argument so that this string can be embedded
in a nested display of other data.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString('12/31/1999')
print(result.dump())
prints::
['12', '/', '31', '/', '1999']
- day: 1999
- month: 31
- year: 12
"""
out = []
NL = '\n'
out.append( indent+_ustr(self.asList()) )
if full:
if self.haskeys():
items = sorted((str(k), v) for k,v in self.items())
for k,v in items:
if out:
out.append(NL)
out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
if isinstance(v,ParseResults):
if v:
out.append( v.dump(indent,depth+1) )
else:
out.append(_ustr(v))
else:
out.append(repr(v))
elif any(isinstance(vv,ParseResults) for vv in self):
v = self
for i,vv in enumerate(v):
if isinstance(vv,ParseResults):
out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) ))
else:
out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv)))
return "".join(out)
def pprint(self, *args, **kwargs):
"""
Pretty-printer for parsed results as a list, using the C{pprint} module.
Accepts additional positional or keyword args as defined for the
C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})
Example::
ident = Word(alphas, alphanums)
num = Word(nums)
func = Forward()
term = ident | num | Group('(' + func + ')')
func <<= ident + Group(Optional(delimitedList(term)))
result = func.parseString("fna a,b,(fnb c,d,200),100")
result.pprint(width=40)
prints::
['fna',
['a',
'b',
['(', 'fnb', ['c', 'd', '200'], ')'],
'100']]
"""
pprint.pprint(self.asList(), *args, **kwargs)
# add support for pickle protocol
def __getstate__(self):
return ( self.__toklist,
( self.__tokdict.copy(),
self.__parent is not None and self.__parent() or None,
self.__accumNames,
self.__name ) )
def __setstate__(self,state):
self.__toklist = state[0]
(self.__tokdict,
par,
inAccumNames,
self.__name) = state[1]
self.__accumNames = {}
self.__accumNames.update(inAccumNames)
if par is not None:
self.__parent = wkref(par)
else:
self.__parent = None
def __getnewargs__(self):
return self.__toklist, self.__name, self.__asList, self.__modal
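# Illustrative sketch of the pickle support defined above ('result' is any ParseResults):
#   import pickle
#   restored = pickle.loads(pickle.dumps(result))
#   assert restored.asList() == result.asList() and restored.asDict() == result.asDict()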
def __dir__(self):
return (dir(type(self)) + list(self.keys()))
collections.MutableMapping.register(ParseResults)
def col (loc,strg):
"""Returns current column within a string, counting newlines as line separators.
The first column is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
s = strg
return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc)
def lineno(loc,strg):
"""Returns current line number within a string, counting newlines as line separators.
The first line is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
return strg.count("\n",0,loc) + 1
def line( loc, strg ):
"""Returns the line of text containing loc within a string, counting newlines as line separators.
"""
lastCR = strg.rfind("\n", 0, loc)
nextCR = strg.find("\n", loc)
if nextCR >= 0:
return strg[lastCR+1:nextCR]
else:
return strg[lastCR+1:]
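# Illustrative example of the three helpers above: for instring = "abc\ndef",
#   col(5, instring) == 2, lineno(5, instring) == 2, line(5, instring) == "def"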
def _defaultStartDebugAction( instring, loc, expr ):
print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )))
def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
def _defaultExceptionDebugAction( instring, loc, expr, exc ):
print ("Exception raised:" + _ustr(exc))
def nullDebugAction(*args):
"""'Do-nothing' debug action, to suppress debugging output during parsing."""
pass
# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
#~ 'decorator to trim function calls to match the arity of the target'
#~ def _trim_arity(func, maxargs=3):
#~ if func in singleArgBuiltins:
#~ return lambda s,l,t: func(t)
#~ limit = 0
#~ foundArity = False
#~ def wrapper(*args):
#~ nonlocal limit,foundArity
#~ while 1:
#~ try:
#~ ret = func(*args[limit:])
#~ foundArity = True
#~ return ret
#~ except TypeError:
#~ if limit == maxargs or foundArity:
#~ raise
#~ limit += 1
#~ continue
#~ return wrapper
# this version is Python 2.x-3.x cross-compatible
'decorator to trim function calls to match the arity of the target'
def _trim_arity(func, maxargs=2):
if func in singleArgBuiltins:
return lambda s,l,t: func(t)
limit = [0]
foundArity = [False]
# traceback return data structure changed in Py3.5 - normalize back to plain tuples
if system_version[:2] >= (3,5):
def extract_stack(limit=0):
# special handling for Python 3.5.0 - extra deep call stack by 1
offset = -3 if system_version == (3,5,0) else -2
frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset]
return [(frame_summary.filename, frame_summary.lineno)]
def extract_tb(tb, limit=0):
frames = traceback.extract_tb(tb, limit=limit)
frame_summary = frames[-1]
return [(frame_summary.filename, frame_summary.lineno)]
else:
extract_stack = traceback.extract_stack
extract_tb = traceback.extract_tb
# synthesize what would be returned by traceback.extract_stack at the call to
# user's parse action 'func', so that we don't incur call penalty at parse time
LINE_DIFF = 6
# IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
# THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
this_line = extract_stack(limit=2)[-1]
pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF)
def wrapper(*args):
while 1:
try:
ret = func(*args[limit[0]:])
foundArity[0] = True
return ret
except TypeError:
# re-raise TypeErrors if they did not come from our arity testing
if foundArity[0]:
raise
else:
try:
tb = sys.exc_info()[-1]
if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
raise
finally:
del tb
if limit[0] <= maxargs:
limit[0] += 1
continue
raise
# copy func name to wrapper for sensible debug output
func_name = "<parse action>"
try:
func_name = getattr(func, '__name__',
getattr(func, '__class__').__name__)
except Exception:
func_name = str(func)
wrapper.__name__ = func_name
return wrapper
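# Illustrative sketch: _trim_arity lets parse actions be written with any of the
# supported arities; wrapped by setParseAction, both of the following hypothetical
# actions end up being called correctly:
#   integer.setParseAction(lambda t: int(t[0]))
#   integer.setParseAction(lambda s, l, t: int(t[0]))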
class ParserElement(object):
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS = " \n\t\r"
verbose_stacktrace = False
@staticmethod
def setDefaultWhitespaceChars( chars ):
r"""
Overrides the default whitespace chars
Example::
# default whitespace chars are space, <TAB> and newline
OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl']
# change to just treat newline as significant
ParserElement.setDefaultWhitespaceChars(" \t")
OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def']
"""
ParserElement.DEFAULT_WHITE_CHARS = chars
@staticmethod
def inlineLiteralsUsing(cls):
"""
Set class to be used for inclusion of string literals into a parser.
Example::
# default literal class used is Literal
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
# change to Suppress
ParserElement.inlineLiteralsUsing(Suppress)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
date_str.parseString("1999/12/31") # -> ['1999', '12', '31']
"""
ParserElement._literalStringClass = cls
def __init__( self, savelist=False ):
self.parseAction = list()
self.failAction = None
#~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall
self.strRepr = None
self.resultsName = None
self.saveAsList = savelist
self.skipWhitespace = True
self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
self.copyDefaultWhiteChars = True
self.mayReturnEmpty = False # used when checking for left-recursion
self.keepTabs = False
self.ignoreExprs = list()
self.debug = False
self.streamlined = False
self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
self.errmsg = ""
self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
self.debugActions = ( None, None, None ) #custom debug actions
self.re = None
self.callPreparse = True # used to avoid redundant calls to preParse
self.callDuringTry = False
def copy( self ):
"""
Make a copy of this C{ParserElement}. Useful for defining different parse actions
for the same parsing pattern, using copies of the original parse element.
Example::
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
prints::
[5120, 100, 655360, 268435456]
Equivalent form of C{expr.copy()} is just C{expr()}::
integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
"""
cpy = copy.copy( self )
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
def setName( self, name ):
"""
Define name for this expression, makes debugging and exception messages clearer.
Example::
Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1)
"""
self.name = name
self.errmsg = "Expected " + self.name
if hasattr(self,"exception"):
self.exception.msg = self.errmsg
return self
def setResultsName( self, name, listAllMatches=False ):
"""
Define name for referencing matching tokens as a nested attribute
of the returned parse results.
NOTE: this returns a *copy* of the original C{ParserElement} object;
this is so that the client can define a basic element, such as an
integer, and reference it in multiple places with different names.
You can also set results names using the abbreviated syntax,
C{expr("name")} in place of C{expr.setResultsName("name")} -
see L{I{__call__}<__call__>}.
Example::
date_str = (integer.setResultsName("year") + '/'
+ integer.setResultsName("month") + '/'
+ integer.setResultsName("day"))
# equivalent form:
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
"""
newself = self.copy()
if name.endswith("*"):
name = name[:-1]
listAllMatches=True
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
def setBreak(self,breakFlag = True):
"""Method to invoke the Python pdb debugger when this element is
about to be parsed. Set C{breakFlag} to True to enable, False to
disable.
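Example (illustrative; requires an interactive debugger session)::
# drop into pdb just before 'integer' is parsed
integer = Word(nums).setBreak()
integer.parseString("123")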
"""
if breakFlag:
_parseMethod = self._parse
def breaker(instring, loc, doActions=True, callPreParse=True):
import pdb
pdb.set_trace()
return _parseMethod( instring, loc, doActions, callPreParse )
breaker._originalParseMethod = _parseMethod
self._parse = breaker
else:
if hasattr(self._parse,"_originalParseMethod"):
self._parse = self._parse._originalParseMethod
return self
def setParseAction( self, *fns, **kwargs ):
"""
Define action to perform when successfully matching parse element definition.
Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
- s = the original string being parsed (see note below)
- loc = the location of the matching substring
- toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
If the functions in fns modify the tokens, they can return them as the return
value from fn, and the modified list of tokens will replace the original.
Otherwise, fn does not need to return any value.
Optional keyword arguments:
- callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{parseString}<parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
Example::
integer = Word(nums)
date_str = integer + '/' + integer + '/' + integer
date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
# use parse action to convert to ints at parse time
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
date_str = integer + '/' + integer + '/' + integer
# note that integer fields are now ints, not strings
date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31]
"""
self.parseAction = list(map(_trim_arity, list(fns)))
self.callDuringTry = kwargs.get("callDuringTry", False)
return self
def addParseAction( self, *fns, **kwargs ):
"""
Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}.
See examples in L{I{copy}<copy>}.
"""
self.parseAction += list(map(_trim_arity, list(fns)))
self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
return self
def addCondition(self, *fns, **kwargs):
"""Add a boolean predicate function to expression's list of parse actions. See
L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction},
functions passed to C{addCondition} need to return boolean success/fail of the condition.
Optional keyword arguments:
- message = define a custom message to be used in the raised exception
- fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
Example::
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
year_int = integer.copy()
year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
date_str = year_int + '/' + integer + '/' + integer
result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
"""
msg = kwargs.get("message", "failed user-defined condition")
exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException
for fn in fns:
def pa(s,l,t):
if not bool(_trim_arity(fn)(s,l,t)):
raise exc_type(s,l,msg)
self.parseAction.append(pa)
self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
return self
def setFailAction( self, fn ):
"""Define action to perform if parsing fails at this expression.
Fail action fn is a callable function that takes the arguments
C{fn(s,loc,expr,err)} where:
- s = string being parsed
- loc = location where expression match was attempted and failed
- expr = the parse expression that failed
- err = the exception thrown
The function returns no value. It may throw C{L{ParseFatalException}}
if it is desired to stop parsing immediately."""
self.failAction = fn
return self
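# Illustrative sketch of a fail action (names are hypothetical):
#   def report_failure(s, loc, expr, err):
#       print("failed to match %s at line %d" % (expr, lineno(loc, s)))
#   date = Word(nums) - '/' - Word(nums)
#   date.setFailAction(report_failure)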
def _skipIgnorables( self, instring, loc ):
exprsFound = True
while exprsFound:
exprsFound = False
for e in self.ignoreExprs:
try:
while 1:
loc,dummy = e._parse( instring, loc )
exprsFound = True
except ParseException:
pass
return loc
def preParse( self, instring, loc ):
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
return loc, []
def postParse( self, instring, loc, tokenlist ):
return tokenlist
#~ @profile
def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
debugging = ( self.debug ) #and doActions )
if debugging or self.failAction:
#~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
if (self.debugActions[0] ):
self.debugActions[0]( instring, loc, self )
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = preloc
try:
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
except ParseBaseException as err:
#~ print ("Exception raised:", err)
if self.debugActions[2]:
self.debugActions[2]( instring, tokensStart, self, err )
if self.failAction:
self.failAction( instring, tokensStart, self, err )
raise
else:
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = preloc
if self.mayIndexError or loc >= len(instring):
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
else:
loc,tokens = self.parseImpl( instring, preloc, doActions )
tokens = self.postParse( instring, loc, tokens )
retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
if self.parseAction and (doActions or self.callDuringTry):
if debugging:
try:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
except ParseBaseException as err:
#~ print "Exception raised in user parse action:", err
if (self.debugActions[2] ):
self.debugActions[2]( instring, tokensStart, self, err )
raise
else:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
if debugging:
#~ print ("Matched",self,"->",retTokens.asList())
if (self.debugActions[1] ):
self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
return loc, retTokens
def tryParse( self, instring, loc ):
try:
return self._parse( instring, loc, doActions=False )[0]
except ParseFatalException:
raise ParseException( instring, loc, self.errmsg, self)
def canParseNext(self, instring, loc):
try:
self.tryParse(instring, loc)
except (ParseException, IndexError):
return False
else:
return True
class _UnboundedCache(object):
def __init__(self):
cache = {}
self.not_in_cache = not_in_cache = object()
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
def clear(self):
cache.clear()
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
if _OrderedDict is not None:
class _FifoCache(object):
def __init__(self, size):
self.not_in_cache = not_in_cache = object()
cache = _OrderedDict()
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
if len(cache) > size:
cache.popitem(False)
def clear(self):
cache.clear()
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
else:
class _FifoCache(object):
def __init__(self, size):
self.not_in_cache = not_in_cache = object()
cache = {}
key_fifo = collections.deque([], size)
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
if len(cache) > size:
cache.pop(key_fifo.popleft(), None)
key_fifo.append(key)
def clear(self):
cache.clear()
key_fifo.clear()
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
# argument cache for optimizing repeated calls when backtracking through recursive expressions
packrat_cache = {} # this is set later by enabledPackrat(); this is here so that resetCache() doesn't fail
packrat_cache_lock = RLock()
packrat_cache_stats = [0, 0]
# this method gets repeatedly called during backtracking with the same arguments -
# we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
HIT, MISS = 0, 1
lookup = (self, instring, loc, callPreParse, doActions)
with ParserElement.packrat_cache_lock:
cache = ParserElement.packrat_cache
value = cache.get(lookup)
if value is cache.not_in_cache:
ParserElement.packrat_cache_stats[MISS] += 1
try:
value = self._parseNoCache(instring, loc, doActions, callPreParse)
except ParseBaseException as pe:
# cache a copy of the exception, without the traceback
cache.set(lookup, pe.__class__(*pe.args))
raise
else:
cache.set(lookup, (value[0], value[1].copy()))
return value
else:
ParserElement.packrat_cache_stats[HIT] += 1
if isinstance(value, Exception):
raise value
return (value[0], value[1].copy())
_parse = _parseNoCache
@staticmethod
def resetCache():
ParserElement.packrat_cache.clear()
ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)
_packratEnabled = False
@staticmethod
def enablePackrat(cache_size_limit=128):
"""Enables "packrat" parsing, which adds memoizing to the parsing logic.
Repeated parse attempts at the same string location (which happens
often in many complex grammars) can immediately return a cached value,
instead of re-executing parsing/validating code. Memoizing is done for
both valid results and parsing exceptions.
Parameters:
- cache_size_limit - (default=C{128}) - if an integer value is provided
will limit the size of the packrat cache; if None is passed, then
the cache size will be unbounded; if 0 is passed, the cache will
be effectively disabled.
This speedup may break existing programs that use parse actions that
have side-effects. For this reason, packrat parsing is disabled when
you first import pyparsing. To activate the packrat feature, your
program must call the class method C{ParserElement.enablePackrat()}. If
your program uses C{psyco} to "compile as you go", you must call
C{enablePackrat} before calling C{psyco.full()}. If you do not do this,
Python will crash. For best results, call C{enablePackrat()} immediately
after importing pyparsing.
Example::
import pyparsing
pyparsing.ParserElement.enablePackrat()
"""
if not ParserElement._packratEnabled:
ParserElement._packratEnabled = True
if cache_size_limit is None:
ParserElement.packrat_cache = ParserElement._UnboundedCache()
else:
ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
ParserElement._parse = ParserElement._parseCache
def parseString( self, instring, parseAll=False ):
"""
Execute the parse expression with the given string.
This is the main interface to the client code, once the complete
expression has been built.
If you want the grammar to require that the entire input string be
successfully parsed, then set C{parseAll} to True (equivalent to ending
the grammar with C{L{StringEnd()}}).
Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
in order to report proper column numbers in parse actions.
If the input string contains tabs and
the grammar uses parse actions that use the C{loc} argument to index into the
string being parsed, you can ensure you have a consistent view of the input
string by:
- calling C{parseWithTabs} on your grammar before calling C{parseString}
(see L{I{parseWithTabs}<parseWithTabs>})
- define your parse action using the full C{(s,loc,toks)} signature, and
reference the input string using the parse action's C{s} argument
- explicitly expand the tabs in your input string before calling
C{parseString}
Example::
Word('a').parseString('aaaaabaaa') # -> ['aaaaa']
Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text
"""
ParserElement.resetCache()
if not self.streamlined:
self.streamline()
#~ self.saveAsList = True
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = instring.expandtabs()
try:
loc, tokens = self._parse( instring, 0 )
if parseAll:
loc = self.preParse( instring, loc )
se = Empty() + StringEnd()
se._parse( instring, loc )
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
else:
return tokens
def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
"""
Scan the input string for expression matches. Each match will return the
matching tokens, start location, and end location. May be called with optional
C{maxMatches} argument, to clip scanning after 'n' matches are found. If
C{overlap} is specified, then overlapping matches will be reported.
Note that the start and end locations are reported relative to the string
being parsed. See L{I{parseString}<parseString>} for more information on parsing
strings with embedded tabs.
Example::
source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
print(source)
for tokens,start,end in Word(alphas).scanString(source):
print(' '*start + '^'*(end-start))
print(' '*start + tokens[0])
prints::
sldjf123lsdjjkf345sldkjf879lkjsfd987
^^^^^
sldjf
^^^^^^^
lsdjjkf
^^^^^^
sldkjf
^^^^^^
lkjsfd
"""
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = _ustr(instring).expandtabs()
instrlen = len(instring)
loc = 0
preparseFn = self.preParse
parseFn = self._parse
ParserElement.resetCache()
matches = 0
try:
while loc <= instrlen and matches < maxMatches:
try:
preloc = preparseFn( instring, loc )
nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
except ParseException:
loc = preloc+1
else:
if nextLoc > loc:
matches += 1
yield tokens, preloc, nextLoc
if overlap:
nextloc = preparseFn( instring, loc )
if nextloc > loc:
loc = nextLoc
else:
loc += 1
else:
loc = nextLoc
else:
loc = preloc+1
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def transformString( self, instring ):
"""
Extension to C{L{scanString}}, to modify matching text with modified tokens that may
be returned from a parse action. To use C{transformString}, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking C{transformString()} on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. C{transformString()} returns the resulting transformed string.
Example::
wd = Word(alphas)
wd.setParseAction(lambda toks: toks[0].title())
print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
Prints::
Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
"""
out = []
lastE = 0
# force preservation of <TAB>s, to minimize unwanted transformation of string, and to
# keep string locs straight between transformString and scanString
self.keepTabs = True
try:
for t,s,e in self.scanString( instring ):
out.append( instring[lastE:s] )
if t:
if isinstance(t,ParseResults):
out += t.asList()
elif isinstance(t,list):
out += t
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
out = [o for o in out if o]
return "".join(map(_ustr,_flatten(out)))
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def searchString( self, instring, maxMatches=_MAX_INT ):
"""
Another extension to C{L{scanString}}, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
C{maxMatches} argument, to clip searching after 'n' matches are found.
Example::
# a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
cap_word = Word(alphas.upper(), alphas.lower())
print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
prints::
['More', 'Iron', 'Lead', 'Gold', 'I']
"""
try:
return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
"""
Generator method to split a string using the given expression as a separator.
May be called with optional C{maxsplit} argument, to limit the number of splits;
and the optional C{includeSeparators} argument (default=C{False}), indicating whether the
matching separator text should be included in the split results.
Example::
punc = oneOf(list(".,;:/-!?"))
print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
prints::
['This', ' this', '', ' this sentence', ' is badly punctuated', '']
"""
splits = 0
last = 0
for t,s,e in self.scanString(instring, maxMatches=maxsplit):
yield instring[last:s]
if includeSeparators:
yield t[0]
last = e
yield instring[last:]
def __add__(self, other ):
"""
Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement
converts them to L{Literal}s by default.
Example::
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString(hello))
Prints::
Hello, World! -> ['Hello', ',', 'World', '!']
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, other ] )
def __radd__(self, other ):
"""
Implementation of + operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other + self
def __sub__(self, other):
"""
Implementation of - operator, returns C{L{And}} with error stop
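Example (illustrative)::
# once the '-' point is passed, a failure raises immediately instead of backtracking
assignment = Word(alphas) - '=' - Word(nums)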
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, And._ErrorStop(), other ] )
def __rsub__(self, other ):
"""
Implementation of - operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other - self
def __mul__(self,other):
"""
Implementation of * operator, allows use of C{expr * 3} in place of
C{expr + expr + expr}. Expressions may also be multiplied by a 2-integer
tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples
may also include C{None} as in:
- C{expr*(n,None)} or C{expr*(n,)} is equivalent
to C{expr*n + L{ZeroOrMore}(expr)}
(read as "at least n instances of C{expr}")
- C{expr*(None,n)} is equivalent to C{expr*(0,n)}
(read as "0 to n instances of C{expr}")
- C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
- C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}
Note that C{expr*(None,n)} does not raise an exception if
more than n exprs exist in the input stream; that is,
C{expr*(None,n)} does not enforce a maximum number of expr
occurrences. If this behavior is desired, then write
C{expr*(None,n) + ~expr}
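Example (a minimal sketch of the forms described above)::
wd = Word(alphas)
three_words = wd*3          # same as wd + wd + wd
two_to_four = wd*(2,4)      # between 2 and 4 words
at_least_two = wd*(2,None)  # 2 words, then ZeroOrMore(wd)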
"""
if isinstance(other,int):
minElements, optElements = other,0
elif isinstance(other,tuple):
other = (other + (None, None))[:2]
if other[0] is None:
other = (0, other[1])
if isinstance(other[0],int) and other[1] is None:
if other[0] == 0:
return ZeroOrMore(self)
if other[0] == 1:
return OneOrMore(self)
else:
return self*other[0] + ZeroOrMore(self)
elif isinstance(other[0],int) and isinstance(other[1],int):
minElements, optElements = other
optElements -= minElements
else:
raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
else:
raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
if minElements < 0:
raise ValueError("cannot multiply ParserElement by negative value")
if optElements < 0:
raise ValueError("second tuple value must be greater or equal to first tuple value")
if minElements == optElements == 0:
raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
if (optElements):
def makeOptionalList(n):
if n>1:
return Optional(self + makeOptionalList(n-1))
else:
return Optional(self)
if minElements:
if minElements == 1:
ret = self + makeOptionalList(optElements)
else:
ret = And([self]*minElements) + makeOptionalList(optElements)
else:
ret = makeOptionalList(optElements)
else:
if minElements == 1:
ret = self
else:
ret = And([self]*minElements)
return ret
def __rmul__(self, other):
return self.__mul__(other)
def __or__(self, other ):
"""
Implementation of | operator - returns C{L{MatchFirst}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return MatchFirst( [ self, other ] )
def __ror__(self, other ):
"""
Implementation of | operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other | self
def __xor__(self, other ):
"""
Implementation of ^ operator - returns C{L{Or}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Or( [ self, other ] )
def __rxor__(self, other ):
"""
Implementation of ^ operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other ^ self
def __and__(self, other ):
"""
Implementation of & operator - returns C{L{Each}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Each( [ self, other ] )
def __rand__(self, other ):
"""
Implementation of & operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other & self
def __invert__( self ):
"""
Implementation of ~ operator - returns C{L{NotAny}}
"""
return NotAny( self )
def __call__(self, name=None):
"""
Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.
If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
passed as C{True}.
If C{name} is omitted, same as calling C{L{copy}}.
Example::
# these are equivalent
userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
"""
if name is not None:
return self.setResultsName(name)
else:
return self.copy()
def suppress( self ):
"""
Suppresses the output of this C{ParserElement}; useful to keep punctuation from
cluttering up returned output.
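Example (illustrative)::
# keep the numbers, drop the '/' separators
date = Word(nums) + Literal('/').suppress() + Word(nums) + Literal('/').suppress() + Word(nums)
date.parseString("1999/12/31") # -> ['1999', '12', '31']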
"""
return Suppress( self )
def leaveWhitespace( self ):
"""
Disables the skipping of whitespace before matching the characters in the
C{ParserElement}'s defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars.
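Example (illustrative)::
# require the ':' to follow the name with no intervening whitespace
pair = Word(alphas) + Literal(':').leaveWhitespace() + Word(alphas)
pair.parseString("key:value")  # -> ['key', ':', 'value']
pair.parseString("key :value") # -> Exception: Expected ":"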
"""
self.skipWhitespace = False
return self
def setWhitespaceChars( self, chars ):
"""
Overrides the default whitespace chars
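Example (illustrative)::
# treat only spaces and tabs as skippable whitespace for this expression
expr = Word(alphas).setWhitespaceChars(" \t")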
"""
self.skipWhitespace = True
self.whiteChars = chars
self.copyDefaultWhiteChars = False
return self
def parseWithTabs( self ):
"""
Overrides the default behavior of expanding C{<TAB>}s to spaces before parsing the input
string, leaving any tabs intact. Must be called before C{parseString} when the input
grammar contains elements that match C{<TAB>} characters.
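Example (illustrative sketch)::
# keep literal <TAB>s in the input so the grammar can match them
row = Word(alphas) + White('\t') + Word(alphas)
row.parseWithTabs().parseString('abc\tdef') # -> ['abc', '\t', 'def']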
"""
self.keepTabs = True
return self
def ignore( self, other ):
"""
Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
Example::
patt = OneOrMore(Word(alphas))
patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
patt.ignore(cStyleComment)
patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
"""
if isinstance(other, basestring):
other = Suppress(other)
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
self.ignoreExprs.append(other)
else:
self.ignoreExprs.append( Suppress( other.copy() ) )
return self
def setDebugActions( self, startAction, successAction, exceptionAction ):
"""
Enable display of debugging messages while doing pattern matching.
"""
self.debugActions = (startAction or _defaultStartDebugAction,
successAction or _defaultSuccessDebugAction,
exceptionAction or _defaultExceptionDebugAction)
self.debug = True
return self
def setDebug( self, flag=True ):
"""
Enable display of debugging messages while doing pattern matching.
Set C{flag} to True to enable, False to disable.
Example::
wd = Word(alphas).setName("alphaword")
integer = Word(nums).setName("numword")
term = wd | integer
# turn on debugging for wd
wd.setDebug()
OneOrMore(term).parseString("abc 123 xyz 890")
prints::
Match alphaword at loc 0(1,1)
Matched alphaword -> ['abc']
Match alphaword at loc 3(1,4)
Exception raised:Expected alphaword (at char 4), (line:1, col:5)
Match alphaword at loc 7(1,8)
Matched alphaword -> ['xyz']
Match alphaword at loc 11(1,12)
Exception raised:Expected alphaword (at char 12), (line:1, col:13)
Match alphaword at loc 15(1,16)
Exception raised:Expected alphaword (at char 15), (line:1, col:16)
The output shown is that produced by the default debug actions - custom debug actions can be
specified using L{setDebugActions}. Prior to attempting
to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"}
is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"}
message is shown. Also note the use of L{setName} to assign a human-readable name to the expression,
which makes debugging and exception messages easier to understand - for instance, the default
name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}.
"""
if flag:
self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
else:
self.debug = False
return self
def __str__( self ):
return self.name
def __repr__( self ):
return _ustr(self)
def streamline( self ):
self.streamlined = True
self.strRepr = None
return self
def checkRecursion( self, parseElementList ):
pass
def validate( self, validateTrace=[] ):
"""
Check defined expressions for valid structure, check for infinite recursive definitions.
"""
self.checkRecursion( [] )
def parseFile( self, file_or_filename, parseAll=False ):
"""
Execute the parse expression on the given file or filename.
If a filename is specified (instead of a file object),
the entire file is opened, read, and closed before parsing.
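Example (illustrative; 'data.txt' and 'my_grammar' are hypothetical)::
results = my_grammar.parseFile("data.txt", parseAll=True)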
"""
try:
file_contents = file_or_filename.read()
except AttributeError:
with open(file_or_filename, "r") as f:
file_contents = f.read()
try:
return self.parseString(file_contents, parseAll)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def __eq__(self,other):
if isinstance(other, ParserElement):
return self is other or vars(self) == vars(other)
elif isinstance(other, basestring):
return self.matches(other)
else:
return super(ParserElement,self)==other
def __ne__(self,other):
return not (self == other)
def __hash__(self):
return hash(id(self))
def __req__(self,other):
return self == other
def __rne__(self,other):
return not (self == other)
def matches(self, testString, parseAll=True):
"""
Method for quick testing of a parser against a test string. Good for simple
inline microtests of sub-expressions while building up a larger parser.
Parameters:
- testString - to test against this expression for a match
- parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
Example::
expr = Word(nums)
assert expr.matches("100")
"""
try:
self.parseString(_ustr(testString), parseAll=parseAll)
return True
except ParseBaseException:
return False
def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False):
"""
Execute the parse expression on a series of test strings, showing each
test, the parsed results or where the parse failed. Quick and easy way to
run a parse expression against a list of sample strings.
Parameters:
- tests - a list of separate test strings, or a multiline string of test strings
- parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
- comment - (default=C{'#'}) - expression for indicating embedded comments in the test
string; pass None to disable comment filtering
- fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
if False, only dump nested list
- printResults - (default=C{True}) prints test output to stdout
- failureTests - (default=C{False}) indicates if these tests are expected to fail parsing
Returns: a (success, results) tuple, where success indicates that all tests succeeded
(or failed if C{failureTests} is True), and the results contain a list of lines of each
test's output
Example::
number_expr = pyparsing_common.number.copy()
result = number_expr.runTests('''
# unsigned integer
100
# negative integer
-100
# float with scientific notation
6.02e23
# integer with scientific notation
1e-12
''')
print("Success" if result[0] else "Failed!")
result = number_expr.runTests('''
# stray character
100Z
# missing leading digit before '.'
-.100
# too many '.'
3.14.159
''', failureTests=True)
print("Success" if result[0] else "Failed!")
prints::
# unsigned integer
100
[100]
# negative integer
-100
[-100]
# float with scientific notation
6.02e23
[6.02e+23]
# integer with scientific notation
1e-12
[1e-12]
Success
# stray character
100Z
^
FAIL: Expected end of text (at char 3), (line:1, col:4)
# missing leading digit before '.'
-.100
^
FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
# too many '.'
3.14.159
^
FAIL: Expected end of text (at char 4), (line:1, col:5)
Success
Each test string must be on a single line. If you want to test a string that spans multiple
lines, create a test like this::
expr.runTest(r"this is a test\\n of strings that spans \\n 3 lines")
(Note that this is a raw string literal, you must include the leading 'r'.)
"""
if isinstance(tests, basestring):
tests = list(map(str.strip, tests.rstrip().splitlines()))
if isinstance(comment, basestring):
comment = Literal(comment)
allResults = []
comments = []
success = True
for t in tests:
if comment is not None and comment.matches(t, False) or comments and not t:
comments.append(t)
continue
if not t:
continue
out = ['\n'.join(comments), t]
comments = []
try:
t = t.replace(r'\n','\n')
result = self.parseString(t, parseAll=parseAll)
out.append(result.dump(full=fullDump))
success = success and not failureTests
except ParseBaseException as pe:
fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
if '\n' in t:
out.append(line(pe.loc, t))
out.append(' '*(col(pe.loc,t)-1) + '^' + fatal)
else:
out.append(' '*pe.loc + '^' + fatal)
out.append("FAIL: " + str(pe))
success = success and failureTests
result = pe
except Exception as exc:
out.append("FAIL-EXCEPTION: " + str(exc))
success = success and failureTests
result = exc
if printResults:
if fullDump:
out.append('')
print('\n'.join(out))
allResults.append((t, result))
return success, allResults
class Token(ParserElement):
"""
Abstract C{ParserElement} subclass, for defining atomic matching patterns.
"""
def __init__( self ):
super(Token,self).__init__( savelist=False )
class Empty(Token):
"""
An empty token, will always match.
"""
def __init__( self ):
super(Empty,self).__init__()
self.name = "Empty"
self.mayReturnEmpty = True
self.mayIndexError = False
class NoMatch(Token):
"""
A token that will never match.
"""
def __init__( self ):
super(NoMatch,self).__init__()
self.name = "NoMatch"
self.mayReturnEmpty = True
self.mayIndexError = False
self.errmsg = "Unmatchable token"
def parseImpl( self, instring, loc, doActions=True ):
raise ParseException(instring, loc, self.errmsg, self)
class Literal(Token):
"""
Token to exactly match a specified string.
Example::
Literal('blah').parseString('blah') # -> ['blah']
Literal('blah').parseString('blahfooblah') # -> ['blah']
Literal('blah').parseString('bla') # -> Exception: Expected "blah"
For case-insensitive matching, use L{CaselessLiteral}.
For keyword matching (force word break before and after the matched string),
use L{Keyword} or L{CaselessKeyword}.
"""
def __init__( self, matchString ):
super(Literal,self).__init__()
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Literal; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.__class__ = Empty
self.name = '"%s"' % _ustr(self.match)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
# Performance tuning: this routine gets called a *lot*
# if this is a single character match string and the first character matches,
# short-circuit as quickly as possible, and avoid calling startswith
#~ @profile
def parseImpl( self, instring, loc, doActions=True ):
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) ):
return loc+self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
_L = Literal
ParserElement._literalStringClass = Literal
class Keyword(Token):
"""
Token to exactly match a specified string as a keyword, that is, it must be
immediately followed by a non-keyword character. Compare with C{L{Literal}}:
- C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}.
- C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
Accepts two optional constructor arguments in addition to the keyword string:
- C{identChars} is a string of characters that would be valid identifier characters,
defaulting to all alphanumerics + "_" and "$"
- C{caseless} allows case-insensitive matching, default is C{False}.
Example::
Keyword("start").parseString("start") # -> ['start']
Keyword("start").parseString("starting") # -> Exception
For case-insensitive matching, use L{CaselessKeyword}.
"""
DEFAULT_KEYWORD_CHARS = alphanums+"_$"
def __init__( self, matchString, identChars=None, caseless=False ):
super(Keyword,self).__init__()
if identChars is None:
identChars = Keyword.DEFAULT_KEYWORD_CHARS
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Keyword; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.name = '"%s"' % self.match
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
self.caseless = caseless
if caseless:
self.caselessmatch = matchString.upper()
identChars = identChars.upper()
self.identChars = set(identChars)
def parseImpl( self, instring, loc, doActions=True ):
if self.caseless:
if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
(loc == 0 or instring[loc-1].upper() not in self.identChars) ):
return loc+self.matchLen, self.match
else:
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
(loc == 0 or instring[loc-1] not in self.identChars) ):
return loc+self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
def copy(self):
c = super(Keyword,self).copy()
c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
return c
@staticmethod
def setDefaultKeywordChars( chars ):
"""Overrides the default Keyword chars
"""
Keyword.DEFAULT_KEYWORD_CHARS = chars
class CaselessLiteral(Literal):
"""
Token to match a specified string, ignoring case of letters.
Note: the matched results will always be in the case of the given
match string, NOT the case of the input text.
Example::
OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
(Contrast with example for L{CaselessKeyword}.)
"""
def __init__( self, matchString ):
super(CaselessLiteral,self).__init__( matchString.upper() )
# Preserve the defining literal.
self.returnString = matchString
self.name = "'%s'" % self.returnString
self.errmsg = "Expected " + self.name
def parseImpl( self, instring, loc, doActions=True ):
if instring[ loc:loc+self.matchLen ].upper() == self.match:
return loc+self.matchLen, self.returnString
raise ParseException(instring, loc, self.errmsg, self)
class CaselessKeyword(Keyword):
"""
Caseless version of L{Keyword}.
Example::
OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
(Contrast with example for L{CaselessLiteral}.)
"""
def __init__( self, matchString, identChars=None ):
super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
def parseImpl( self, instring, loc, doActions=True ):
if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
return loc+self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
class CloseMatch(Token):
"""
A variation on L{Literal} which matches "close" matches, that is,
strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters:
- C{match_string} - string to be matched
- C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match
The results from a successful parse will contain the matched text from the input string and the following named results:
- C{mismatches} - a list of the positions within the match_string where mismatches were found
- C{original} - the original match_string used to compare against the input string
If C{mismatches} is an empty list, then the match was an exact match.
Example::
patt = CloseMatch("ATCATCGAATGGA")
patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
# exact match
patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
# close match allowing up to 2 mismatches
patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
"""
def __init__(self, match_string, maxMismatches=1):
super(CloseMatch,self).__init__()
self.name = match_string
self.match_string = match_string
self.maxMismatches = maxMismatches
self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)
self.mayIndexError = False
self.mayReturnEmpty = False
def parseImpl( self, instring, loc, doActions=True ):
start = loc
instrlen = len(instring)
maxloc = start + len(self.match_string)
if maxloc <= instrlen:
match_string = self.match_string
match_stringloc = 0
mismatches = []
maxMismatches = self.maxMismatches
for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)):
src,mat = s_m
if src != mat:
mismatches.append(match_stringloc)
if len(mismatches) > maxMismatches:
break
else:
loc = match_stringloc + 1
results = ParseResults([instring[start:loc]])
results['original'] = self.match_string
results['mismatches'] = mismatches
return loc, results
raise ParseException(instring, loc, self.errmsg, self)
class Word(Token):
"""
Token for matching words composed of allowed character sets.
Defined with string containing all allowed initial characters,
an optional string containing allowed body characters (if omitted,
defaults to the initial character set), and an optional minimum,
maximum, and/or exact length. The default value for C{min} is 1 (a
minimum value < 1 is not valid); the default values for C{max} and C{exact}
are 0, meaning no maximum or exact length restriction. An optional
C{excludeChars} parameter can list characters that might be found in
the input C{bodyChars} string; useful to define a word of all printables
except for one or two characters, for instance.
L{srange} is useful for defining custom character set strings for defining
C{Word} expressions, using range notation from regular expression character sets.
A common mistake is to use C{Word} to match a specific literal string, as in
C{Word("Address")}. Remember that C{Word} uses the string argument to define
I{sets} of matchable characters. This expression would match "Add", "AAA",
"dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'.
To match an exact literal string, use L{Literal} or L{Keyword}.
pyparsing includes helper strings for building Words:
- L{alphas}
- L{nums}
- L{alphanums}
- L{hexnums}
- L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.)
- L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.)
- L{printables} (any non-whitespace character)
Example::
# a word composed of digits
integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
# a word with a leading capital, and zero or more lowercase
capital_word = Word(alphas.upper(), alphas.lower())
# hostnames are alphanumeric, with leading alpha, and '-'
hostname = Word(alphas, alphanums+'-')
# roman numeral (not a strict parser, accepts invalid mix of characters)
roman = Word("IVXLCDM")
# any string of non-whitespace characters, except for ','
csv_value = Word(printables, excludeChars=",")
"""
def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
super(Word,self).__init__()
if excludeChars:
initChars = ''.join(c for c in initChars if c not in excludeChars)
if bodyChars:
bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
self.initCharsOrig = initChars
self.initChars = set(initChars)
if bodyChars :
self.bodyCharsOrig = bodyChars
self.bodyChars = set(bodyChars)
else:
self.bodyCharsOrig = initChars
self.bodyChars = set(initChars)
self.maxSpecified = max > 0
if min < 1:
raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.asKeyword = asKeyword
if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
if self.bodyCharsOrig == self.initCharsOrig:
self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
elif len(self.initCharsOrig) == 1:
self.reString = "%s[%s]*" % \
(re.escape(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
else:
self.reString = "[%s][%s]*" % \
(_escapeRegexRangeChars(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
if self.asKeyword:
self.reString = r"\b"+self.reString+r"\b"
try:
self.re = re.compile( self.reString )
except Exception:
self.re = None
def parseImpl( self, instring, loc, doActions=True ):
if self.re:
result = self.re.match(instring,loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
return loc, result.group()
if not(instring[ loc ] in self.initChars):
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
instrlen = len(instring)
bodychars = self.bodyChars
maxloc = start + self.maxLen
maxloc = min( maxloc, instrlen )
while loc < maxloc and instring[loc] in bodychars:
loc += 1
throwException = False
if loc - start < self.minLen:
throwException = True
if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
throwException = True
if self.asKeyword:
if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
throwException = True
if throwException:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
def __str__( self ):
try:
return super(Word,self).__str__()
except Exception:
pass
if self.strRepr is None:
def charsAsStr(s):
if len(s)>4:
return s[:4]+"..."
else:
return s
if ( self.initCharsOrig != self.bodyCharsOrig ):
self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
else:
self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
return self.strRepr
class Regex(Token):
"""
Token for matching strings that match a given regular expression.
Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as
named parse results.
Example::
realnum = Regex(r"[+-]?\d+\.\d*")
date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
# ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
"""
compiledREtype = type(re.compile("[A-Z]"))
def __init__( self, pattern, flags=0):
"""The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
super(Regex,self).__init__()
if isinstance(pattern, basestring):
if not pattern:
warnings.warn("null string passed to Regex; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.pattern = pattern
self.flags = flags
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
SyntaxWarning, stacklevel=2)
raise
elif isinstance(pattern, Regex.compiledREtype):
self.re = pattern
self.pattern = \
self.reString = str(pattern)
self.flags = flags
else:
raise ValueError("Regex may only be constructed with a string or a compiled RE object")
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
result = self.re.match(instring,loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
d = result.groupdict()
ret = ParseResults(result.group())
if d:
for k in d:
ret[k] = d[k]
return loc,ret
def __str__( self ):
try:
return super(Regex,self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "Re:(%s)" % repr(self.pattern)
return self.strRepr
class QuotedString(Token):
r"""
Token for matching strings that are delimited by quoting characters.
Defined with the following parameters:
- quoteChar - string of one or more characters defining the quote delimiting string
- escChar - character to escape quotes, typically backslash (default=C{None})
- escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
- multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
- unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
- endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
- convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})
Example::
qs = QuotedString('"')
print(qs.searchString('lsjdf "This is the quote" sldjf'))
complex_qs = QuotedString('{{', endQuoteChar='}}')
print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
sql_qs = QuotedString('"', escQuote='""')
print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
prints::
[['This is the quote']]
[['This is the "quote"']]
[['This is the quote with "embedded" quotes']]
"""
def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
super(QuotedString,self).__init__()
# remove white space from quote chars - won't work anyway
quoteChar = quoteChar.strip()
if not quoteChar:
warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
if endQuoteChar is None:
endQuoteChar = quoteChar
else:
endQuoteChar = endQuoteChar.strip()
if not endQuoteChar:
warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
self.quoteChar = quoteChar
self.quoteCharLen = len(quoteChar)
self.firstQuoteChar = quoteChar[0]
self.endQuoteChar = endQuoteChar
self.endQuoteCharLen = len(endQuoteChar)
self.escChar = escChar
self.escQuote = escQuote
self.unquoteResults = unquoteResults
self.convertWhitespaceEscapes = convertWhitespaceEscapes
if multiline:
self.flags = re.MULTILINE | re.DOTALL
self.pattern = r'%s(?:[^%s%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
else:
self.flags = 0
self.pattern = r'%s(?:[^%s\n\r%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
if len(self.endQuoteChar) > 1:
self.pattern += (
'|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
_escapeRegexRangeChars(self.endQuoteChar[i]))
for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
)
if escQuote:
self.pattern += (r'|(?:%s)' % re.escape(escQuote))
if escChar:
self.pattern += (r'|(?:%s.)' % re.escape(escChar))
self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
SyntaxWarning, stacklevel=2)
raise
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result.group()
if self.unquoteResults:
# strip off quotes
ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
if isinstance(ret,basestring):
# replace escaped whitespace
if '\\' in ret and self.convertWhitespaceEscapes:
ws_map = {
r'\t' : '\t',
r'\n' : '\n',
r'\f' : '\f',
r'\r' : '\r',
}
for wslit,wschar in ws_map.items():
ret = ret.replace(wslit, wschar)
# replace escaped characters
if self.escChar:
ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
# replace escaped quotes
if self.escQuote:
ret = ret.replace(self.escQuote, self.endQuoteChar)
return loc, ret
def __str__( self ):
try:
return super(QuotedString,self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
return self.strRepr
class CharsNotIn(Token):
"""
Token for matching words composed of characters I{not} in a given set (will
include whitespace in matched characters if not listed in the provided exclusion set - see example).
Defined with string containing all disallowed characters, and an optional
minimum, maximum, and/or exact length. The default value for C{min} is 1 (a
minimum value < 1 is not valid); the default values for C{max} and C{exact}
are 0, meaning no maximum or exact length restriction.
Example::
# define a comma-separated-value as anything that is not a ','
csv_value = CharsNotIn(',')
print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
prints::
['dkls', 'lsdkjf', 's12 34', '@!#', '213']
"""
def __init__( self, notChars, min=1, max=0, exact=0 ):
super(CharsNotIn,self).__init__()
self.skipWhitespace = False
self.notChars = notChars
if min < 1:
raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = ( self.minLen == 0 )
self.mayIndexError = False
def parseImpl( self, instring, loc, doActions=True ):
if instring[loc] in self.notChars:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
notchars = self.notChars
maxlen = min( start+self.maxLen, len(instring) )
while loc < maxlen and \
(instring[loc] not in notchars):
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
def __str__( self ):
try:
return super(CharsNotIn, self).__str__()
except Exception:
pass
if self.strRepr is None:
if len(self.notChars) > 4:
self.strRepr = "!W:(%s...)" % self.notChars[:4]
else:
self.strRepr = "!W:(%s)" % self.notChars
return self.strRepr
class White(Token):
"""
Special matching class for matching whitespace. Normally, whitespace is ignored
by pyparsing grammars. This class is included when some whitespace structures
are significant. Define with a string containing the whitespace characters to be
matched; default is C{" \\t\\r\\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments,
as defined for the C{L{Word}} class.
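A minimal usage sketch (illustrative; the expected result is shown as a comment)::
# make a run of spaces significant between two words
two_words = Word(alphas) + White(" ") + Word(alphas)
two_words.parseString("hello   world") # expected -> ['hello', '   ', 'world']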
"""
whiteStrs = {
" " : "<SPC>",
"\t": "<TAB>",
"\n": "<LF>",
"\r": "<CR>",
"\f": "<FF>",
}
def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
super(White,self).__init__()
self.matchWhite = ws
self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
#~ self.leaveWhitespace()
self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
self.mayReturnEmpty = True
self.errmsg = "Expected " + self.name
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
def parseImpl( self, instring, loc, doActions=True ):
if not(instring[ loc ] in self.matchWhite):
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
maxloc = start + self.maxLen
maxloc = min( maxloc, len(instring) )
while loc < maxloc and instring[loc] in self.matchWhite:
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
class _PositionToken(Token):
def __init__( self ):
super(_PositionToken,self).__init__()
self.name=self.__class__.__name__
self.mayReturnEmpty = True
self.mayIndexError = False
class GoToColumn(_PositionToken):
"""
Token to advance to a specific column of input text; useful for tabular report scraping.
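A minimal usage sketch (assumes fixed-width report text in which the description always begins in column 20)::
entry = Word(nums)("id") + GoToColumn(20).suppress() + restOfLine("desc")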
"""
def __init__( self, colno ):
super(GoToColumn,self).__init__()
self.col = colno
def preParse( self, instring, loc ):
if col(loc,instring) != self.col:
instrlen = len(instring)
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
thiscol = col( loc, instring )
if thiscol > self.col:
raise ParseException( instring, loc, "Text not in expected column", self )
newloc = loc + self.col - thiscol
ret = instring[ loc: newloc ]
return newloc, ret
class LineStart(_PositionToken):
"""
Matches if current position is at the beginning of a line within the parse string
Example::
test = '''\
AAA this line
AAA and this line
AAA but not this one
B AAA and definitely not this one
'''
for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
print(t)
Prints::
['AAA', ' this line']
['AAA', ' and this line']
"""
def __init__( self ):
super(LineStart,self).__init__()
self.errmsg = "Expected start of line"
def parseImpl( self, instring, loc, doActions=True ):
if col(loc, instring) == 1:
return loc, []
raise ParseException(instring, loc, self.errmsg, self)
class LineEnd(_PositionToken):
"""
Matches if current position is at the end of a line within the parse string
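A minimal usage sketch (illustrative)::
# require that a count is the last item on its line
count_then_eol = Word(nums)("count") + LineEnd().suppress()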
"""
def __init__( self ):
super(LineEnd,self).__init__()
self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
self.errmsg = "Expected end of line"
def parseImpl( self, instring, loc, doActions=True ):
if loc<len(instring):
if instring[loc] == "\n":
return loc+1, "\n"
else:
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc+1, []
else:
raise ParseException(instring, loc, self.errmsg, self)
class StringStart(_PositionToken):
"""
Matches if current position is at the beginning of the parse string
"""
def __init__( self ):
super(StringStart,self).__init__()
self.errmsg = "Expected start of text"
def parseImpl( self, instring, loc, doActions=True ):
if loc != 0:
# see if entire string up to here is just whitespace and ignoreables
if loc != self.preParse( instring, 0 ):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class StringEnd(_PositionToken):
"""
Matches if current position is at the end of the parse string
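A minimal usage sketch (an alternative to passing C{parseAll=True} to C{parseString}; expected results shown as comments)::
complete = Word(nums) + StringEnd()
complete.parseString("123")      # expected -> ['123']
complete.parseString("123 abc")  # expected -> ParseException: Expected end of text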
"""
def __init__( self ):
super(StringEnd,self).__init__()
self.errmsg = "Expected end of text"
def parseImpl( self, instring, loc, doActions=True ):
if loc < len(instring):
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc+1, []
elif loc > len(instring):
return loc, []
else:
raise ParseException(instring, loc, self.errmsg, self)
class WordStart(_PositionToken):
"""
Matches if the current position is at the beginning of a Word, and
is not preceded by any character in a given set of C{wordChars}
(default=C{printables}). To emulate the C{\b} behavior of regular expressions,
use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
the string being parsed, or at the beginning of a line.
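A minimal usage sketch (expected result shown as a comment)::
# match "cat" only at the start of a word - the embedded "cat" in "concatenate" is skipped
cat_word = WordStart(alphas) + Literal("cat")
print(cat_word.searchString("cat concatenate catalog"))
# expected -> [['cat'], ['cat']]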
"""
def __init__(self, wordChars = printables):
super(WordStart,self).__init__()
self.wordChars = set(wordChars)
self.errmsg = "Not at the start of a word"
def parseImpl(self, instring, loc, doActions=True ):
if loc != 0:
if (instring[loc-1] in self.wordChars or
instring[loc] not in self.wordChars):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class WordEnd(_PositionToken):
"""
Matches if the current position is at the end of a Word, and
is not followed by any character in a given set of C{wordChars}
(default=C{printables}). To emulate the C{\b} behavior of regular expressions,
use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
the string being parsed, or at the end of a line.
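A minimal usage sketch (expected result shown as a comment)::
# match "cat" only at the end of a word - the leading "cat" in "catalog" is skipped
cat_word = Literal("cat") + WordEnd(alphas)
print(cat_word.searchString("bobcat catalog cat"))
# expected -> [['cat'], ['cat']]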
"""
def __init__(self, wordChars = printables):
super(WordEnd,self).__init__()
self.wordChars = set(wordChars)
self.skipWhitespace = False
self.errmsg = "Not at the end of a word"
def parseImpl(self, instring, loc, doActions=True ):
instrlen = len(instring)
if instrlen>0 and loc<instrlen:
if (instring[loc] in self.wordChars or
instring[loc-1] not in self.wordChars):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class ParseExpression(ParserElement):
"""
Abstract subclass of ParserElement, for combining and post-processing parsed tokens.
"""
def __init__( self, exprs, savelist = False ):
super(ParseExpression,self).__init__(savelist)
if isinstance( exprs, _generatorType ):
exprs = list(exprs)
if isinstance( exprs, basestring ):
self.exprs = [ ParserElement._literalStringClass( exprs ) ]
elif isinstance( exprs, collections.Iterable ):
exprs = list(exprs)
# if sequence of strings provided, wrap with Literal
if all(isinstance(expr, basestring) for expr in exprs):
exprs = map(ParserElement._literalStringClass, exprs)
self.exprs = list(exprs)
else:
try:
self.exprs = list( exprs )
except TypeError:
self.exprs = [ exprs ]
self.callPreparse = False
def __getitem__( self, i ):
return self.exprs[i]
def append( self, other ):
self.exprs.append( other )
self.strRepr = None
return self
def leaveWhitespace( self ):
"""Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
all contained expressions."""
self.skipWhitespace = False
self.exprs = [ e.copy() for e in self.exprs ]
for e in self.exprs:
e.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
else:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
return self
def __str__( self ):
try:
return super(ParseExpression,self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
return self.strRepr
def streamline( self ):
super(ParseExpression,self).streamline()
for e in self.exprs:
e.streamline()
# collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
# but only if there are no parse actions or resultsNames on the nested And's
# (likewise for Or's and MatchFirst's)
if ( len(self.exprs) == 2 ):
other = self.exprs[0]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = other.exprs[:] + [ self.exprs[1] ]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
other = self.exprs[-1]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = self.exprs[:-1] + other.exprs[:]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
self.errmsg = "Expected " + _ustr(self)
return self
def setResultsName( self, name, listAllMatches=False ):
ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
return ret
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
for e in self.exprs:
e.validate(tmp)
self.checkRecursion( [] )
def copy(self):
ret = super(ParseExpression,self).copy()
ret.exprs = [e.copy() for e in self.exprs]
return ret
class And(ParseExpression):
"""
Requires all given C{ParseExpression}s to be found in the given order.
Expressions may be separated by whitespace.
May be constructed using the C{'+'} operator.
May also be constructed using the C{'-'} operator, which will suppress backtracking.
Example::
integer = Word(nums)
name_expr = OneOrMore(Word(alphas))
expr = And([integer("id"),name_expr("name"),integer("age")])
# more easily written as:
expr = integer("id") + name_expr("name") + integer("age")
"""
class _ErrorStop(Empty):
def __init__(self, *args, **kwargs):
super(And._ErrorStop,self).__init__(*args, **kwargs)
self.name = '-'
self.leaveWhitespace()
def __init__( self, exprs, savelist = True ):
super(And,self).__init__(exprs, savelist)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
self.setWhitespaceChars( self.exprs[0].whiteChars )
self.skipWhitespace = self.exprs[0].skipWhitespace
self.callPreparse = True
def parseImpl( self, instring, loc, doActions=True ):
# pass False as last arg to _parse for first element, since we already
# pre-parsed the string as part of our And pre-parsing
loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
errorStop = False
for e in self.exprs[1:]:
if isinstance(e, And._ErrorStop):
errorStop = True
continue
if errorStop:
try:
loc, exprtokens = e._parse( instring, loc, doActions )
except ParseSyntaxException:
raise
except ParseBaseException as pe:
pe.__traceback__ = None
raise ParseSyntaxException._from_exception(pe)
except IndexError:
raise ParseSyntaxException(instring, len(instring), self.errmsg, self)
else:
loc, exprtokens = e._parse( instring, loc, doActions )
if exprtokens or exprtokens.haskeys():
resultlist += exprtokens
return loc, resultlist
def __iadd__(self, other ):
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
return self.append( other ) #And( [ self, other ] )
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
if not e.mayReturnEmpty:
break
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
class Or(ParseExpression):
"""
Requires that at least one C{ParseExpression} is found.
If two expressions match, the expression that matches the longest string will be used.
May be constructed using the C{'^'} operator.
Example::
# construct Or using '^' operator
number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
print(number.searchString("123 3.1416 789"))
prints::
[['123'], ['3.1416'], ['789']]
"""
def __init__( self, exprs, savelist = False ):
super(Or,self).__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
maxException = None
matches = []
for e in self.exprs:
try:
loc2 = e.tryParse( instring, loc )
except ParseException as err:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
else:
# save match among all matches, to retry longest to shortest
matches.append((loc2, e))
if matches:
matches.sort(key=lambda x: -x[0])
for _,e in matches:
try:
return e._parse( instring, loc, doActions )
except ParseException as err:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
if maxException is not None:
maxException.msg = self.errmsg
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
def __ixor__(self, other ):
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
return self.append( other ) #Or( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class MatchFirst(ParseExpression):
"""
Requires that at least one C{ParseExpression} is found.
If two expressions match, the first one listed is the one that will match.
May be constructed using the C{'|'} operator.
Example::
# construct MatchFirst using '|' operator
# watch the order of expressions to match
number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']]
# put more selective expression first
number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']]
"""
def __init__( self, exprs, savelist = False ):
super(MatchFirst,self).__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
maxException = None
for e in self.exprs:
try:
ret = e._parse( instring, loc, doActions )
return ret
except ParseException as err:
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
# only got here if no expression matched, raise exception for match that made it the furthest
else:
if maxException is not None:
maxException.msg = self.errmsg
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
def __ior__(self, other ):
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
return self.append( other ) #MatchFirst( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class Each(ParseExpression):
"""
Requires all given C{ParseExpression}s to be found, but in any order.
Expressions may be separated by whitespace.
May be constructed using the C{'&'} operator.
Example::
color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
integer = Word(nums)
shape_attr = "shape:" + shape_type("shape")
posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
color_attr = "color:" + color("color")
size_attr = "size:" + integer("size")
# use Each (using operator '&') to accept attributes in any order
# (shape and posn are required, color and size are optional)
shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)
shape_spec.runTests('''
shape: SQUARE color: BLACK posn: 100, 120
shape: CIRCLE size: 50 color: BLUE posn: 50,80
color:GREEN size:20 shape:TRIANGLE posn:20,40
'''
)
prints::
shape: SQUARE color: BLACK posn: 100, 120
['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
- color: BLACK
- posn: ['100', ',', '120']
- x: 100
- y: 120
- shape: SQUARE
shape: CIRCLE size: 50 color: BLUE posn: 50,80
['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
- color: BLUE
- posn: ['50', ',', '80']
- x: 50
- y: 80
- shape: CIRCLE
- size: 50
color: GREEN size: 20 shape: TRIANGLE posn: 20,40
['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
- color: GREEN
- posn: ['20', ',', '40']
- x: 20
- y: 40
- shape: TRIANGLE
- size: 20
"""
def __init__( self, exprs, savelist = True ):
super(Each,self).__init__(exprs, savelist)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
self.skipWhitespace = True
self.initExprGroups = True
def parseImpl( self, instring, loc, doActions=True ):
if self.initExprGroups:
self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional))
opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)]
self.optionals = opt1 + opt2
self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
self.required += self.multirequired
self.initExprGroups = False
tmpLoc = loc
tmpReqd = self.required[:]
tmpOpt = self.optionals[:]
matchOrder = []
keepMatching = True
while keepMatching:
tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
failed = []
for e in tmpExprs:
try:
tmpLoc = e.tryParse( instring, tmpLoc )
except ParseException:
failed.append(e)
else:
matchOrder.append(self.opt1map.get(id(e),e))
if e in tmpReqd:
tmpReqd.remove(e)
elif e in tmpOpt:
tmpOpt.remove(e)
if len(failed) == len(tmpExprs):
keepMatching = False
if tmpReqd:
missing = ", ".join(_ustr(e) for e in tmpReqd)
raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
# add any unmatched Optionals, in case they have default values defined
matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
resultlist = []
for e in matchOrder:
loc,results = e._parse(instring,loc,doActions)
resultlist.append(results)
finalResults = sum(resultlist, ParseResults([]))
return loc, finalResults
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class ParseElementEnhance(ParserElement):
"""
Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.
"""
def __init__( self, expr, savelist=False ):
super(ParseElementEnhance,self).__init__(savelist)
if isinstance( expr, basestring ):
if issubclass(ParserElement._literalStringClass, Token):
expr = ParserElement._literalStringClass(expr)
else:
expr = ParserElement._literalStringClass(Literal(expr))
self.expr = expr
self.strRepr = None
if expr is not None:
self.mayIndexError = expr.mayIndexError
self.mayReturnEmpty = expr.mayReturnEmpty
self.setWhitespaceChars( expr.whiteChars )
self.skipWhitespace = expr.skipWhitespace
self.saveAsList = expr.saveAsList
self.callPreparse = expr.callPreparse
self.ignoreExprs.extend(expr.ignoreExprs)
def parseImpl( self, instring, loc, doActions=True ):
if self.expr is not None:
return self.expr._parse( instring, loc, doActions, callPreParse=False )
else:
raise ParseException("",loc,self.errmsg,self)
def leaveWhitespace( self ):
self.skipWhitespace = False
self.expr = self.expr.copy()
if self.expr is not None:
self.expr.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
else:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
return self
def streamline( self ):
super(ParseElementEnhance,self).streamline()
if self.expr is not None:
self.expr.streamline()
return self
def checkRecursion( self, parseElementList ):
if self in parseElementList:
raise RecursiveGrammarException( parseElementList+[self] )
subRecCheckList = parseElementList[:] + [ self ]
if self.expr is not None:
self.expr.checkRecursion( subRecCheckList )
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion( [] )
def __str__( self ):
try:
return super(ParseElementEnhance,self).__str__()
except Exception:
pass
if self.strRepr is None and self.expr is not None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
return self.strRepr
class FollowedBy(ParseElementEnhance):
"""
Lookahead matching of the given parse expression. C{FollowedBy}
does I{not} advance the parsing position within the input string, it only
verifies that the specified parse expression matches at the current
position. C{FollowedBy} always returns a null token list.
Example::
# use FollowedBy to match a label only if it is followed by a ':'
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
prints::
[['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
"""
def __init__( self, expr ):
super(FollowedBy,self).__init__(expr)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
self.expr.tryParse( instring, loc )
return loc, []
class NotAny(ParseElementEnhance):
"""
Lookahead to disallow matching with the given parse expression. C{NotAny}
does I{not} advance the parsing position within the input string, it only
verifies that the specified parse expression does I{not} match at the current
position. Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny}
always returns a null token list. May be constructed using the '~' operator.
Example::
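# an illustrative sketch - match an identifier only if it is not a reserved keyword
AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())
keyword = AND | OR | NOT
ident = ~keyword + Word(alphas)
ident.parseString("shape") # expected -> ['shape']
ident.parseString("AND")   # expected -> ParseException: Found unwanted token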
"""
def __init__( self, expr ):
super(NotAny,self).__init__(expr)
#~ self.leaveWhitespace()
self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
self.mayReturnEmpty = True
self.errmsg = "Found unwanted token, "+_ustr(self.expr)
def parseImpl( self, instring, loc, doActions=True ):
if self.expr.canParseNext(instring, loc):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "~{" + _ustr(self.expr) + "}"
return self.strRepr
class _MultipleMatch(ParseElementEnhance):
def __init__( self, expr, stopOn=None):
super(_MultipleMatch, self).__init__(expr)
self.saveAsList = True
ender = stopOn
if isinstance(ender, basestring):
ender = ParserElement._literalStringClass(ender)
self.not_ender = ~ender if ender is not None else None
def parseImpl( self, instring, loc, doActions=True ):
self_expr_parse = self.expr._parse
self_skip_ignorables = self._skipIgnorables
check_ender = self.not_ender is not None
if check_ender:
try_not_ender = self.not_ender.tryParse
# must be at least one (but first see if we are the stopOn sentinel;
# if so, fail)
if check_ender:
try_not_ender(instring, loc)
loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False )
try:
hasIgnoreExprs = (not not self.ignoreExprs)
while 1:
if check_ender:
try_not_ender(instring, loc)
if hasIgnoreExprs:
preloc = self_skip_ignorables( instring, loc )
else:
preloc = loc
loc, tmptokens = self_expr_parse( instring, preloc, doActions )
if tmptokens or tmptokens.haskeys():
tokens += tmptokens
except (ParseException,IndexError):
pass
return loc, tokens
class OneOrMore(_MultipleMatch):
"""
Repetition of one or more of the given expression.
Parameters:
- expr - expression that must match one or more times
- stopOn - (default=C{None}) - expression for a terminating sentinel
(only required if the sentinel would ordinarily match the repetition
expression)
Example::
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
text = "shape: SQUARE posn: upper left color: BLACK"
OneOrMore(attr_expr).parseString(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
# use stopOn attribute for OneOrMore to avoid reading label string as part of the data
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
# could also be written as
(attr_expr * (1,)).parseString(text).pprint()
"""
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + _ustr(self.expr) + "}..."
return self.strRepr
class ZeroOrMore(_MultipleMatch):
"""
Optional repetition of zero or more of the given expression.
Parameters:
- expr - expression that must match zero or more times
- stopOn - (default=C{None}) - expression for a terminating sentinel
(only required if the sentinel would ordinarily match the repetition
expression)
Example: similar to L{OneOrMore}
"""
def __init__( self, expr, stopOn=None):
super(ZeroOrMore,self).__init__(expr, stopOn=stopOn)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
try:
return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
except (ParseException,IndexError):
return loc, []
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]..."
return self.strRepr
class _NullToken(object):
def __bool__(self):
return False
__nonzero__ = __bool__
def __str__(self):
return ""
_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
"""
Optional matching of the given expression.
Parameters:
- expr - expression that must match zero or more times
- default (optional) - value to be returned if the optional expression is not found.
Example::
# US postal code can be a 5-digit zip, plus optional 4-digit qualifier
zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
zip.runTests('''
# traditional ZIP code
12345
# ZIP+4 form
12101-0001
# invalid ZIP
98765-
''')
prints::
# traditional ZIP code
12345
['12345']
# ZIP+4 form
12101-0001
['12101-0001']
# invalid ZIP
98765-
^
FAIL: Expected end of text (at char 5), (line:1, col:6)
"""
def __init__( self, expr, default=_optionalNotMatched ):
super(Optional,self).__init__( expr, savelist=False )
self.saveAsList = self.expr.saveAsList
self.defaultValue = default
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
try:
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
except (ParseException,IndexError):
if self.defaultValue is not _optionalNotMatched:
if self.expr.resultsName:
tokens = ParseResults([ self.defaultValue ])
tokens[self.expr.resultsName] = self.defaultValue
else:
tokens = [ self.defaultValue ]
else:
tokens = []
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]"
return self.strRepr
class SkipTo(ParseElementEnhance):
"""
Token for skipping over all undefined text until the matched expression is found.
Parameters:
- expr - target expression marking the end of the data to be skipped
- include - (default=C{False}) if True, the target expression is also parsed
(the skipped text and target expression are returned as a 2-element list).
- ignore - (default=C{None}) used to define grammars (typically quoted strings and
comments) that might contain false matches to the target expression
- failOn - (default=C{None}) define expressions that are not allowed to be
included in the skipped text; if found before the target expression is found,
the SkipTo is not a match
Example::
report = '''
Outstanding Issues Report - 1 Jan 2000
# | Severity | Description | Days Open
-----+----------+-------------------------------------------+-----------
101 | Critical | Intermittent system crash | 6
94 | Cosmetic | Spelling error on Login ('log|n') | 14
79 | Minor | System slow when running too many reports | 47
'''
integer = Word(nums)
SEP = Suppress('|')
# use SkipTo to simply match everything up until the next SEP
# - ignore quoted strings, so that a '|' character inside a quoted string does not match
# - parse action will call token.strip() for each matched token, i.e., the description body
string_data = SkipTo(SEP, ignore=quotedString)
string_data.setParseAction(tokenMap(str.strip))
ticket_expr = (integer("issue_num") + SEP
+ string_data("sev") + SEP
+ string_data("desc") + SEP
+ integer("days_open"))
for tkt in ticket_expr.searchString(report):
print(tkt.dump())
prints::
['101', 'Critical', 'Intermittent system crash', '6']
- days_open: 6
- desc: Intermittent system crash
- issue_num: 101
- sev: Critical
['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
- days_open: 14
- desc: Spelling error on Login ('log|n')
- issue_num: 94
- sev: Cosmetic
['79', 'Minor', 'System slow when running too many reports', '47']
- days_open: 47
- desc: System slow when running too many reports
- issue_num: 79
- sev: Minor
"""
def __init__( self, other, include=False, ignore=None, failOn=None ):
super( SkipTo, self ).__init__( other )
self.ignoreExpr = ignore
self.mayReturnEmpty = True
self.mayIndexError = False
self.includeMatch = include
self.asList = False
if isinstance(failOn, basestring):
self.failOn = ParserElement._literalStringClass(failOn)
else:
self.failOn = failOn
self.errmsg = "No match found for "+_ustr(self.expr)
def parseImpl( self, instring, loc, doActions=True ):
startloc = loc
instrlen = len(instring)
expr = self.expr
expr_parse = self.expr._parse
self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
tmploc = loc
while tmploc <= instrlen:
if self_failOn_canParseNext is not None:
# break if failOn expression matches
if self_failOn_canParseNext(instring, tmploc):
break
if self_ignoreExpr_tryParse is not None:
# advance past ignore expressions
while 1:
try:
tmploc = self_ignoreExpr_tryParse(instring, tmploc)
except ParseBaseException:
break
try:
expr_parse(instring, tmploc, doActions=False, callPreParse=False)
except (ParseException, IndexError):
# no match, advance loc in string
tmploc += 1
else:
# matched skipto expr, done
break
else:
# ran off the end of the input string without matching skipto expr, fail
raise ParseException(instring, loc, self.errmsg, self)
# build up return values
loc = tmploc
skiptext = instring[startloc:loc]
skipresult = ParseResults(skiptext)
if self.includeMatch:
loc, mat = expr_parse(instring,loc,doActions,callPreParse=False)
skipresult += mat
return loc, skipresult
class Forward(ParseElementEnhance):
"""
Forward declaration of an expression to be defined later -
used for recursive grammars, such as algebraic infix notation.
When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
Note: take care when assigning to C{Forward} not to overlook precedence of operators.
Specifically, '|' has a lower precedence than '<<', so that::
fwdExpr << a | b | c
will actually be evaluated as::
(fwdExpr << a) | b | c
thereby leaving b and c out as parseable alternatives. It is recommended that you
explicitly group the values inserted into the C{Forward}::
fwdExpr << (a | b | c)
Converting to use the '<<=' operator instead will avoid this problem.
See L{ParseResults.pprint} for an example of a recursive parser created using
C{Forward}.
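A minimal recursive-grammar sketch (expected result shown as a comment)::
LPAR, RPAR = map(Suppress, "()")
item = Forward()
item <<= Word(alphas) | Group(LPAR + ZeroOrMore(item) + RPAR)
item.parseString("(a (b c) d)") # expected -> [['a', ['b', 'c'], 'd']]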
"""
def __init__( self, other=None ):
super(Forward,self).__init__( other, savelist=False )
def __lshift__( self, other ):
if isinstance( other, basestring ):
other = ParserElement._literalStringClass(other)
self.expr = other
self.strRepr = None
self.mayIndexError = self.expr.mayIndexError
self.mayReturnEmpty = self.expr.mayReturnEmpty
self.setWhitespaceChars( self.expr.whiteChars )
self.skipWhitespace = self.expr.skipWhitespace
self.saveAsList = self.expr.saveAsList
self.ignoreExprs.extend(self.expr.ignoreExprs)
return self
def __ilshift__(self, other):
return self << other
def leaveWhitespace( self ):
self.skipWhitespace = False
return self
def streamline( self ):
if not self.streamlined:
self.streamlined = True
if self.expr is not None:
self.expr.streamline()
return self
def validate( self, validateTrace=[] ):
if self not in validateTrace:
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion([])
def __str__( self ):
if hasattr(self,"name"):
return self.name
return self.__class__.__name__ + ": ..."
# stubbed out for now - creates awful memory and perf issues
self._revertClass = self.__class__
self.__class__ = _ForwardNoRecurse
try:
if self.expr is not None:
retString = _ustr(self.expr)
else:
retString = "None"
finally:
self.__class__ = self._revertClass
return self.__class__.__name__ + ": " + retString
def copy(self):
if self.expr is not None:
return super(Forward,self).copy()
else:
ret = Forward()
ret <<= self
return ret
class _ForwardNoRecurse(Forward):
def __str__( self ):
return "..."
class TokenConverter(ParseElementEnhance):
"""
Abstract subclass of C{ParseElementEnhance}, for converting parsed results.
"""
def __init__( self, expr, savelist=False ):
super(TokenConverter,self).__init__( expr )#, savelist )
self.saveAsList = False
class Combine(TokenConverter):
"""
Converter to concatenate all matching tokens to a single string.
By default, the matching patterns must also be contiguous in the input string;
this can be disabled by specifying C{'adjacent=False'} in the constructor.
Example::
real = Word(nums) + '.' + Word(nums)
print(real.parseString('3.1416')) # -> ['3', '.', '1416']
# will also erroneously match the following
print(real.parseString('3. 1416')) # -> ['3', '.', '1416']
real = Combine(Word(nums) + '.' + Word(nums))
print(real.parseString('3.1416')) # -> ['3.1416']
# no match when there are internal spaces
print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
"""
def __init__( self, expr, joinString="", adjacent=True ):
super(Combine,self).__init__( expr )
# suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
if adjacent:
self.leaveWhitespace()
self.adjacent = adjacent
self.skipWhitespace = True
self.joinString = joinString
self.callPreparse = True
def ignore( self, other ):
if self.adjacent:
ParserElement.ignore(self, other)
else:
super( Combine, self).ignore( other )
return self
def postParse( self, instring, loc, tokenlist ):
retToks = tokenlist.copy()
del retToks[:]
retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
if self.resultsName and retToks.haskeys():
return [ retToks ]
else:
return retToks
class Group(TokenConverter):
"""
Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.
Example::
ident = Word(alphas)
num = Word(nums)
term = ident | num
func = ident + Optional(delimitedList(term))
print(func.parseString("fn a,b,100")) # -> ['fn', 'a', 'b', '100']
func = ident + Group(Optional(delimitedList(term)))
print(func.parseString("fn a,b,100")) # -> ['fn', ['a', 'b', '100']]
"""
def __init__( self, expr ):
super(Group,self).__init__( expr )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
return [ tokenlist ]
class Dict(TokenConverter):
"""
Converter to return a repetitive expression as a list, but also as a dictionary.
Each element can also be referenced using the first token in the expression as its key.
Useful for tabular report scraping when the first column can be used as an item key.
Example::
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
# print attributes as plain groups
print(OneOrMore(attr_expr).parseString(text).dump())
# instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
print(result.dump())
# access named fields as dict entries, or output as dict
print(result['shape'])
print(result.asDict())
prints::
['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
[['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- color: light blue
- posn: upper left
- shape: SQUARE
- texture: burlap
SQUARE
{'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
See more examples at L{ParseResults} of accessing fields by results name.
"""
def __init__( self, expr ):
super(Dict,self).__init__( expr )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
for i,tok in enumerate(tokenlist):
if len(tok) == 0:
continue
ikey = tok[0]
if isinstance(ikey,int):
ikey = _ustr(tok[0]).strip()
if len(tok)==1:
tokenlist[ikey] = _ParseResultsWithOffset("",i)
elif len(tok)==2 and not isinstance(tok[1],ParseResults):
tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
else:
dictvalue = tok.copy() #ParseResults(i)
del dictvalue[0]
if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()):
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
else:
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
if self.resultsName:
return [ tokenlist ]
else:
return tokenlist
class Suppress(TokenConverter):
"""
Converter for ignoring the results of a parsed expression.
Example::
source = "a, b, c,d"
wd = Word(alphas)
wd_list1 = wd + ZeroOrMore(',' + wd)
print(wd_list1.parseString(source))
# often, delimiters that are useful during parsing are just in the
# way afterward - use Suppress to keep them out of the parsed output
wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
print(wd_list2.parseString(source))
prints::
['a', ',', 'b', ',', 'c', ',', 'd']
['a', 'b', 'c', 'd']
(See also L{delimitedList}.)
"""
def postParse( self, instring, loc, tokenlist ):
return []
def suppress( self ):
return self
class OnlyOnce(object):
"""
Wrapper for parse actions, to ensure they are only called once.
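A minimal usage sketch (C{reportFirstMatch} is a made-up parse action for illustration)::
def reportFirstMatch(s, l, t):
    print("first match:", t[0])
wd = Word(alphas).setParseAction(OnlyOnce(reportFirstMatch))
# the wrapped action runs for the first match only; subsequent matches
# raise ParseException until reset() is called on the OnlyOnce instance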
"""
def __init__(self, methodCall):
self.callable = _trim_arity(methodCall)
self.called = False
def __call__(self,s,l,t):
if not self.called:
results = self.callable(s,l,t)
self.called = True
return results
raise ParseException(s,l,"")
def reset(self):
self.called = False
def traceParseAction(f):
"""
Decorator for debugging parse actions.
When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".}
When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.
Example::
wd = Word(alphas)
@traceParseAction
def remove_duplicate_chars(tokens):
return ''.join(sorted(set(''.join(tokens))))
wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
prints::
>>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
<<leaving remove_duplicate_chars (ret: 'dfjkls')
['dfjkls']
"""
f = _trim_arity(f)
def z(*paArgs):
thisFunc = f.__name__
s,l,t = paArgs[-3:]
if len(paArgs)>3:
thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) )
try:
ret = f(*paArgs)
except Exception as exc:
sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
raise
sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) )
return ret
try:
z.__name__ = f.__name__
except AttributeError:
pass
return z
#
# global helpers
#
def delimitedList( expr, delim=",", combine=False ):
"""
Helper to define a delimited list of expressions - the delimiter defaults to ','.
By default, the list elements and delimiters can have intervening whitespace, and
comments, but this can be overridden by passing C{combine=True} in the constructor.
If C{combine} is set to C{True}, the matching tokens are returned as a single token
string, with the delimiters included; otherwise, the matching tokens are returned
as a list of tokens, with the delimiters suppressed.
Example::
delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
"""
dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
if combine:
return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
else:
return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
def countedArray( expr, intExpr=None ):
"""
Helper to define a counted list of expressions.
This helper defines a pattern of the form::
integer expr expr expr...
where the leading integer tells how many expr expressions follow.
The matched tokens are returned as a list of the expr tokens - the leading count token is suppressed.
If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.
Example::
countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd']
# in this parser, the leading integer value is given in binary,
# '10' indicating that 2 values are in the array
binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd']
"""
arrayExpr = Forward()
def countFieldParseAction(s,l,t):
n = t[0]
arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
return []
if intExpr is None:
intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
else:
intExpr = intExpr.copy()
intExpr.setName("arrayLen")
intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...')
def _flatten(L):
ret = []
for i in L:
if isinstance(i,list):
ret.extend(_flatten(i))
else:
ret.append(i)
return ret
def matchPreviousLiteral(expr):
"""
Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousLiteral(first)
matchExpr = first + ":" + second
will match C{"1:1"}, but not C{"1:2"}. Because this matches a
previous literal, will also match the leading C{"1:1"} in C{"1:10"}.
If this is not desired, use C{matchPreviousExpr}.
Do I{not} use with packrat parsing enabled.
"""
rep = Forward()
def copyTokenToRepeater(s,l,t):
if t:
if len(t) == 1:
rep << t[0]
else:
# flatten t tokens
tflat = _flatten(t.asList())
rep << And(Literal(tt) for tt in tflat)
else:
rep << Empty()
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
rep.setName('(prev) ' + _ustr(expr))
return rep
def matchPreviousExpr(expr):
"""
Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousExpr(first)
matchExpr = first + ":" + second
will match C{"1:1"}, but not C{"1:2"}. Because this matches by
expressions, will I{not} match the leading C{"1:1"} in C{"1:10"};
the expressions are evaluated first, and then compared, so
C{"1"} is compared with C{"10"}.
Do I{not} use with packrat parsing enabled.
"""
rep = Forward()
e2 = expr.copy()
rep <<= e2
def copyTokenToRepeater(s,l,t):
matchTokens = _flatten(t.asList())
def mustMatchTheseTokens(s,l,t):
theseTokens = _flatten(t.asList())
if theseTokens != matchTokens:
raise ParseException("",0,"")
rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
rep.setName('(prev) ' + _ustr(expr))
return rep
def _escapeRegexRangeChars(s):
#~ escape these chars: ^-]
for c in r"\^-]":
s = s.replace(c,_bslash+c)
s = s.replace("\n",r"\n")
s = s.replace("\t",r"\t")
return _ustr(s)
def oneOf( strs, caseless=False, useRegex=True ):
"""
Helper to quickly define a set of alternative Literals. Ensures longest-first testing
when there is a conflict, regardless of the input order, but returns a C{L{MatchFirst}}
for best performance.
Parameters:
- strs - a string of space-delimited literals, or a collection of string literals
- caseless - (default=C{False}) - treat all literals as caseless
- useRegex - (default=C{True}) - as an optimization, will generate a Regex
object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
if creating a C{Regex} raises an exception)
Example::
comp_oper = oneOf("< = > <= >= !=")
var = Word(alphas)
number = Word(nums)
term = var | number
comparison_expr = term + comp_oper + term
print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12"))
prints::
[['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
"""
if caseless:
isequal = ( lambda a,b: a.upper() == b.upper() )
masks = ( lambda a,b: b.upper().startswith(a.upper()) )
parseElementClass = CaselessLiteral
else:
isequal = ( lambda a,b: a == b )
masks = ( lambda a,b: b.startswith(a) )
parseElementClass = Literal
symbols = []
if isinstance(strs,basestring):
symbols = strs.split()
elif isinstance(strs, collections.Iterable):
symbols = list(strs)
else:
warnings.warn("Invalid argument to oneOf, expected string or iterable",
SyntaxWarning, stacklevel=2)
if not symbols:
return NoMatch()
i = 0
while i < len(symbols)-1:
cur = symbols[i]
for j,other in enumerate(symbols[i+1:]):
if ( isequal(other, cur) ):
del symbols[i+j+1]
break
elif ( masks(cur, other) ):
del symbols[i+j+1]
symbols.insert(i,other)
cur = other
break
else:
i += 1
if not caseless and useRegex:
#~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
try:
if len(symbols)==len("".join(symbols)):
return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols))
else:
return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols))
except Exception:
warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
SyntaxWarning, stacklevel=2)
# last resort, just use MatchFirst
return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
def dictOf( key, value ):
"""
Helper to easily and clearly define a dictionary by specifying the respective patterns
for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
in the proper order. The key pattern can include delimiting markers or punctuation,
as long as they are suppressed, thereby leaving the significant key text. The value
pattern can include named results, so that the C{Dict} results can include named token
fields.
Example::
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
print(OneOrMore(attr_expr).parseString(text).dump())
attr_label = label
attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
# similar to Dict, but simpler call format
result = dictOf(attr_label, attr_value).parseString(text)
print(result.dump())
print(result['shape'])
print(result.shape) # object attribute access works too
print(result.asDict())
prints::
[['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- color: light blue
- posn: upper left
- shape: SQUARE
- texture: burlap
SQUARE
SQUARE
{'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
"""
return Dict( ZeroOrMore( Group ( key + value ) ) )
def originalTextFor(expr, asString=True):
"""
Helper to return the original, untokenized text for a given expression. Useful to
restore the parsed fields of an HTML start tag into the raw tag text itself, or to
revert separate tokens with intervening whitespace back to the original matching
input text. By default, returns a string containing the original parsed text.
If the optional C{asString} argument is passed as C{False}, then the return value is a
C{L{ParseResults}} containing any results names that were originally matched, and a
single token containing the original matched text from the input string. So if
the expression passed to C{L{originalTextFor}} contains expressions with defined
results names, you must set C{asString} to C{False} if you want to preserve those
results name values.
Example::
src = "this is test <b> bold <i>text</i> </b> normal text "
for tag in ("b","i"):
opener,closer = makeHTMLTags(tag)
patt = originalTextFor(opener + SkipTo(closer) + closer)
print(patt.searchString(src)[0])
prints::
['<b> bold <i>text</i> </b>']
['<i>text</i>']
"""
locMarker = Empty().setParseAction(lambda s,loc,t: loc)
endlocMarker = locMarker.copy()
endlocMarker.callPreparse = False
matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
if asString:
extractText = lambda s,l,t: s[t._original_start:t._original_end]
else:
def extractText(s,l,t):
t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
matchExpr.setParseAction(extractText)
matchExpr.ignoreExprs = expr.ignoreExprs
return matchExpr
def ungroup(expr):
"""
Helper to undo pyparsing's default grouping of And expressions, even
if all but one are non-empty.
"""
return TokenConverter(expr).setParseAction(lambda t:t[0])
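# Illustrative sketch (not part of the original docs; the grammar below is hypothetical):
# ungroup() peels off the extra level of nesting that Group can introduce, returning
# the single inner token group directly.
#
#   grouped = Group(Word(alphas) + Suppress('=') + Word(nums))
#   print(grouped.parseString("x = 100"))           # -> [['x', '100']]
#   print(ungroup(grouped).parseString("x = 100"))  # -> ['x', '100']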
def locatedExpr(expr):
"""
Helper to decorate a returned token with its starting and ending locations in the input string.
This helper adds the following results names:
- locn_start = location where matched expression begins
- locn_end = location where matched expression ends
- value = the actual parsed results
Be careful if the input text contains C{<TAB>} characters; you may want to call
C{L{ParserElement.parseWithTabs}}.
Example::
wd = Word(alphas)
for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
print(match)
prints::
[[0, 'ljsdf', 5]]
[[8, 'lksdjjf', 15]]
[[18, 'lkkjj', 23]]
"""
locator = Empty().setParseAction(lambda s,l,t: l)
return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))
# convenience constants for positional expressions
empty = Empty().setName("empty")
lineStart = LineStart().setName("lineStart")
lineEnd = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd = StringEnd().setName("stringEnd")
_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(printables, excludeChars=r'\]', exact=1) | Regex(r"\w", re.UNICODE)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
def srange(s):
r"""
Helper to easily define string ranges for use in Word construction. Borrows
syntax from regexp '[]' string range definitions::
srange("[0-9]") -> "0123456789"
srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
The input string must be enclosed in []'s, and the returned string is the expanded
character set joined into a single string.
The values enclosed in the []'s may be:
- a single character
- an escaped character with a leading backslash (such as C{\-} or C{\]})
- an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character)
(C{\0x##} is also supported for backwards compatibility)
- an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character)
- a range of any of the above, separated by a dash (C{'a-z'}, etc.)
- any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.)
"""
_expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1))
try:
return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
except Exception:
return ""
def matchOnlyAtCol(n):
"""
Helper method for defining parse actions that require matching at a specific
column in the input text.
"""
def verifyCol(strg,locn,toks):
if col(locn,strg) != n:
raise ParseException(strg,locn,"matched token not at column %d" % n)
return verifyCol
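# Illustrative sketch (added for clarity; the expression and input below are hypothetical):
# matchOnlyAtCol(n) builds a parse action that rejects any match not starting at column n,
# which is handy for picking values out of fixed-width text with scanString/searchString.
#
#   col9_word = Word(alphas).setParseAction(matchOnlyAtCol(9))
#   # in "ab      cd" (six spaces), 'cd' begins at column 9
#   for t in col9_word.searchString("ab      cd"):
#       print(t)   # -> ['cd']   ('ab' is rejected because it starts at column 1)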
def replaceWith(replStr):
"""
Helper method for common parse actions that simply return a literal value. Especially
useful when used with C{L{transformString<ParserElement.transformString>}()}.
Example::
num = Word(nums).setParseAction(lambda toks: int(toks[0]))
na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
term = na | num
OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
"""
return lambda s,l,t: [replStr]
def removeQuotes(s,l,t):
"""
Helper parse action for removing quotation marks from parsed quoted strings.
Example::
# by default, quotation marks are included in parsed results
quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
# use removeQuotes to strip quotation marks from parsed results
quotedString.setParseAction(removeQuotes)
quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
"""
return t[0][1:-1]
def tokenMap(func, *args):
"""
Helper to define a parse action by mapping a function to all elements of a ParseResults list. If any additional
args are passed, they are forwarded to the given function as additional arguments after
the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the
parsed data to an integer using base 16.
Example (compare the last example to the one in L{ParserElement.transformString})::
hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
hex_ints.runTests('''
00 11 22 aa FF 0a 0d 1a
''')
upperword = Word(alphas).setParseAction(tokenMap(str.upper))
OneOrMore(upperword).runTests('''
my kingdom for a horse
''')
wd = Word(alphas).setParseAction(tokenMap(str.title))
OneOrMore(wd).setParseAction(' '.join).runTests('''
now is the winter of our discontent made glorious summer by this sun of york
''')
prints::
00 11 22 aa FF 0a 0d 1a
[0, 17, 34, 170, 255, 10, 13, 26]
my kingdom for a horse
['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
now is the winter of our discontent made glorious summer by this sun of york
['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
"""
def pa(s,l,t):
return [func(tokn, *args) for tokn in t]
try:
func_name = getattr(func, '__name__',
getattr(func, '__class__').__name__)
except Exception:
func_name = str(func)
pa.__name__ = func_name
return pa
upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}"""
downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
"""(Deprecated) Helper parse action to convert tokens to lower case. Deprecated in favor of L{pyparsing_common.downcaseTokens}"""
def _makeTags(tagStr, xml):
"""Internal helper to construct opening and closing tag expressions, given a tag name"""
if isinstance(tagStr,basestring):
resname = tagStr
tagStr = Keyword(tagStr, caseless=not xml)
else:
resname = tagStr.name
tagAttrName = Word(alphas,alphanums+"_-:")
if (xml):
tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
openTag = Suppress("<") + tagStr("tag") + \
Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
else:
printablesLessRAbrack = "".join(c for c in printables if c not in ">")
tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
openTag = Suppress("<") + tagStr("tag") + \
Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
Optional( Suppress("=") + tagAttrValue ) ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
closeTag = Combine(_L("</") + tagStr + ">")
openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname)
closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname)
openTag.tag = resname
closeTag.tag = resname
return openTag, closeTag
def makeHTMLTags(tagStr):
"""
Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches
tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values.
Example::
text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
# makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple
a,a_end = makeHTMLTags("A")
link_expr = a + SkipTo(a_end)("link_text") + a_end
for link in link_expr.searchString(text):
# attributes in the <A> tag (like "href" shown here) are also accessible as named results
print(link.link_text, '->', link.href)
prints::
pyparsing -> http://pyparsing.wikispaces.com
"""
return _makeTags( tagStr, False )
def makeXMLTags(tagStr):
"""
Helper to construct opening and closing tag expressions for XML, given a tag name. Matches
tags only in the given upper/lower case.
Example: similar to L{makeHTMLTags}
"""
return _makeTags( tagStr, True )
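# Illustrative sketch (mirrors the makeHTMLTags example above; the XML snippet is hypothetical):
#
#   body, body_end = makeXMLTags("body")
#   text_expr = body + SkipTo(body_end)("text") + body_end
#   print(text_expr.parseString('<body lang="en">hello</body>').text)  # -> 'hello'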
def withAttribute(*args,**attrDict):
"""
Helper to create a validating parse action to be used with start tags created
with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
with a required attribute value, to avoid false matches on common tags such as
C{<TD>} or C{<DIV>}.
Call C{withAttribute} with a series of attribute names and values. Specify the list
of filter attribute names and values as:
- keyword arguments, as in C{(align="right")}, or
- as an explicit dict with C{**} operator, when an attribute name is also a Python
reserved word, as in C{**{"class":"Customer", "align":"right"}}
- a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
For attribute names with a namespace prefix, you must use the second form. Attribute
names are matched insensitive to upper/lower case.
If just testing for C{class} (with or without a namespace), use C{L{withClass}}.
To verify that the attribute exists, but without specifying a value, pass
C{withAttribute.ANY_VALUE} as the value.
Example::
html = '''
<div>
Some text
<div type="grid">1 4 0 1 0</div>
<div type="graph">1,3 2,3 1,1</div>
<div>this has no type</div>
</div>
'''
div,div_end = makeHTMLTags("div")
# only match div tag having a type attribute with value "grid"
div_grid = div().setParseAction(withAttribute(type="grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.searchString(html):
print(grid_header.body)
# construct a match with any div tag having a type attribute, regardless of the value
div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.searchString(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1
"""
if args:
attrs = args[:]
else:
attrs = attrDict.items()
attrs = [(k,v) for k,v in attrs]
def pa(s,l,tokens):
for attrName,attrValue in attrs:
if attrName not in tokens:
raise ParseException(s,l,"no matching attribute " + attrName)
if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
(attrName, tokens[attrName], attrValue))
return pa
withAttribute.ANY_VALUE = object()
def withClass(classname, namespace=''):
"""
Simplified version of C{L{withAttribute}} when matching on a div class - made
difficult because C{class} is a reserved word in Python.
Example::
html = '''
<div>
Some text
<div class="grid">1 4 0 1 0</div>
<div class="graph">1,3 2,3 1,1</div>
<div>this <div> has no class</div>
</div>
'''
div,div_end = makeHTMLTags("div")
div_grid = div().setParseAction(withClass("grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.searchString(html):
print(grid_header.body)
div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.searchString(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1
"""
classattr = "%s:class" % namespace if namespace else "class"
return withAttribute(**{classattr : classname})
opAssoc = _Constants()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ):
"""
Helper method for constructing grammars of expressions made up of
operators working in a precedence hierarchy. Operators may be unary or
binary, left- or right-associative. Parse actions can also be attached
to operator expressions. The generated parser will also recognize the use
of parentheses to override operator precedences (see example below).
Note: if you define a deep operator list, you may see performance issues
when using infixNotation. See L{ParserElement.enablePackrat} for a
mechanism to potentially improve your parser performance.
Parameters:
- baseExpr - expression representing the most basic element of the nested expression grammar
- opList - list of tuples, one for each operator precedence level in the
expression grammar; each tuple is of the form
(opExpr, numTerms, rightLeftAssoc, parseAction), where:
- opExpr is the pyparsing expression for the operator;
may also be a string, which will be converted to a Literal;
if numTerms is 3, opExpr is a tuple of two expressions, for the
two operators separating the 3 terms
- numTerms is the number of terms for this operator (must
be 1, 2, or 3)
- rightLeftAssoc is the indicator whether the operator is
right or left associative, using the pyparsing-defined
constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
- parseAction is the parse action to be associated with
expressions matching this operator expression (the
parse action tuple member may be omitted)
- lpar - expression for matching left-parentheses (default=C{Suppress('(')})
- rpar - expression for matching right-parentheses (default=C{Suppress(')')})
Example::
# simple example of four-function arithmetic with ints and variable names
integer = pyparsing_common.signed_integer
varname = pyparsing_common.identifier
arith_expr = infixNotation(integer | varname,
[
('-', 1, opAssoc.RIGHT),
(oneOf('* /'), 2, opAssoc.LEFT),
(oneOf('+ -'), 2, opAssoc.LEFT),
])
arith_expr.runTests('''
5+3*6
(5+3)*6
-2--11
''', fullDump=False)
prints::
5+3*6
[[5, '+', [3, '*', 6]]]
(5+3)*6
[[[5, '+', 3], '*', 6]]
-2--11
[[['-', 2], '-', ['-', 11]]]
"""
ret = Forward()
lastExpr = baseExpr | ( lpar + ret + rpar )
for i,operDef in enumerate(opList):
opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
if arity == 3:
if opExpr is None or len(opExpr) != 2:
raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
opExpr1, opExpr2 = opExpr
thisExpr = Forward().setName(termName)
if rightLeftAssoc == opAssoc.LEFT:
if arity == 1:
matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
elif arity == 2:
if opExpr is not None:
matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
else:
matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
elif arity == 3:
matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
else:
raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
elif rightLeftAssoc == opAssoc.RIGHT:
if arity == 1:
# try to avoid LR with this extra test
if not isinstance(opExpr, Optional):
opExpr = Optional(opExpr)
matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
elif arity == 2:
if opExpr is not None:
matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
else:
matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
elif arity == 3:
matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
else:
raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
else:
raise ValueError("operator must indicate right or left associativity")
if pa:
matchExpr.setParseAction( pa )
thisExpr <<= ( matchExpr.setName(termName) | lastExpr )
lastExpr = thisExpr
ret <<= lastExpr
return ret
operatorPrecedence = infixNotation
"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release."""
dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes")
sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes")
quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'|
Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes")
unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal")
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
"""
Helper method for defining nested lists enclosed in opening and closing
delimiters ("(" and ")" are the default).
Parameters:
- opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression
- closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression
- content - expression for items within the nested lists (default=C{None})
- ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString})
If an expression is not provided for the content argument, the nested
expression will capture all whitespace-delimited content between delimiters
as a list of separate values.
Use the C{ignoreExpr} argument to define expressions that may contain
opening or closing characters that should not be treated as opening
or closing characters for nesting, such as quotedString or a comment
expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
The default is L{quotedString}, but if no expressions are to be ignored,
then pass C{None} for this argument.
Example::
data_type = oneOf("void int short long char float double")
decl_data_type = Combine(data_type + Optional(Word('*')))
ident = Word(alphas+'_', alphanums+'_')
number = pyparsing_common.number
arg = Group(decl_data_type + ident)
LPAR,RPAR = map(Suppress, "()")
code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))
c_function = (decl_data_type("type")
+ ident("name")
+ LPAR + Optional(delimitedList(arg), [])("args") + RPAR
+ code_body("body"))
c_function.ignore(cStyleComment)
source_code = '''
int is_odd(int x) {
return (x%2);
}
int dec_to_hex(char hchar) {
if (hchar >= '0' && hchar <= '9') {
return (ord(hchar)-ord('0'));
} else {
return (10+ord(hchar)-ord('A'));
}
}
'''
for func in c_function.searchString(source_code):
print("%(name)s (%(type)s) args: %(args)s" % func)
prints::
is_odd (int) args: [['int', 'x']]
dec_to_hex (int) args: [['char', 'hchar']]
"""
if opener == closer:
raise ValueError("opening and closing strings cannot be the same")
if content is None:
if isinstance(opener,basestring) and isinstance(closer,basestring):
if len(opener) == 1 and len(closer)==1:
if ignoreExpr is not None:
content = (Combine(OneOrMore(~ignoreExpr +
CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
).setParseAction(lambda t:t[0].strip()))
else:
if ignoreExpr is not None:
content = (Combine(OneOrMore(~ignoreExpr +
~Literal(opener) + ~Literal(closer) +
CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
raise ValueError("opening and closing arguments must be strings if no content expression is given")
ret = Forward()
if ignoreExpr is not None:
ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
else:
ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
ret.setName('nested %s%s expression' % (opener,closer))
return ret
def indentedBlock(blockStatementExpr, indentStack, indent=True):
"""
Helper method for defining space-delimited indentation blocks, such as
those used to define block statements in Python source code.
Parameters:
- blockStatementExpr - expression defining syntax of statement that
is repeated within the indented block
- indentStack - list created by caller to manage indentation stack
(multiple statementWithIndentedBlock expressions within a single grammar
should share a common indentStack)
- indent - boolean indicating whether block must be indented beyond
the current level; set to False for block of left-most statements
(default=C{True})
A valid block must contain at least one C{blockStatement}.
Example::
data = '''
def A(z):
A1
B = 100
G = A2
A2
A3
B
def BB(a,b,c):
BB1
def BBA():
bba1
bba2
bba3
C
D
def spam(x,y):
def eggs(z):
pass
'''
indentStack = [1]
stmt = Forward()
identifier = Word(alphas, alphanums)
funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":")
func_body = indentedBlock(stmt, indentStack)
funcDef = Group( funcDecl + func_body )
rvalue = Forward()
funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
rvalue << (funcCall | identifier | Word(nums))
assignment = Group(identifier + "=" + rvalue)
stmt << ( funcDef | assignment | identifier )
module_body = OneOrMore(stmt)
parseTree = module_body.parseString(data)
parseTree.pprint()
prints::
[['def',
'A',
['(', 'z', ')'],
':',
[['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
'B',
['def',
'BB',
['(', 'a', 'b', 'c', ')'],
':',
[['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
'C',
'D',
['def',
'spam',
['(', 'x', 'y', ')'],
':',
[[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
"""
def checkPeerIndent(s,l,t):
if l >= len(s): return
curCol = col(l,s)
if curCol != indentStack[-1]:
if curCol > indentStack[-1]:
raise ParseFatalException(s,l,"illegal nesting")
raise ParseException(s,l,"not a peer entry")
def checkSubIndent(s,l,t):
curCol = col(l,s)
if curCol > indentStack[-1]:
indentStack.append( curCol )
else:
raise ParseException(s,l,"not a subentry")
def checkUnindent(s,l,t):
if l >= len(s): return
curCol = col(l,s)
if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
raise ParseException(s,l,"not an unindent")
indentStack.pop()
NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT')
PEER = Empty().setParseAction(checkPeerIndent).setName('')
UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT')
if indent:
smExpr = Group( Optional(NL) +
#~ FollowedBy(blockStatementExpr) +
INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
else:
smExpr = Group( Optional(NL) +
(OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
blockStatementExpr.ignore(_bslash + LineEnd())
return smExpr.setName('indented block')
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag'))
_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\''))
commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity")
def replaceHTMLEntity(t):
"""Helper parser action to replace common HTML entities with their special characters"""
return _htmlEntityMap.get(t.entity)
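# Illustrative sketch (hypothetical input string): replaceHTMLEntity is intended to be
# attached to commonHTMLEntity as a parse action, typically via transformString:
#
#   print(commonHTMLEntity.copy().setParseAction(replaceHTMLEntity)
#         .transformString("x &lt; y &amp;&amp; y &gt; z"))
#   # prints: x < y && y > z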
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
"Comment of the form C{/* ... */}"
htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
"Comment of the form C{<!-- ... -->}"
restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
"Comment of the form C{// ... (to end of line)}"
cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment")
"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}"
javaStyleComment = cppStyleComment
"Same as C{L{cppStyleComment}}"
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
"Comment of the form C{# ... (to end of line)}"
_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') +
Optional( Word(" \t") +
~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas.
This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}."""
# some other useful expressions - using lower-case class name since we are really using this as a namespace
class pyparsing_common:
"""
Here are some common low-level expressions that may be useful in jump-starting parser development:
- numeric forms (L{integers<integer>}, L{reals<real>}, L{scientific notation<sci_real>})
- common L{programming identifiers<identifier>}
- network addresses (L{MAC<mac_address>}, L{IPv4<ipv4_address>}, L{IPv6<ipv6_address>})
- ISO8601 L{dates<iso8601_date>} and L{datetime<iso8601_datetime>}
- L{UUID<uuid>}
- L{comma-separated list<comma_separated_list>}
Parse actions:
- C{L{convertToInteger}}
- C{L{convertToFloat}}
- C{L{convertToDate}}
- C{L{convertToDatetime}}
- C{L{stripHTMLTags}}
- C{L{upcaseTokens}}
- C{L{downcaseTokens}}
Example::
pyparsing_common.number.runTests('''
# any int or real number, returned as the appropriate type
100
-100
+100
3.14159
6.02e23
1e-12
''')
pyparsing_common.fnumber.runTests('''
# any int or real number, returned as float
100
-100
+100
3.14159
6.02e23
1e-12
''')
pyparsing_common.hex_integer.runTests('''
# hex numbers
100
FF
''')
pyparsing_common.fraction.runTests('''
# fractions
1/2
-3/4
''')
pyparsing_common.mixed_integer.runTests('''
# mixed fractions
1
1/2
-3/4
1-3/4
''')
import uuid
pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
pyparsing_common.uuid.runTests('''
# uuid
12345678-1234-5678-1234-567812345678
''')
prints::
# any int or real number, returned as the appropriate type
100
[100]
-100
[-100]
+100
[100]
3.14159
[3.14159]
6.02e23
[6.02e+23]
1e-12
[1e-12]
# any int or real number, returned as float
100
[100.0]
-100
[-100.0]
+100
[100.0]
3.14159
[3.14159]
6.02e23
[6.02e+23]
1e-12
[1e-12]
# hex numbers
100
[256]
FF
[255]
# fractions
1/2
[0.5]
-3/4
[-0.75]
# mixed fractions
1
[1]
1/2
[0.5]
-3/4
[-0.75]
1-3/4
[1.75]
# uuid
12345678-1234-5678-1234-567812345678
[UUID('12345678-1234-5678-1234-567812345678')]
"""
convertToInteger = tokenMap(int)
"""
Parse action for converting parsed integers to Python int
"""
convertToFloat = tokenMap(float)
"""
Parse action for converting parsed numbers to Python float
"""
integer = Word(nums).setName("integer").setParseAction(convertToInteger)
"""expression that parses an unsigned integer, returns an int"""
hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16))
"""expression that parses a hexadecimal integer, returns an int"""
signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger)
"""expression that parses an integer with optional leading sign, returns an int"""
fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction")
"""fractional expression of an integer divided by an integer, returns a float"""
fraction.addParseAction(lambda t: t[0]/t[-1])
mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction")
"""mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
mixed_integer.addParseAction(sum)
real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat)
"""expression that parses a floating point number and returns a float"""
sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat)
"""expression that parses a floating point number with optional scientific notation and returns a float"""
# streamlining this expression makes the docs nicer-looking
number = (sci_real | real | signed_integer).streamline()
"""any numeric expression, returns the corresponding Python type"""
fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat)
"""any int or real number, returned as float"""
identifier = Word(alphas+'_', alphanums+'_').setName("identifier")
"""typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address")
"IPv4 address (C{0.0.0.0 - 255.255.255.255})"
_ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer")
_full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address")
_short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address")
_short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8)
_mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address")
ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address")
"IPv6 address (long, short, or mixed form)"
mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address")
"MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
@staticmethod
def convertToDate(fmt="%Y-%m-%d"):
"""
Helper to create a parse action for converting parsed date string to Python datetime.date
Params -
- fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"})
Example::
date_expr = pyparsing_common.iso8601_date.copy()
date_expr.setParseAction(pyparsing_common.convertToDate())
print(date_expr.parseString("1999-12-31"))
prints::
[datetime.date(1999, 12, 31)]
"""
def cvt_fn(s,l,t):
try:
return datetime.strptime(t[0], fmt).date()
except ValueError as ve:
raise ParseException(s, l, str(ve))
return cvt_fn
@staticmethod
def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"):
"""
Helper to create a parse action for converting parsed datetime string to Python datetime.datetime
Params -
- fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"})
Example::
dt_expr = pyparsing_common.iso8601_datetime.copy()
dt_expr.setParseAction(pyparsing_common.convertToDatetime())
print(dt_expr.parseString("1999-12-31T23:59:59.999"))
prints::
[datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
"""
def cvt_fn(s,l,t):
try:
return datetime.strptime(t[0], fmt)
except ValueError as ve:
raise ParseException(s, l, str(ve))
return cvt_fn
iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date")
"ISO8601 date (C{yyyy-mm-dd})"
iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime")
"ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}"
uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID")
"UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})"
_html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress()
@staticmethod
def stripHTMLTags(s, l, tokens):
"""
Parse action to remove HTML tags from web page HTML source
Example::
# strip HTML links from normal text
text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
td,td_end = makeHTMLTags("TD")
table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page'
"""
return pyparsing_common._html_stripper.transformString(tokens[0])
_commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',')
+ Optional( White(" \t") ) ) ).streamline().setName("commaItem")
comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list")
"""Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper()))
"""Parse action to convert tokens to upper case."""
downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower()))
"""Parse action to convert tokens to lower case."""
if __name__ == "__main__":
selectToken = CaselessLiteral("select")
fromToken = CaselessLiteral("from")
ident = Word(alphas, alphanums + "_$")
columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
columnNameList = Group(delimitedList(columnName)).setName("columns")
columnSpec = ('*' | columnNameList)
tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
tableNameList = Group(delimitedList(tableName)).setName("tables")
simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables")
# demo runTests method, including embedded comments in test string
simpleSQL.runTests("""
# '*' as column list and dotted table name
select * from SYS.XYZZY
# caseless match on "SELECT", and casts back to "select"
SELECT * from XYZZY, ABC
# list of column names, and mixed case SELECT keyword
Select AA,BB,CC from Sys.dual
# multiple tables
Select A, B, C from Sys.dual, Table2
# invalid SELECT keyword - should fail
Xelect A, B, C from Sys.dual
# incomplete command - should fail
Select
# invalid column name - should fail
Select ^^^ frox Sys.dual
""")
pyparsing_common.number.runTests("""
100
-100
+100
3.14159
6.02e23
1e-12
""")
# any int or real number, returned as float
pyparsing_common.fnumber.runTests("""
100
-100
+100
3.14159
6.02e23
1e-12
""")
pyparsing_common.hex_integer.runTests("""
100
FF
""")
import uuid
pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
pyparsing_common.uuid.runTests("""
12345678-1234-5678-1234-567812345678
""")
| mit |
xR86/Algo | FII-year3sem1-SI/hw1/exe3/key_manager.py | 2 | 2576 | #!/usr/bin/python2.7
from Crypto.Cipher import AES
from Crypto import Random
'''
key = 'Sixteen byte key'
plain_text = 'Attack at dawn'
iv = Random.new().read(AES.block_size)
#if ECB, use PCKS#7
# append bytes to reach mod 16 boundary.
# All padding bytes have the same value: the number of bytes that you are adding:
length = 16 - (len(plain_text) % 16)
plain_text += chr(length)*length
cipher = AES.new(key, AES.MODE_ECB) #AES.new(key, AES.MODE_CFB, iv)
cipher_text = cipher.encrypt(plain_text)
print cipher_text.encode('hex')
decrypt = AES.new(key, AES.MODE_ECB)
dec_plain_text = decrypt.decrypt(cipher_text)
# remove from the back of the plaintext as many bytes as indicated by padding:
dec_plain_text = dec_plain_text[:-ord(dec_plain_text[-1])]
print dec_plain_text
'''
k1 = "ecb-fff byte key" #ECB
k2 = "cfb-fff byte key" #CFB, with iv known by A,B
k3 = "*known key this-" #encrypts k1, k2, is known by all 3 nodes from the start
def encrypt(k, mode):
global k3
key = k3
plain_text = k
#if ECB, use PCKS#7
# append bytes to reach mod 16 boundary.
# All padding bytes have the same value: the number of bytes that you are adding:
length = 16 - (len(plain_text) % 16)
plain_text += chr(length)*length
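# Illustrative note (added): with this PKCS#7-style scheme a 14-byte plaintext gets two
# bytes of value chr(2) appended, while a plaintext already a multiple of 16 bytes gets a
# full block of sixteen chr(16) bytes, so the padding is always unambiguous to strip.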
if mode == 'CFB':
iv = Random.new().read(AES.block_size)
cipher = AES.new(key, AES.MODE_CFB, iv)
else: #if no mode set, presume ECB
cipher = AES.new(key, AES.MODE_ECB)
cipher_text = cipher.encrypt(plain_text)
print cipher_text.encode('hex')
'''
decrypt = AES.new(key, AES.MODE_ECB)
dec_plain_text = decrypt.decrypt(cipher_text)
# remove from the back of the plaintext as many bytes as indicated by padding:
dec_plain_text = dec_plain_text[:-ord(dec_plain_text[-1])]
print dec_plain_text
'''
return cipher_text
import socket
###Acts as a server
sock = socket.socket() # Create a socket object
host = socket.gethostname() # Get local machine name
port = 1595 # Reserve a port for your service.
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port)) # Bind to the port
sock.listen(5) # Now wait for client connection.
counter = 0
while (counter < 2):
c, addr = sock.accept() # Establish connection with client.
tmp_name = '[' + chr(ord('A') + counter) + ']'
print tmp_name, 'Got connection from', addr
mode = c.recv(1024)
print tmp_name, ' recv request for mode: ', mode
print tmp_name, ' Sending crypto key'
if mode == 'ECB':
c.send(encrypt(k1, ''))
else:
c.send(encrypt(k2, ''))
c.close() # Close the connection
counter += 1
| gpl-2.0 |
wemanuel/smry | server-auth/ls/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/gcloud/sdktools/components/list.py | 11 | 1866 | # Copyright 2013 Google Inc. All Rights Reserved.
"""The command to list installed/available gcloud components."""
import textwrap
from googlecloudsdk.calliope import base
class List(base.Command):
"""List the status of all Cloud SDK components.
List all packages and individual components in the Cloud SDK and provide
information such as whether the component is installed on the local
workstation, whether a newer version is available, the size of the component,
and the ID used to refer to the component in commands.
"""
detailed_help = {
'DESCRIPTION': textwrap.dedent("""\
This command lists all the tools in the Cloud SDK (both individual
components and preconfigured packages of components). For each
component, the command lists the following information:
* Status on your local workstation: not installed, installed (and
up to date), and update available (installed, but not up to date)
* Name of the component (a description)
* ID of the component (used to refer to the component in other
[{parent_command}] commands)
* Size of the component
In addition, if the `--show-versions` flag is specified, the command
lists the currently installed version (if any) and the latest
available version of each individual component.
"""),
'EXAMPLES': textwrap.dedent("""\
$ gcloud components list
$ gcloud components list --show-versions
"""),
}
@staticmethod
def Args(parser):
parser.add_argument(
'--show-versions', required=False, action='store_true',
help='Show installed and available versions of all components.')
def Run(self, args):
"""Runs the list command."""
self.group.update_manager.List(show_versions=args.show_versions)
| apache-2.0 |
ricardaw/pismdev | util/PISMNC.py | 5 | 4793 | #!/usr/bin/env python
try:
import netCDF4 as netCDF
except:
import netCDF3 as netCDF
class PISMDataset(netCDF.Dataset):
def create_time(self, use_bounds = False, length = None, units = None):
self.createDimension('time', size = length)
t_var = self.createVariable('time', 'f8', ('time',))
t_var.axis = "T"
t_var.long_name = "time"
if not units:
t_var.units = "seconds since 1-1-1" # just a default
else:
t_var.units = units
if use_bounds:
self.createDimension('n_bounds', 2)
self.createVariable("time_bounds", 'f8', ('time', 'n_bounds'))
t_var.bounds = "time_bounds"
def create_dimensions(self, x, y, time_dependent = False, use_time_bounds = False):
"""
Create PISM-compatible dimensions in a NetCDF file.
"""
if time_dependent and not 'time' in self.variables.keys():
self.create_time(use_time_bounds)
self.createDimension('x', x.size)
self.createDimension('y', y.size)
x_var = self.createVariable('x', 'f8', ('x',))
x_var[:] = x
y_var = self.createVariable('y', 'f8', ('y',))
y_var[:] = y
x_var.axis = "X"
x_var.long_name = "X-coordinate in Cartesian system"
x_var.units = "m"
x_var.standard_name = "projection_x_coordinate"
y_var.axis = "Y"
y_var.long_name = "Y-coordinate in Cartesian system"
y_var.units = "m"
y_var.standard_name = "projection_y_coordinate"
self.sync()
def append_time(self, value, bounds = None):
if 'time' in self.dimensions.keys():
time = self.variables['time']
N = time.size
time[N] = value
if bounds:
self.variables['time_bounds'][N, :] = bounds
def set_attrs(self, var_name, attrs):
"""attrs should be a list of (name, value) tuples."""
if not attrs:
return
for (name, value) in attrs.iteritems():
setattr(self.variables[var_name], name, value)
def define_2d_field(self, var_name, time_dependent = False, dims = None, nc_type = 'f8', attrs = None):
"""
time_dependent: boolean
dims: an optional list of dimension names. use this to override the
default order ('time', 'y', 'x')
attrs: a dictionary of attributes
"""
if not dims:
if time_dependent:
dims = ('time', 'y', 'x')
else:
dims = ('y', 'x')
try:
var = self.variables[var_name]
except:
var = self.createVariable(var_name, nc_type, dims)
self.set_attrs(var_name, attrs)
return var
def define_timeseries(self, var_name, attrs = None):
try:
var = self.createVariable(var_name, 'f8', ('time',))
except:
var = self.variables[var_name]
self.set_attrs(var_name, attrs)
return var
def write(self, var_name, data, time_dependent = False):
"""
Write time-series or a 2D field to a file.
"""
if data.ndim == 1:
return self.write_timeseries(var_name, data)
elif data.ndim == 2:
return self.write_2d_field(var_name, data, time_dependent)
else:
return None
def write_2d_field(self, var_name, data, time_dependent = False):
"""
Write a 2D numpy array to a file in a format PISM can read.
"""
var = self.define_2d_field(var_name, time_dependent, None)
if time_dependent:
last_record = self.variables['time'].size - 1
var[last_record,:,:] = data
else:
var[:] = data
return var
def write_timeseries(self, var_name, data):
"""Write a 1D (time-series) array to a file."""
var = self.define_timeseries(var_name, None)
var[:] = data
return var
if __name__ == "__main__":
# produce a NetCDF file for testing
from numpy import linspace, meshgrid
nc = PISMDataset("foo.nc", 'w')
x = linspace(-100, 100, 101)
y = linspace(-100, 100, 201)
xx, yy = meshgrid(x, y)
nc.create_dimensions(x, y, time_dependent = True, use_time_bounds = True)
nc.define_2d_field("xx", time_dependent = True,
attrs = {"long_name" : "xx",
"comment" : "test variable",
"valid_range" : (-200.0, 200.0)})
for t in [0, 1, 2, 3]:
nc.append_time(t, (t-1, t))
nc.write("xx", xx + t, time_dependent = True)
nc.write("yy", yy + 2*t, time_dependent = True)
nc.close()
| gpl-2.0 |
gauribhoite/personfinder | env/google_appengine/lib/django-1.5/django/contrib/markup/templatetags/markup.py | 104 | 3499 | """
Set of "markup" template filters for Django. These filters transform plain text
markup syntaxes to HTML; currently there is support for:
* Textile, which requires the PyTextile library available at
http://loopcore.com/python-textile/
* Markdown, which requires the Python-markdown library from
http://www.freewisdom.org/projects/python-markdown
* reStructuredText, which requires docutils from http://docutils.sf.net/
"""
from django import template
from django.conf import settings
from django.utils.encoding import force_bytes, force_text
from django.utils.safestring import mark_safe
register = template.Library()
@register.filter(is_safe=True)
def textile(value):
try:
import textile
except ImportError:
if settings.DEBUG:
raise template.TemplateSyntaxError("Error in 'textile' filter: The Python textile library isn't installed.")
return force_text(value)
else:
return mark_safe(force_text(textile.textile(force_bytes(value), encoding='utf-8', output='utf-8')))
@register.filter(is_safe=True)
def markdown(value, arg=''):
"""
Runs Markdown over a given value, optionally using various
extensions python-markdown supports.
Syntax::
{{ value|markdown:"extension1_name,extension2_name..." }}
To enable safe mode, which strips raw HTML and only returns HTML
generated by actual Markdown syntax, pass "safe" as the first
extension in the list.
If the version of Markdown in use does not support extensions,
they will be silently ignored.
"""
import warnings
warnings.warn('The markdown filter has been deprecated',
category=DeprecationWarning)
try:
import markdown
except ImportError:
if settings.DEBUG:
raise template.TemplateSyntaxError("Error in 'markdown' filter: The Python markdown library isn't installed.")
return force_text(value)
else:
markdown_vers = getattr(markdown, "version_info", 0)
if markdown_vers < (2, 1):
if settings.DEBUG:
raise template.TemplateSyntaxError(
"Error in 'markdown' filter: Django does not support versions of the Python markdown library < 2.1.")
return force_text(value)
else:
extensions = [e for e in arg.split(",") if e]
if extensions and extensions[0] == "safe":
extensions = extensions[1:]
return mark_safe(markdown.markdown(
force_text(value), extensions, safe_mode=True, enable_attributes=False))
else:
return mark_safe(markdown.markdown(
force_text(value), extensions, safe_mode=False))
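# Illustrative template usage (not part of this module; the variable name is hypothetical):
#
#   {% load markup %}
#   {{ post.body|markdown }}          {# plain Markdown #}
#   {{ post.body|markdown:"safe" }}   {# strip raw HTML, keep Markdown-generated HTML #}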
@register.filter(is_safe=True)
def restructuredtext(value):
import warnings
warnings.warn('The restructuredtext filter has been deprecated',
category=DeprecationWarning)
try:
from docutils.core import publish_parts
except ImportError:
if settings.DEBUG:
raise template.TemplateSyntaxError("Error in 'restructuredtext' filter: The Python docutils library isn't installed.")
return force_text(value)
else:
docutils_settings = getattr(settings, "RESTRUCTUREDTEXT_FILTER_SETTINGS", {})
parts = publish_parts(source=force_bytes(value), writer_name="html4css1", settings_overrides=docutils_settings)
return mark_safe(force_text(parts["fragment"]))
| apache-2.0 |
ElementsProject/elements | test/functional/test_framework/address.py | 1 | 5297 | #!/usr/bin/env python3
# Copyright (c) 2016-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Encode and decode Bitcoin addresses.
- base58 P2PKH and P2SH addresses.
- bech32 segwit v0 P2WPKH and P2WSH addresses."""
import enum
import unittest
from .script import hash256, hash160, sha256, CScript, OP_0
from .segwit_addr import encode_segwit_address
from .util import assert_equal, hex_str_to_bytes
ADDRESS_BCRT1_UNSPENDABLE = 'ert1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq458dk'
ADDRESS_BCRT1_UNSPENDABLE_DESCRIPTOR = 'addr(ert1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq458dk)#446fqfj4'
# Coins sent to this address can be spent with a witness stack of just OP_TRUE
ADDRESS_BCRT1_P2WSH_OP_TRUE = 'ert1qft5p2uhsdcdc3l2ua4ap5qqfg4pjaqlp250x7us7a8qqhrxrxfsqp24xws'
class AddressType(enum.Enum):
bech32 = 'bech32'
p2sh_segwit = 'p2sh-segwit'
legacy = 'legacy' # P2PKH
chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
def byte_to_base58(b, version):
result = ''
str = b.hex()
str = chr(version).encode('latin-1').hex() + str
checksum = hash256(hex_str_to_bytes(str)).hex()
str += checksum[:8]
value = int('0x' + str, 0)
while value > 0:
result = chars[value % 58] + result
value //= 58
while (str[:2] == '00'):
result = chars[0] + result
str = str[2:]
return result
def base58_to_byte(s):
"""Converts a base58-encoded string to its data and version.
Throws if the base58 checksum is invalid."""
if not s:
return b''
n = 0
for c in s:
n *= 58
assert c in chars
digit = chars.index(c)
n += digit
h = '%x' % n
if len(h) % 2:
h = '0' + h
res = n.to_bytes((n.bit_length() + 7) // 8, 'big')
pad = 0
for c in s:
if c == chars[0]:
pad += 1
else:
break
res = b'\x00' * pad + res
# Assert if the checksum is invalid
assert_equal(hash256(res[:-4])[:4], res[-4:])
return res[1:-4], int(res[0])
def keyhash_to_p2pkh(hash, main=False):
assert len(hash) == 20
version = 235
return byte_to_base58(hash, version)
def scripthash_to_p2sh(hash, main=False, prefix=75):
assert len(hash) == 20
version = prefix
return byte_to_base58(hash, version)
def key_to_p2pkh(key, main=False):
key = check_key(key)
return keyhash_to_p2pkh(hash160(key), main)
def script_to_p2sh(script, main=False, prefix=75):
script = check_script(script)
return scripthash_to_p2sh(hash160(script), main, prefix)
def key_to_p2sh_p2wpkh(key, main=False):
key = check_key(key)
p2shscript = CScript([OP_0, hash160(key)])
return script_to_p2sh(p2shscript, main)
def program_to_witness(version, program, main=False):
if (type(program) is str):
program = hex_str_to_bytes(program)
assert 0 <= version <= 16
assert 2 <= len(program) <= 40
assert version > 0 or len(program) in [20, 32]
return encode_segwit_address("ert", version, program)
def script_to_p2wsh(script, main=False):
script = check_script(script)
return program_to_witness(0, sha256(script), main)
def key_to_p2wpkh(key, main=False):
key = check_key(key)
return program_to_witness(0, hash160(key), main)
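# Illustrative sketch (the pubkey below is an arbitrary, hypothetical 33-byte value):
# a compressed public key, given as hex, can be turned into the different regtest
# address forms with the helpers above, e.g.
#
#   pubkey = "02" + "11" * 32
#   key_to_p2pkh(pubkey)        # base58 P2PKH address (version byte 235)
#   key_to_p2wpkh(pubkey)       # bech32 P2WPKH address using the 'ert' hrp
#   key_to_p2sh_p2wpkh(pubkey)  # P2WPKH program nested in P2SH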
def script_to_p2sh_p2wsh(script, main=False):
script = check_script(script)
p2shscript = CScript([OP_0, sha256(script)])
return script_to_p2sh(p2shscript, main)
def check_key(key):
if (type(key) is str):
key = hex_str_to_bytes(key) # Assuming this is hex string
if (type(key) is bytes and (len(key) == 33 or len(key) == 65)):
return key
assert False
def check_script(script):
if (type(script) is str):
script = hex_str_to_bytes(script) # Assuming this is hex string
if (type(script) is bytes or type(script) is CScript):
return script
assert False
class TestFrameworkScript(unittest.TestCase):
def test_base58encodedecode(self):
def check_base58(data, version):
self.assertEqual(base58_to_byte(byte_to_base58(data, version)), (data, version))
check_base58(bytes.fromhex('1f8ea1702a7bd4941bca0941b852c4bbfedb2e05'), 111)
check_base58(bytes.fromhex('3a0b05f4d7f66c3ba7009f453530296c845cc9cf'), 111)
check_base58(bytes.fromhex('41c1eaf111802559bad61b60d62b1f897c63928a'), 111)
check_base58(bytes.fromhex('0041c1eaf111802559bad61b60d62b1f897c63928a'), 111)
check_base58(bytes.fromhex('000041c1eaf111802559bad61b60d62b1f897c63928a'), 111)
check_base58(bytes.fromhex('00000041c1eaf111802559bad61b60d62b1f897c63928a'), 111)
check_base58(bytes.fromhex('1f8ea1702a7bd4941bca0941b852c4bbfedb2e05'), 0)
check_base58(bytes.fromhex('3a0b05f4d7f66c3ba7009f453530296c845cc9cf'), 0)
check_base58(bytes.fromhex('41c1eaf111802559bad61b60d62b1f897c63928a'), 0)
check_base58(bytes.fromhex('0041c1eaf111802559bad61b60d62b1f897c63928a'), 0)
check_base58(bytes.fromhex('000041c1eaf111802559bad61b60d62b1f897c63928a'), 0)
check_base58(bytes.fromhex('00000041c1eaf111802559bad61b60d62b1f897c63928a'), 0)
| mit |
mikey1234/script.module.urlresolver | lib/urlresolver/plugins/crunchyroll.py | 3 | 3019 | '''
Crunchyroll urlresolver plugin
Copyright (C) 2013 voinage
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
import re
import urllib2
from urlresolver import common
import os
class crunchyrollResolver(Plugin, UrlResolver, PluginSettings):
implements = [UrlResolver, PluginSettings]
name = "crunchyroll"
def __init__(self):
p = self.get_setting('priority') or 100
self.priority = int(p)
self.net = Net()
#http://www.crunchyroll.co.uk/07-ghost/episode-2-nostalgic-memories-accompany-pain-573286
#http://www.crunchyroll.com/07-ghost/episode-2-nostalgic-memories-accompany-pain-573286
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
try:
html=self.net.http_GET('http://www.crunchyroll.com/android_rpc/?req=RpcApiAndroid_GetVideoWithAcl&media_id=%s'%media_id,{'Host':'www.crunchyroll.com',
'X-Device-Uniqueidentifier':'ffffffff-931d-1f73-ffff-ffffaf02fc5f',
'X-Device-Manufacturer':'HTC',
'X-Device-Model':'HTC Desire',
'X-Application-Name':'com.crunchyroll.crunchyroid',
'X-Device-Product':'htc_bravo',
'X-Device-Is-GoogleTV':'0'}).content
mp4=re.compile(r'"video_url":"(.+?)","h"').findall(html.replace('\\',''))[0]
return mp4
except Exception, e:
common.addon.log_error('**** Crunchyroll Error occured: %s' % e)
common.addon.show_small_popup(title='[B][COLOR white]CRUNCHYROLL[/COLOR][/B]', msg='[COLOR red]%s[/COLOR]' % e, delay=5000, image=error_logo)
return self.unresolvable(code=0, msg=e)
def get_url(self, host, media_id):
return 'http://www.crunchyroll.com/android_rpc/?req=RpcApiAndroid_GetVideoWithAcl&media_id=%s' % media_id
def get_host_and_id(self, url):
r = re.match(r'http://www.(crunchyroll).+?/.+?/.+?([^a-zA-Z-+]{6})', url)
if r:
return r.groups()
else:
return False
def valid_url(self, url, host):
if self.get_setting('enabled') == 'false': return False
return (re.match(r'http://www.(crunchyroll).+?/.+?/.+?([^a-zA-Z-+]{6})', url) or 'crunchyroll' in host)
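# --- illustrative usage sketch (editor's addition, not part of the original file) ---
# Resolving a stream with the class above; the page URL is a placeholder and the
# call sequence mirrors how the urlresolver framework drives its plugins.
def _example_resolve():
    resolver = crunchyrollResolver()
    page = 'http://www.crunchyroll.com/07-ghost/episode-2-nostalgic-memories-accompany-pain-573286'
    parts = resolver.get_host_and_id(page)
    if parts and resolver.valid_url(page, parts[0]):
        host, media_id = parts
        return resolver.get_media_url(host, media_id)
    return None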
| gpl-2.0 |
iRGBit/QGIS | python/plugins/processing/tests/TestData.py | 38 | 2242 | # -*- coding: utf-8 -*-
"""
***************************************************************************
TestData.py
---------------------
Date : March 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'March 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os.path
from processing.tools import dataobjects
dataFolder = os.path.join(os.path.dirname(__file__), 'data')
def table():
return os.path.join(dataFolder, 'table.dbf')
def points():
return os.path.join(dataFolder, 'points.shp')
def points2():
return os.path.join(dataFolder, 'points2.shp')
def raster():
return os.path.join(dataFolder, 'raster.tif')
def lines():
return os.path.join(dataFolder, 'lines.shp')
def polygons():
return os.path.join(dataFolder, 'polygons.shp')
def polygons2():
return os.path.join(dataFolder, 'polygons2.shp')
def polygonsGeoJson():
return os.path.join(dataFolder, 'polygons.geojson')
def union():
return os.path.join(dataFolder, 'union.shp')
def loadTestData():
dataobjects.load(points(), 'points')
dataobjects.load(points2(), 'points2')
dataobjects.load(polygons(), 'polygons')
dataobjects.load(polygons2(), 'polygons2')
dataobjects.load(polygonsGeoJson(), 'polygonsGeoJson')
dataobjects.load(lines(), 'lines')
dataobjects.load(raster(), 'raster')
dataobjects.load(table(), 'table')
dataobjects.load(union(), 'union')
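# --- illustrative usage sketch (editor's addition, not part of the original file) ---
# Typical use from a Processing test: resolve a fixture path or preload every
# test layer (assumes the tests/data directory that ships with this module).
def exampleUsage():
    path = points()     # absolute path to points.shp
    loadTestData()      # registers every fixture layer via dataobjects.load()
    return path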
| gpl-2.0 |
tersmitten/ansible | lib/ansible/modules/cloud/ovirt/ovirt_external_provider.py | 52 | 14722 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_external_provider
short_description: Module to manage external providers in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage external providers in oVirt/RHV"
options:
name:
description:
- "Name of the external provider to manage."
state:
description:
- "Should the external be present or absent"
- "When you are using absent for I(os_volume), you need to make
sure that SD is not attached to the data center!"
choices: ['present', 'absent']
default: present
description:
description:
- "Description of the external provider."
type:
description:
- "Type of the external provider."
choices: ['os_image', 'network', 'os_volume', 'foreman']
url:
description:
- "URL where external provider is hosted."
- "Applicable for those types: I(os_image), I(os_volume), I(network) and I(foreman)."
username:
description:
- "Username to be used for login to external provider."
- "Applicable for all types."
password:
description:
- "Password of the user specified in C(username) parameter."
- "Applicable for all types."
tenant_name:
description:
- "Name of the tenant."
- "Applicable for those types: I(os_image), I(os_volume) and I(network)."
aliases: ['tenant']
authentication_url:
description:
- "Keystone authentication URL of the openstack provider."
- "Applicable for those types: I(os_image), I(os_volume) and I(network)."
aliases: ['auth_url']
data_center:
description:
- "Name of the data center where provider should be attached."
- "Applicable for those type: I(os_volume)."
read_only:
description:
- "Specify if the network should be read only."
- "Applicable if C(type) is I(network)."
type: bool
network_type:
description:
- "Type of the external network provider either external (for example OVN) or neutron."
- "Applicable if C(type) is I(network)."
choices: ['external', 'neutron']
default: external
authentication_keys:
description:
- "List of authentication keys. Each key is represented by dict
like {'uuid': 'our-uuid', 'value': 'YourSecretValue=='}"
- "When you will not pass these keys and there are already some
of them defined in the system they will be removed."
- "Applicable for I(os_volume)."
default: []
version_added: "2.6"
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Add image external provider:
- ovirt_external_provider:
name: image_provider
type: os_image
url: http://1.2.3.4:9292
username: admin
password: 123456
tenant: admin
auth_url: http://1.2.3.4:35357/v2.0
# Add volume external provider:
- ovirt_external_provider:
name: image_provider
type: os_volume
url: http://1.2.3.4:9292
username: admin
password: 123456
tenant: admin
auth_url: http://1.2.3.4:5000/v2.0
authentication_keys:
-
uuid: "1234567-a1234-12a3-a234-123abc45678"
value: "ABCD00000000111111222333445w=="
# Add foreman provider:
- ovirt_external_provider:
name: foreman_provider
type: foreman
url: https://foreman.example.com
username: admin
password: 123456
# Add external network provider for OVN:
- ovirt_external_provider:
name: ovn_provider
type: network
network_type: external
url: http://1.2.3.4:9696
# Remove image external provider:
- ovirt_external_provider:
state: absent
name: image_provider
type: os_image
'''
RETURN = '''
id:
description: ID of the external provider which is managed
returned: On success if external provider is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
external_host_provider:
description: "Dictionary of all the external_host_provider attributes. External provider attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/external_host_provider."
returned: "On success and if parameter 'type: foreman' is used."
type: dict
openstack_image_provider:
description: "Dictionary of all the openstack_image_provider attributes. External provider attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_image_provider."
returned: "On success and if parameter 'type: os_image' is used."
type: dict
openstack_volume_provider:
description: "Dictionary of all the openstack_volume_provider attributes. External provider attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_volume_provider."
returned: "On success and if parameter 'type: os_volume' is used."
type: dict
openstack_network_provider:
description: "Dictionary of all the openstack_network_provider attributes. External provider attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_network_provider."
returned: "On success and if parameter 'type: network' is used."
type: dict
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_params,
check_sdk,
create_connection,
equal,
ovirt_full_argument_spec,
)
OS_VOLUME = 'os_volume'
OS_IMAGE = 'os_image'
NETWORK = 'network'
FOREMAN = 'foreman'
class ExternalProviderModule(BaseModule):
non_provider_params = ['type', 'authentication_keys', 'data_center']
def provider_type(self, provider_type):
self._provider_type = provider_type
def provider_module_params(self):
provider_params = [
(key, value) for key, value in self._module.params.items() if key
not in self.non_provider_params
]
provider_params.append(('data_center', self.get_data_center()))
return provider_params
def get_data_center(self):
dc_name = self._module.params.get("data_center", None)
if dc_name:
system_service = self._connection.system_service()
data_centers_service = system_service.data_centers_service()
return data_centers_service.list(
search='name=%s' % dc_name,
)[0]
return dc_name
def build_entity(self):
provider_type = self._provider_type(
requires_authentication=self._module.params.get('username') is not None,
)
if self._module.params.pop('type') == NETWORK:
setattr(
provider_type,
'type',
otypes.OpenStackNetworkProviderType(self._module.params.pop('network_type'))
)
for key, value in self.provider_module_params():
if hasattr(provider_type, key):
setattr(provider_type, key, value)
return provider_type
def update_check(self, entity):
return (
equal(self._module.params.get('description'), entity.description) and
equal(self._module.params.get('url'), entity.url) and
equal(self._module.params.get('authentication_url'), entity.authentication_url) and
equal(self._module.params.get('tenant_name'), getattr(entity, 'tenant_name', None)) and
equal(self._module.params.get('username'), entity.username)
)
def update_volume_provider_auth_keys(
self, provider, providers_service, keys
):
"""
Update auth keys for volume provider, if not exist add them or remove
if they are not specified and there are already defined in the external
volume provider.
Args:
provider (dict): Volume provider details.
providers_service (openstack_volume_providers_service): Provider
service.
keys (list): Keys to be updated/added to volume provider, each key
is represented as dict with keys: uuid, value.
"""
provider_service = providers_service.provider_service(provider['id'])
auth_keys_service = provider_service.authentication_keys_service()
provider_keys = auth_keys_service.list()
# removing keys which are not defined
for key in [
k.id for k in provider_keys if k.uuid not in [
defined_key['uuid'] for defined_key in keys
]
]:
self.changed = True
if not self._module.check_mode:
auth_keys_service.key_service(key).remove()
if not (provider_keys or keys):
# Nothing need to do when both are empty.
return
for key in keys:
key_id_for_update = None
for existing_key in provider_keys:
if key['uuid'] == existing_key.uuid:
key_id_for_update = existing_key.id
auth_key_usage_type = (
otypes.OpenstackVolumeAuthenticationKeyUsageType("ceph")
)
auth_key = otypes.OpenstackVolumeAuthenticationKey(
usage_type=auth_key_usage_type,
uuid=key['uuid'],
value=key['value'],
)
if not key_id_for_update:
self.changed = True
if not self._module.check_mode:
auth_keys_service.add(auth_key)
else:
# We cannot really distinguish here if it was really updated cause
# we cannot take key value to check if it was changed or not. So
# for sure we update here always.
self.changed = True
if not self._module.check_mode:
auth_key_service = (
auth_keys_service.key_service(key_id_for_update)
)
auth_key_service.update(auth_key)
def _external_provider_service(provider_type, system_service):
if provider_type == OS_IMAGE:
return otypes.OpenStackImageProvider, system_service.openstack_image_providers_service()
elif provider_type == NETWORK:
return otypes.OpenStackNetworkProvider, system_service.openstack_network_providers_service()
elif provider_type == OS_VOLUME:
return otypes.OpenStackVolumeProvider, system_service.openstack_volume_providers_service()
elif provider_type == FOREMAN:
return otypes.ExternalHostProvider, system_service.external_host_providers_service()
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent'],
default='present',
),
name=dict(default=None),
description=dict(default=None),
type=dict(
default=None,
required=True,
choices=[
OS_IMAGE, NETWORK, OS_VOLUME, FOREMAN,
],
aliases=['provider'],
),
url=dict(default=None),
username=dict(default=None),
password=dict(default=None, no_log=True),
tenant_name=dict(default=None, aliases=['tenant']),
authentication_url=dict(default=None, aliases=['auth_url']),
data_center=dict(default=None),
read_only=dict(default=None, type='bool'),
network_type=dict(
default='external',
choices=['external', 'neutron'],
),
authentication_keys=dict(
default=[], aliases=['auth_keys'], type='list', no_log=True,
),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
check_sdk(module)
check_params(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
provider_type_param = module.params.get('type')
provider_type, external_providers_service = _external_provider_service(
provider_type=provider_type_param,
system_service=connection.system_service(),
)
external_providers_module = ExternalProviderModule(
connection=connection,
module=module,
service=external_providers_service,
)
external_providers_module.provider_type(provider_type)
state = module.params.pop('state')
if state == 'absent':
ret = external_providers_module.remove()
elif state == 'present':
ret = external_providers_module.create()
openstack_volume_provider_id = ret.get('id')
if (
provider_type_param == OS_VOLUME and
openstack_volume_provider_id
):
external_providers_module.update_volume_provider_auth_keys(
ret, external_providers_service,
module.params.get('authentication_keys'),
)
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
| gpl-3.0 |
megaprojectske/megaprojects.co.ke | megaprojects/articles/mixins.py | 1 | 1028 | # See: http://hunterford.me/django-custom-model-manager-chaining/
import models
class ArticleManagerMixin(object):
def published(self, status=True):
if status:
return self.filter(status=models.Article.STATUS_PUBLISHED)
else:
return self.filter(status=models.Article.STATUS_DRAFT)
def articles(self):
return self.filter(kind=models.Article.KIND_ARTICLE)
def blog(self):
return self.filter(kind=models.Article.KIND_BLOG)
def featured(self):
return self.filter(kind=models.Article.KIND_FEATURE)
class ImageManagerMixin(object):
def published(self, status=True):
return self.filter(article__status=models.Article.STATUS_PUBLISHED).filter(status=status)
def articles(self):
return self.filter(article__kind=models.Article.KIND_ARTICLE)
def blog(self):
return self.filter(article__kind=models.Article.KIND_BLOG)
def featured(self):
return self.filter(article__kind=models.Article.KIND_FEATURE)
| apache-2.0 |
tryton-ar/padron_afip_ar | padron_afip.py | 1 | 7151 | #! -*- coding: utf8 -*-
"Herramienta para procesar y consultar el Padrón Unico de Contribuyentes AFIP"
# Documentación e información adicional:
# http://www.sistemasagiles.com.ar/trac/wiki/PadronContribuyentesAFIP
# Basado en pyafipws padron.py de Mariano Reingart
from trytond.model import ModelView, ModelSQL, fields
from trytond.pool import Pool
from trytond.wizard import Wizard, StateView, Button, StateTransition
import urllib2
import os
#import shelve
#import sqlite3
import zipfile
from email.utils import formatdate
# Data types (codes per RG 1361)
N = 'Numerico' # 2
A = 'Alfanumerico' # 3
I = 'Importe' # 4
C = A # 1 (caracter alfabetico)
B = A # 9 (blanco)
# Format and location of the full tax-status file per RG 1817
FORMATO = [
("cuit", 11, N, ""),
("denominacion", 30, A, ""),
("imp_ganancias", 2, A, "'NI', 'AC','EX', 'NC'"),
("imp_iva", 2, A, "'NI' , 'AC','EX','NA','XN','AN'"),
("monotributo", 2, A, "'NI', 'Codigo categoria tributaria'"),
("integrante_soc", 1, A, "'N' , 'S'"),
("empleador", 1, A, "'N', 'S'"),
("actividad_monotributo", 2, A, ""),
]
__all__ = ['PadronAfip', 'PadronAfipStart', 'PadronAfipImport']
class PadronAfip(ModelSQL, ModelView):
"padron.afip"
__name__ = "padron.afip"
denominacion = fields.Char('denominacion')
imp_ganancias = fields.Char('imp_ganancias')
imp_iva = fields.Char('imp_iva')
monotributo = fields.Char('monotributo')
integrante_soc = fields.Char('integrante_soc')
empleador = fields.Char('empleador')
actividad_monotributo = fields.Char('actividad_monotributo')
cuit = fields.Char('CUIT')
class PadronAfipStart(ModelView):
"padron afip start"
__name__ = "padron.afip.import.start"
class PadronAfipImport(Wizard):
"padron afip wizard import"
__name__ = "padron.afip.import"
start = StateView('padron.afip.import.start', 'padron_afip_ar.padron_afip_import_form', [
Button('Cancel','end', 'tryton-cancel'),
Button('Import', 'download_import', 'tryton-ok', default=True)
])
download_import = StateTransition()
def transition_download_import(self):
url = "http://www.afip.gob.ar/genericos/cInscripcion/archivos/apellidoNombreDenominacion.zip"
self._descargar(url)
self._procesar()
return 'end'
def _descargar(self, url, filename="padron.txt", proxy=None):
#import sys
#from utils import leer, escribir, N, A, I, get_install_dir
"Descarga el archivo de AFIP, devuelve 200 o 304 si no fue modificado"
proxies = {}
if proxy:
proxies['http'] = proxy
proxies['https'] = proxy
print "Abriendo URL %s ..." % url
req = urllib2.Request(url)
if os.path.exists(filename):
http_date = formatdate(timeval=os.path.getmtime(filename),
localtime=False, usegmt=True)
req.add_header('If-Modified-Since', http_date)
try:
web = urllib2.urlopen(req)
except urllib2.HTTPError, e:
if e.code == 304:
print "No modificado desde", http_date
return 304
else:
raise
# read response metadata:
meta = web.info()
lenght = float(meta['Content-Length'])
tmp = open(filename + ".zip", "wb")
print "Guardando"
size = 0
p0 = None
while True:
p = int(size / lenght * 100)
if p0 is None or p>p0:
print "Descargando ... %0d %%" % p
p0 = p
data = web.read(1024*100)
size = size + len(data)
if not data:
print "Descarga Terminada!"
break
tmp.write(data)
print "Abriendo ZIP..."
tmp.close()
web.close()
uf = open(filename + ".zip", "rb")
zf = zipfile.ZipFile(uf)
for fn in zf.namelist():
print "descomprimiendo", fn
tf = open(filename, "wb")
tf.write(zf.read(fn))
tf.close()
return 200
def _procesar(self, filename="padron.txt"):
"Analiza y crea la base de datos interna sqlite para consultas"
PadronAfip = Pool().get('padron.afip')
f = open(filename, "r")
keys = [k for k, l, t, d in FORMATO]
# conversion to a CSV spreadsheet (unused)
if False and not os.path.exists("padron.csv"):
csvfile = open('padron.csv', 'wb')
import csv
wr = csv.writer(csvfile, delimiter=',',
quotechar='"', quoting=csv.QUOTE_MINIMAL)
for i, l in enumerate(f):
if i % 100000 == 0:
print "Progreso: %d registros" % i
r = self._leer(l, FORMATO)
row = [r[k] for k in keys]
wr.writerow(row)
csvfile.close()
f.seek(0)
for i, l in enumerate(f):
if i % 10000 == 0: print i
registro = self._leer(l, FORMATO)
registro['cuit'] = str(registro['cuit'])
PadronAfip.create([registro])
def _leer(self, linea, formato, expandir_fechas=False):
"Analiza una linea de texto dado un formato, devuelve un diccionario"
dic = {}
comienzo = 1
for fmt in formato:
clave, longitud, tipo = fmt[0:3]
dec = (len(fmt)>3 and isinstance(fmt[3], int)) and fmt[3] or 2
valor = linea[comienzo-1:comienzo-1+longitud].strip()
try:
if chr(8) in valor or chr(127) in valor or chr(255) in valor:
valor = None # nulo
elif tipo == N:
if valor:
valor = long(valor)
else:
valor = 0
elif tipo == I:
if valor:
try:
if '.' in valor:
valor = float(valor)
else:
valor = valor.strip(" ")
valor = float(("%%s.%%0%sd" % dec) % (long(valor[:-dec] or '0'), int(valor[-dec:] or '0')))
except ValueError:
raise ValueError("Campo invalido: %s = '%s'" % (clave, valor))
else:
valor = 0.00
elif expandir_fechas and clave.lower().startswith("fec") and longitud <= 8:
if valor:
valor = "%s-%s-%s" % (valor[0:4], valor[4:6], valor[6:8])
else:
valor = None
else:
valor = valor.decode("ascii","ignore")
dic[clave] = valor
comienzo += longitud
except Exception, e:
raise ValueError("Error al leer campo %s pos %s val '%s': %s" % (
clave, comienzo, valor, str(e)))
return dic
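# --- illustrative usage sketch (editor's addition, not part of the original module) ---
# Looking up imported records from other Tryton code, assuming the import wizard
# above has already populated the padron.afip table; the CUIT is a placeholder.
def _example_lookup(cuit='20267565393'):
    PadronAfip = Pool().get('padron.afip')
    records = PadronAfip.search([('cuit', '=', cuit)])
    for record in records:
        print record.denominacion, record.imp_iva, record.monotributo
    return records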
| gpl-2.0 |
ojengwa/oh-mainline | vendor/packages/docutils/test/test_readers/test_pep/test_rfc2822.py | 19 | 6073 | #! /usr/bin/env python
# $Id: test_rfc2822.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Tests for RFC-2822 headers in PEPs (readers/pep.py).
"""
from __init__ import DocutilsTestSupport
def suite():
s = DocutilsTestSupport.PEPParserTestSuite()
s.generateTests(totest)
return s
totest = {}
totest['rfc2822'] = [
["""\
Author: Me
Version: 1
Date: 2002-04-23
""",
"""\
<document source="test data">
<field_list classes="rfc2822">
<field>
<field_name>
Author
<field_body>
<paragraph>
Me
<field>
<field_name>
Version
<field_body>
<paragraph>
1
<field>
<field_name>
Date
<field_body>
<paragraph>
2002-04-23
"""],
["""\
Author: Me
Version: 1
Date: 2002-04-23
.. Leading blank lines don't affect RFC-2822 header parsing.
""",
"""\
<document source="test data">
<field_list classes="rfc2822">
<field>
<field_name>
Author
<field_body>
<paragraph>
Me
<field>
<field_name>
Version
<field_body>
<paragraph>
1
<field>
<field_name>
Date
<field_body>
<paragraph>
2002-04-23
<comment xml:space="preserve">
Leading blank lines don't affect RFC-2822 header parsing.
"""],
["""\
.. A comment should prevent RFC-2822 header parsing.
Author: Me
Version: 1
Date: 2002-04-23
""",
"""\
<document source="test data">
<comment xml:space="preserve">
A comment should prevent RFC-2822 header parsing.
<paragraph>
Author: Me
Version: 1
Date: 2002-04-23
"""],
["""\
Author: Me
Version: 1
Date: 2002-04-23
""",
"""\
<document source="test data">
<field_list classes="rfc2822">
<field>
<field_name>
Author
<field_body>
<paragraph>
Me
<paragraph>
Version: 1
Date: 2002-04-23
"""],
["""\
field:
empty item above, no blank line
""",
"""\
<document source="test data">
<field_list classes="rfc2822">
<field>
<field_name>
field
<field_body>
<system_message level="2" line="2" source="test data" type="WARNING">
<paragraph>
RFC2822-style field list ends without a blank line; unexpected unindent.
<paragraph>
empty item above, no blank line
"""],
["""\
Author:
Me
Version:
1
Date:
2002-04-23
""",
"""\
<document source="test data">
<field_list classes="rfc2822">
<field>
<field_name>
Author
<field_body>
<paragraph>
Me
<field>
<field_name>
Version
<field_body>
<paragraph>
1
<field>
<field_name>
Date
<field_body>
<paragraph>
2002-04-23
"""],
["""\
Authors: Me,
Myself,
and I
Version: 1
or so
Date: 2002-04-23
(Tuesday)
""",
"""\
<document source="test data">
<field_list classes="rfc2822">
<field>
<field_name>
Authors
<field_body>
<paragraph>
Me,
Myself,
and I
<field>
<field_name>
Version
<field_body>
<paragraph>
1
or so
<field>
<field_name>
Date
<field_body>
<paragraph>
2002-04-23
(Tuesday)
"""],
["""\
Authors: Me,
Myself,
and I
Version: 1
or so
Date: 2002-04-23
(Tuesday)
""",
"""\
<document source="test data">
<field_list classes="rfc2822">
<field>
<field_name>
Authors
<field_body>
<paragraph>
Me,
Myself,
and I
<field>
<field_name>
Version
<field_body>
<paragraph>
1
or so
<field>
<field_name>
Date
<field_body>
<paragraph>
2002-04-23
(Tuesday)
"""],
["""\
Authors: - Me
- Myself
- I
Version:
""",
"""\
<document source="test data">
<field_list classes="rfc2822">
<field>
<field_name>
Authors
<field_body>
<bullet_list bullet="-">
<list_item>
<paragraph>
Me
<list_item>
<paragraph>
Myself
<list_item>
<paragraph>
I
<field>
<field_name>
Version
<field_body>
"""],
["""\
Authors: Me
Myself and I
Version:
""",
"""\
<document source="test data">
<field_list classes="rfc2822">
<field>
<field_name>
Authors
<field_body>
<paragraph>
Me
<block_quote>
<paragraph>
Myself and I
<system_message level="2" line="4" source="test data" type="WARNING">
<paragraph>
Block quote ends without a blank line; unexpected unindent.
<paragraph>
Version:
"""],
]
if __name__ == '__main__':
import unittest
unittest.main(defaultTest='suite')
| agpl-3.0 |
mitsuhiko/sentry | src/sentry/models/auditlogentry.py | 1 | 9023 | """
sentry.models.auditlogentry
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from django.db import models
from django.utils import timezone
from sentry.db.models import (
Model, BoundedPositiveIntegerField, FlexibleForeignKey, GzippedDictField,
sane_repr
)
class AuditLogEntryEvent(object):
MEMBER_INVITE = 1
MEMBER_ADD = 2
MEMBER_ACCEPT = 3
MEMBER_EDIT = 4
MEMBER_REMOVE = 5
MEMBER_JOIN_TEAM = 6
MEMBER_LEAVE_TEAM = 7
ORG_ADD = 10
ORG_EDIT = 11
ORG_REMOVE = 12
TEAM_ADD = 20
TEAM_EDIT = 21
TEAM_REMOVE = 22
PROJECT_ADD = 30
PROJECT_EDIT = 31
PROJECT_REMOVE = 32
PROJECT_SET_PUBLIC = 33
PROJECT_SET_PRIVATE = 34
TAGKEY_REMOVE = 40
PROJECTKEY_ADD = 50
PROJECTKEY_EDIT = 51
PROJECTKEY_REMOVE = 52
PROJECTKEY_ENABLE = 53
    PROJECTKEY_DISABLE = 53  # NOTE: same value as PROJECTKEY_ENABLE above -- apparent copy-paste slip; kept as-is since stored rows may already use 53
SSO_ENABLE = 60
SSO_DISABLE = 61
SSO_EDIT = 62
SSO_IDENTITY_LINK = 63
APIKEY_ADD = 70
APIKEY_EDIT = 71
APIKEY_REMOVE = 72
class AuditLogEntry(Model):
__core__ = False
organization = FlexibleForeignKey('sentry.Organization')
actor_label = models.CharField(max_length=64, null=True, blank=True)
# if the entry was created via a user
actor = FlexibleForeignKey('sentry.User', related_name='audit_actors',
null=True, blank=True)
# if the entry was created via an api key
actor_key = FlexibleForeignKey('sentry.ApiKey', null=True, blank=True)
target_object = BoundedPositiveIntegerField(null=True)
target_user = FlexibleForeignKey('sentry.User', null=True, blank=True,
related_name='audit_targets')
# TODO(dcramer): we want to compile this mapping into JSX for the UI
event = BoundedPositiveIntegerField(choices=(
# We emulate github a bit with event naming
(AuditLogEntryEvent.MEMBER_INVITE, 'member.invite'),
(AuditLogEntryEvent.MEMBER_ADD, 'member.add'),
(AuditLogEntryEvent.MEMBER_ACCEPT, 'member.accept-invite'),
(AuditLogEntryEvent.MEMBER_REMOVE, 'member.remove'),
(AuditLogEntryEvent.MEMBER_EDIT, 'member.edit'),
(AuditLogEntryEvent.MEMBER_JOIN_TEAM, 'member.join-team'),
(AuditLogEntryEvent.MEMBER_LEAVE_TEAM, 'member.leave-team'),
(AuditLogEntryEvent.TEAM_ADD, 'team.create'),
(AuditLogEntryEvent.TEAM_EDIT, 'team.edit'),
(AuditLogEntryEvent.TEAM_REMOVE, 'team.remove'),
(AuditLogEntryEvent.PROJECT_ADD, 'project.create'),
(AuditLogEntryEvent.PROJECT_EDIT, 'project.edit'),
(AuditLogEntryEvent.PROJECT_REMOVE, 'project.remove'),
(AuditLogEntryEvent.PROJECT_SET_PUBLIC, 'project.set-public'),
(AuditLogEntryEvent.PROJECT_SET_PRIVATE, 'project.set-private'),
(AuditLogEntryEvent.ORG_ADD, 'org.create'),
(AuditLogEntryEvent.ORG_EDIT, 'org.edit'),
(AuditLogEntryEvent.ORG_REMOVE, 'org.remove'),
(AuditLogEntryEvent.TAGKEY_REMOVE, 'tagkey.remove'),
(AuditLogEntryEvent.PROJECTKEY_ADD, 'projectkey.create'),
(AuditLogEntryEvent.PROJECTKEY_EDIT, 'projectkey.edit'),
(AuditLogEntryEvent.PROJECTKEY_REMOVE, 'projectkey.remove'),
(AuditLogEntryEvent.PROJECTKEY_ENABLE, 'projectkey.enable'),
(AuditLogEntryEvent.PROJECTKEY_DISABLE, 'projectkey.disable'),
(AuditLogEntryEvent.SSO_ENABLE, 'sso.enable'),
(AuditLogEntryEvent.SSO_DISABLE, 'sso.disable'),
(AuditLogEntryEvent.SSO_EDIT, 'sso.edit'),
(AuditLogEntryEvent.SSO_IDENTITY_LINK, 'sso-identity.link'),
(AuditLogEntryEvent.APIKEY_ADD, 'api-key.create'),
(AuditLogEntryEvent.APIKEY_EDIT, 'api-key.edit'),
(AuditLogEntryEvent.APIKEY_REMOVE, 'api-key.remove'),
))
ip_address = models.GenericIPAddressField(null=True, unpack_ipv4=True)
data = GzippedDictField()
datetime = models.DateTimeField(default=timezone.now)
class Meta:
app_label = 'sentry'
db_table = 'sentry_auditlogentry'
__repr__ = sane_repr('organization_id', 'type')
def save(self, *args, **kwargs):
if not self.actor_label:
assert self.actor or self.actor_key
if self.actor:
self.actor_label = self.actor.username
else:
self.actor_label = self.actor_key.key
super(AuditLogEntry, self).save(*args, **kwargs)
def get_actor_name(self):
if self.actor:
return self.actor.get_display_name()
elif self.actor_key:
return self.actor_key.key + ' (api key)'
return self.actor_label
def get_note(self):
if self.event == AuditLogEntryEvent.MEMBER_INVITE:
return 'invited member %s' % (self.data['email'],)
elif self.event == AuditLogEntryEvent.MEMBER_ADD:
if self.target_user == self.actor:
return 'joined the organization'
return 'added member %s' % (self.target_user.get_display_name(),)
elif self.event == AuditLogEntryEvent.MEMBER_ACCEPT:
return 'accepted the membership invite'
elif self.event == AuditLogEntryEvent.MEMBER_REMOVE:
if self.target_user == self.actor:
return 'left the organization'
return 'removed member %s' % (self.data.get('email') or self.target_user.get_display_name(),)
elif self.event == AuditLogEntryEvent.MEMBER_EDIT:
return 'edited member %s' % (self.data.get('email') or self.target_user.get_display_name(),)
elif self.event == AuditLogEntryEvent.MEMBER_JOIN_TEAM:
if self.target_user == self.actor:
return 'joined team %s' % (self.data['team_slug'],)
return 'added %s to team %s' % (
self.data.get('email') or self.target_user.get_display_name(),
self.data['team_slug'],
)
elif self.event == AuditLogEntryEvent.MEMBER_LEAVE_TEAM:
if self.target_user == self.actor:
return 'left team %s' % (self.data['team_slug'],)
return 'removed %s from team %s' % (
self.data.get('email') or self.target_user.get_display_name(),
self.data['team_slug'],
)
elif self.event == AuditLogEntryEvent.ORG_ADD:
return 'created the organization'
elif self.event == AuditLogEntryEvent.ORG_EDIT:
return 'edited the organization'
elif self.event == AuditLogEntryEvent.TEAM_ADD:
return 'created team %s' % (self.data['slug'],)
elif self.event == AuditLogEntryEvent.TEAM_EDIT:
return 'edited team %s' % (self.data['slug'],)
elif self.event == AuditLogEntryEvent.TEAM_REMOVE:
return 'removed team %s' % (self.data['slug'],)
elif self.event == AuditLogEntryEvent.PROJECT_ADD:
return 'created project %s' % (self.data['slug'],)
elif self.event == AuditLogEntryEvent.PROJECT_EDIT:
return 'edited project %s' % (self.data['slug'],)
elif self.event == AuditLogEntryEvent.PROJECT_REMOVE:
return 'removed project %s' % (self.data['slug'],)
elif self.event == AuditLogEntryEvent.TAGKEY_REMOVE:
return 'removed tags matching %s = *' % (self.data['key'],)
elif self.event == AuditLogEntryEvent.PROJECTKEY_ADD:
return 'added project key %s' % (self.data['public_key'],)
elif self.event == AuditLogEntryEvent.PROJECTKEY_EDIT:
return 'edited project key %s' % (self.data['public_key'],)
elif self.event == AuditLogEntryEvent.PROJECTKEY_REMOVE:
return 'removed project key %s' % (self.data['public_key'],)
elif self.event == AuditLogEntryEvent.PROJECTKEY_ENABLE:
return 'enabled project key %s' % (self.data['public_key'],)
elif self.event == AuditLogEntryEvent.PROJECTKEY_DISABLE:
return 'disabled project key %s' % (self.data['public_key'],)
elif self.event == AuditLogEntryEvent.SSO_ENABLE:
return 'enabled sso (%s)' % (self.data['provider'],)
elif self.event == AuditLogEntryEvent.SSO_DISABLE:
return 'disabled sso (%s)' % (self.data['provider'],)
elif self.event == AuditLogEntryEvent.SSO_EDIT:
return 'edited sso settings'
elif self.event == AuditLogEntryEvent.SSO_IDENTITY_LINK:
return 'linked their account to a new identity'
elif self.event == AuditLogEntryEvent.APIKEY_ADD:
return 'added api key %s' % (self.data['label'],)
elif self.event == AuditLogEntryEvent.APIKEY_EDIT:
return 'edited api key %s' % (self.data['label'],)
elif self.event == AuditLogEntryEvent.APIKEY_REMOVE:
return 'removed api key %s' % (self.data['label'],)
return ''
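# --- illustrative usage sketch (editor's addition, not part of the original module) ---
# How an audit entry is typically recorded; `org`, `user` and `project` stand in
# for existing model instances and are placeholders, not guaranteed Sentry API.
def _example_log_project_add(org, user, project, ip_address=None):
    return AuditLogEntry.objects.create(
        organization=org,
        actor=user,
        ip_address=ip_address,
        target_object=project.id,
        event=AuditLogEntryEvent.PROJECT_ADD,
        data={'slug': project.slug},  # get_note() above reads 'slug' for this event
    )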
| bsd-3-clause |
sbellem/django | django/test/runner.py | 148 | 14807 | import logging
import os
import unittest
from importlib import import_module
from unittest import TestSuite, defaultTestLoader
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.test import SimpleTestCase, TestCase
from django.test.utils import setup_test_environment, teardown_test_environment
from django.utils.datastructures import OrderedSet
from django.utils.six import StringIO
class DebugSQLTextTestResult(unittest.TextTestResult):
def __init__(self, stream, descriptions, verbosity):
self.logger = logging.getLogger('django.db.backends')
self.logger.setLevel(logging.DEBUG)
super(DebugSQLTextTestResult, self).__init__(stream, descriptions, verbosity)
def startTest(self, test):
self.debug_sql_stream = StringIO()
self.handler = logging.StreamHandler(self.debug_sql_stream)
self.logger.addHandler(self.handler)
super(DebugSQLTextTestResult, self).startTest(test)
def stopTest(self, test):
super(DebugSQLTextTestResult, self).stopTest(test)
self.logger.removeHandler(self.handler)
if self.showAll:
self.debug_sql_stream.seek(0)
self.stream.write(self.debug_sql_stream.read())
self.stream.writeln(self.separator2)
def addError(self, test, err):
super(DebugSQLTextTestResult, self).addError(test, err)
self.debug_sql_stream.seek(0)
self.errors[-1] = self.errors[-1] + (self.debug_sql_stream.read(),)
def addFailure(self, test, err):
super(DebugSQLTextTestResult, self).addFailure(test, err)
self.debug_sql_stream.seek(0)
self.failures[-1] = self.failures[-1] + (self.debug_sql_stream.read(),)
def printErrorList(self, flavour, errors):
for test, err, sql_debug in errors:
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
self.stream.writeln(self.separator2)
self.stream.writeln("%s" % err)
self.stream.writeln(self.separator2)
self.stream.writeln("%s" % sql_debug)
class DiscoverRunner(object):
"""
A Django test runner that uses unittest2 test discovery.
"""
test_suite = TestSuite
test_runner = unittest.TextTestRunner
test_loader = defaultTestLoader
reorder_by = (TestCase, SimpleTestCase)
def __init__(self, pattern=None, top_level=None, verbosity=1,
interactive=True, failfast=False, keepdb=False,
reverse=False, debug_sql=False, **kwargs):
self.pattern = pattern
self.top_level = top_level
self.verbosity = verbosity
self.interactive = interactive
self.failfast = failfast
self.keepdb = keepdb
self.reverse = reverse
self.debug_sql = debug_sql
@classmethod
def add_arguments(cls, parser):
parser.add_argument('-t', '--top-level-directory',
action='store', dest='top_level', default=None,
help='Top level of project for unittest discovery.')
parser.add_argument('-p', '--pattern', action='store', dest='pattern',
default="test*.py",
help='The test matching pattern. Defaults to test*.py.')
parser.add_argument('-k', '--keepdb', action='store_true', dest='keepdb',
default=False,
help='Preserves the test DB between runs.')
parser.add_argument('-r', '--reverse', action='store_true', dest='reverse',
default=False,
help='Reverses test cases order.')
parser.add_argument('-d', '--debug-sql', action='store_true', dest='debug_sql',
default=False,
help='Prints logged SQL queries on failure.')
def setup_test_environment(self, **kwargs):
setup_test_environment()
settings.DEBUG = False
unittest.installHandler()
def build_suite(self, test_labels=None, extra_tests=None, **kwargs):
suite = self.test_suite()
test_labels = test_labels or ['.']
extra_tests = extra_tests or []
discover_kwargs = {}
if self.pattern is not None:
discover_kwargs['pattern'] = self.pattern
if self.top_level is not None:
discover_kwargs['top_level_dir'] = self.top_level
for label in test_labels:
kwargs = discover_kwargs.copy()
tests = None
label_as_path = os.path.abspath(label)
# if a module, or "module.ClassName[.method_name]", just run those
if not os.path.exists(label_as_path):
tests = self.test_loader.loadTestsFromName(label)
elif os.path.isdir(label_as_path) and not self.top_level:
# Try to be a bit smarter than unittest about finding the
# default top-level for a given directory path, to avoid
# breaking relative imports. (Unittest's default is to set
# top-level equal to the path, which means relative imports
# will result in "Attempted relative import in non-package.").
# We'd be happy to skip this and require dotted module paths
# (which don't cause this problem) instead of file paths (which
# do), but in the case of a directory in the cwd, which would
# be equally valid if considered as a top-level module or as a
# directory path, unittest unfortunately prefers the latter.
top_level = label_as_path
while True:
init_py = os.path.join(top_level, '__init__.py')
if os.path.exists(init_py):
try_next = os.path.dirname(top_level)
if try_next == top_level:
# __init__.py all the way down? give up.
break
top_level = try_next
continue
break
kwargs['top_level_dir'] = top_level
if not (tests and tests.countTestCases()) and is_discoverable(label):
# Try discovery if path is a package or directory
tests = self.test_loader.discover(start_dir=label, **kwargs)
# Make unittest forget the top-level dir it calculated from this
# run, to support running tests from two different top-levels.
self.test_loader._top_level_dir = None
suite.addTests(tests)
for test in extra_tests:
suite.addTest(test)
return reorder_suite(suite, self.reorder_by, self.reverse)
def setup_databases(self, **kwargs):
return setup_databases(
self.verbosity, self.interactive, self.keepdb, self.debug_sql,
**kwargs
)
def get_resultclass(self):
return DebugSQLTextTestResult if self.debug_sql else None
def run_suite(self, suite, **kwargs):
resultclass = self.get_resultclass()
return self.test_runner(
verbosity=self.verbosity,
failfast=self.failfast,
resultclass=resultclass,
).run(suite)
def teardown_databases(self, old_config, **kwargs):
"""
Destroys all the non-mirror databases.
"""
old_names, mirrors = old_config
for connection, old_name, destroy in old_names:
if destroy:
connection.creation.destroy_test_db(old_name, self.verbosity, self.keepdb)
def teardown_test_environment(self, **kwargs):
unittest.removeHandler()
teardown_test_environment()
def suite_result(self, suite, result, **kwargs):
return len(result.failures) + len(result.errors)
def run_tests(self, test_labels, extra_tests=None, **kwargs):
"""
Run the unit tests for all the test labels in the provided list.
Test labels should be dotted Python paths to test modules, test
classes, or test methods.
A list of 'extra' tests may also be provided; these tests
will be added to the test suite.
Returns the number of tests that failed.
"""
self.setup_test_environment()
suite = self.build_suite(test_labels, extra_tests)
old_config = self.setup_databases()
result = self.run_suite(suite)
self.teardown_databases(old_config)
self.teardown_test_environment()
return self.suite_result(suite, result)
def is_discoverable(label):
"""
Check if a test label points to a python package or file directory.
Relative labels like "." and ".." are seen as directories.
"""
try:
mod = import_module(label)
except (ImportError, TypeError):
pass
else:
return hasattr(mod, '__path__')
return os.path.isdir(os.path.abspath(label))
def dependency_ordered(test_databases, dependencies):
"""
Reorder test_databases into an order that honors the dependencies
described in TEST[DEPENDENCIES].
"""
ordered_test_databases = []
resolved_databases = set()
    # Maps db signature to dependencies of all its aliases
dependencies_map = {}
# sanity check - no DB can depend on its own alias
for sig, (_, aliases) in test_databases:
all_deps = set()
for alias in aliases:
all_deps.update(dependencies.get(alias, []))
if not all_deps.isdisjoint(aliases):
raise ImproperlyConfigured(
"Circular dependency: databases %r depend on each other, "
"but are aliases." % aliases)
dependencies_map[sig] = all_deps
while test_databases:
changed = False
deferred = []
        # Try to find a DB that has all its dependencies met
for signature, (db_name, aliases) in test_databases:
if dependencies_map[signature].issubset(resolved_databases):
resolved_databases.update(aliases)
ordered_test_databases.append((signature, (db_name, aliases)))
changed = True
else:
deferred.append((signature, (db_name, aliases)))
if not changed:
raise ImproperlyConfigured(
"Circular dependency in TEST[DEPENDENCIES]")
test_databases = deferred
return ordered_test_databases
def reorder_suite(suite, classes, reverse=False):
"""
Reorders a test suite by test type.
`classes` is a sequence of types
All tests of type classes[0] are placed first, then tests of type
classes[1], etc. Tests with no match in classes are placed last.
If `reverse` is True, tests within classes are sorted in opposite order,
but test classes are not reversed.
"""
class_count = len(classes)
suite_class = type(suite)
bins = [OrderedSet() for i in range(class_count + 1)]
partition_suite(suite, classes, bins, reverse=reverse)
reordered_suite = suite_class()
for i in range(class_count + 1):
reordered_suite.addTests(bins[i])
return reordered_suite
def partition_suite(suite, classes, bins, reverse=False):
"""
Partitions a test suite by test type. Also prevents duplicated tests.
classes is a sequence of types
bins is a sequence of TestSuites, one more than classes
reverse changes the ordering of tests within bins
Tests of type classes[i] are added to bins[i],
    tests with no match found in classes are placed in bins[-1]
"""
suite_class = type(suite)
if reverse:
suite = reversed(tuple(suite))
for test in suite:
if isinstance(test, suite_class):
partition_suite(test, classes, bins, reverse=reverse)
else:
for i in range(len(classes)):
if isinstance(test, classes[i]):
bins[i].add(test)
break
else:
bins[-1].add(test)
def setup_databases(verbosity, interactive, keepdb=False, debug_sql=False, **kwargs):
from django.db import connections, DEFAULT_DB_ALIAS
# First pass -- work out which databases actually need to be created,
# and which ones are test mirrors or duplicate entries in DATABASES
mirrored_aliases = {}
test_databases = {}
dependencies = {}
default_sig = connections[DEFAULT_DB_ALIAS].creation.test_db_signature()
for alias in connections:
connection = connections[alias]
test_settings = connection.settings_dict['TEST']
if test_settings['MIRROR']:
# If the database is marked as a test mirror, save
# the alias.
mirrored_aliases[alias] = test_settings['MIRROR']
else:
# Store a tuple with DB parameters that uniquely identify it.
# If we have two aliases with the same values for that tuple,
# we only need to create the test database once.
item = test_databases.setdefault(
connection.creation.test_db_signature(),
(connection.settings_dict['NAME'], set())
)
item[1].add(alias)
if 'DEPENDENCIES' in test_settings:
dependencies[alias] = test_settings['DEPENDENCIES']
else:
if alias != DEFAULT_DB_ALIAS and connection.creation.test_db_signature() != default_sig:
dependencies[alias] = test_settings.get('DEPENDENCIES', [DEFAULT_DB_ALIAS])
# Second pass -- actually create the databases.
old_names = []
mirrors = []
for signature, (db_name, aliases) in dependency_ordered(
test_databases.items(), dependencies):
test_db_name = None
# Actually create the database for the first connection
for alias in aliases:
connection = connections[alias]
if test_db_name is None:
test_db_name = connection.creation.create_test_db(
verbosity,
autoclobber=not interactive,
keepdb=keepdb,
serialize=connection.settings_dict.get("TEST", {}).get("SERIALIZE", True),
)
destroy = True
else:
connection.settings_dict['NAME'] = test_db_name
destroy = False
old_names.append((connection, db_name, destroy))
for alias, mirror_alias in mirrored_aliases.items():
mirrors.append((alias, connections[alias].settings_dict['NAME']))
connections[alias].creation.set_as_test_mirror(
connections[mirror_alias].settings_dict)
if debug_sql:
for alias in connections:
connections[alias].force_debug_cursor = True
return old_names, mirrors
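# --- illustrative usage sketch (editor's addition, not part of Django itself) ---
# Driving DiscoverRunner programmatically; assumes DJANGO_SETTINGS_MODULE is set
# and django.setup() has been called, and 'myapp.tests' is a placeholder label.
def _example_run():
    runner = DiscoverRunner(verbosity=2, keepdb=False)
    failures = runner.run_tests(['myapp.tests'])
    return failures  # number of failed or errored tests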
| bsd-3-clause |
mday299/ardupilot | Tools/autotest/pysim/iris_ros.py | 81 | 3622 | #!/usr/bin/env python
"""
Python interface to euroc ROS multirotor simulator
See https://pixhawk.org/dev/ros/sitl
"""
import time
import mav_msgs.msg as mav_msgs
import px4.msg as px4
import rosgraph_msgs.msg as rosgraph_msgs
import rospy
import sensor_msgs.msg as sensor_msgs
from aircraft import Aircraft
from rotmat import Vector3, Matrix3
def quat_to_dcm(q1, q2, q3, q4):
"""Convert quaternion to DCM."""
q3q3 = q3 * q3
q3q4 = q3 * q4
q2q2 = q2 * q2
q2q3 = q2 * q3
q2q4 = q2 * q4
q1q2 = q1 * q2
q1q3 = q1 * q3
q1q4 = q1 * q4
q4q4 = q4 * q4
m = Matrix3()
m.a.x = 1.0-2.0*(q3q3 + q4q4)
m.a.y = 2.0*(q2q3 - q1q4)
m.a.z = 2.0*(q2q4 + q1q3)
m.b.x = 2.0*(q2q3 + q1q4)
m.b.y = 1.0-2.0*(q2q2 + q4q4)
m.b.z = 2.0*(q3q4 - q1q2)
m.c.x = 2.0*(q2q4 - q1q3)
m.c.y = 2.0*(q3q4 + q1q2)
m.c.z = 1.0-2.0*(q2q2 + q3q3)
return m
class IrisRos(Aircraft):
"""A IRIS MultiCopter from ROS."""
def __init__(self):
Aircraft.__init__(self)
self.max_rpm = 1200
self.have_new_time = False
self.have_new_imu = False
self.have_new_pos = False
topics = {
"/clock" : (self.clock_cb, rosgraph_msgs.Clock),
"/iris/imu" : (self.imu_cb, sensor_msgs.Imu),
"/iris/vehicle_local_position" : (self.pos_cb, px4.vehicle_local_position),
}
rospy.init_node('ArduPilot', anonymous=True)
for topic in topics.keys():
(callback, msgtype) = topics[topic]
rospy.Subscriber(topic, msgtype, callback)
self.motor_pub = rospy.Publisher('/iris/command/motor_speed',
mav_msgs.CommandMotorSpeed,
queue_size=1)
self.last_time = 0
# spin() simply keeps python from exiting until this node is stopped
# rospy.spin()
def clock_cb(self, msg):
self.time_now = self.time_base + msg.clock.secs + msg.clock.nsecs*1.0e-9
self.have_new_time = True
def imu_cb(self, msg):
self.gyro = Vector3(msg.angular_velocity.x,
-msg.angular_velocity.y,
-msg.angular_velocity.z)
self.accel_body = Vector3(msg.linear_acceleration.x,
-msg.linear_acceleration.y,
-msg.linear_acceleration.z)
self.dcm = quat_to_dcm(msg.orientation.w,
msg.orientation.x,
-msg.orientation.y,
-msg.orientation.z)
self.have_new_imu = True
def pos_cb(self, msg):
self.velocity = Vector3(msg.vx, msg.vy, msg.vz)
self.position = Vector3(msg.x, msg.y, msg.z)
self.have_new_pos = True
def update(self, actuators):
while self.last_time == self.time_now or not self.have_new_time or not self.have_new_imu or not self.have_new_pos:
time.sleep(0.001)
self.have_new_time = False
self.have_new_pos = False
self.have_new_imu = False
# create motor speed message
msg = mav_msgs.CommandMotorSpeed()
msg.header.stamp = rospy.get_rostime()
motor_speed = []
for i in range(len(actuators)):
motor_speed.append(actuators[i]*self.max_rpm)
msg.motor_speed = motor_speed
self.last_time = self.time_now
self.motor_pub.publish(msg)
# update lat/lon/altitude
self.update_position()
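# --- illustrative usage sketch (editor's addition, not part of the original file) ---
# A rough main loop: step the simulated copter with constant actuator commands.
# Assumes a running ROS master publishing the /iris topics listed above; the 0.5
# normalised rotor commands are placeholders.
if __name__ == '__main__':
    iris = IrisRos()
    actuators = [0.5, 0.5, 0.5, 0.5]
    while not rospy.is_shutdown():
        iris.update(actuators)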
| gpl-3.0 |
RedstonerServer/redstoner-utils | iptracker.py | 1 | 4991 | import mysqlhack
import org.bukkit as bukkit
import json
from java.util import UUID as UUID
from helpers import *
from org.bukkit import *
from traceback import format_exc as trace
from iptracker_secrets import *
# NOTE (editor): threading and zxJDBC are used below but not imported explicitly in
# this excerpt; they may be re-exported by the star imports above, so the explicit
# imports here are a hedged precaution rather than confirmed upstream code.
import threading
from com.ziclix.python.sql import zxJDBC
iptrack_permission = "utils.iptrack"
iptrack_version = "1.1.0"
@hook.event("player.PlayerJoinEvent", "low")
def on_player_join(event):
t = threading.Thread(target=on_player_join_thread, args=(event, ))
t.daemon = True
t.start()
def on_player_join_thread(event):
player = event.getPlayer()
ip = player.getAddress().getHostString()
uuid = uid(player)
conn = zxJDBC.connect(mysql_database, mysql_user, mysql_pass, "com.mysql.jdbc.Driver")
curs = conn.cursor()
curs.execute("SELECT ips FROM uuid2ips WHERE uuid = ?", (uuid, ))
results = curs.fetchall()
if len(results) == 0:
ips = []
else:
ips = json.loads(results[0][0])
curs.execute("SELECT uuids FROM ip2uuids WHERE ip = ?", (ip, ))
results = curs.fetchall()
if len(results) == 0:
uuids = []
else:
uuids = json.loads(results[0][0])
new_ip_entry = (len(ips) == 0)
new_uuid_entry = (len(uuids) == 0)
if ip not in ips:
ips.append(ip)
if new_ip_entry:
curs.execute("INSERT INTO uuid2ips VALUES (?,?)", (uuid, json.dumps(ips), ))
else:
curs.execute("UPDATE uuid2ips SET ips = ? WHERE uuid = ?", (json.dumps(ips), uuid, ))
if uuid not in uuids:
uuids.append(uuid)
if new_uuid_entry:
curs.execute("INSERT INTO ip2uuids VALUES (?,?)", (ip, json.dumps(uuids), ))
else:
curs.execute("UPDATE ip2uuids SET uuids = ? WHERE ip = ?", (json.dumps(uuids), ip, ))
conn.commit()
curs.close()
conn.close()
@hook.command("getinfo")
def on_getinfo_command(sender, args):
t = threading.Thread(target=on_getinfo_command_thread, args=(sender, args))
t.daemon = True
t.start()
def on_getinfo_command_thread(sender, args):
if(sender.hasPermission(iptrack_permission)):
if not checkargs(sender, args, 1, 1):
return False
else:
if is_ip(args[0]):
conn = zxJDBC.connect(mysql_database, mysql_user, mysql_pass, "com.mysql.jdbc.Driver")
curs = conn.cursor()
curs.execute("SELECT uuids FROM ip2uuids WHERE ip = ?", (args[0], ))
results = curs.fetchall()
curs.close()
conn.close()
if len(results) == 0:
msg(sender, "IP " + args[0] + " is not registered in the database, maybe you got a number wrong?")
else:
uuids = json.loads(results[0][0])
msg(sender, "IP " + args[0] + " was seen with " + str(len(uuids)) + " different Accounts:")
for i in range(0, len(uuids)):
p=Bukkit.getOfflinePlayer(UUID.fromString(uuids[i]))
if is_player(sender):
send_JSON_message(sender.getName(), '["",{"text":"' + p.getName() + ' - (uuid: ' + uuids[i] + '","color":"gold","clickEvent":{"action":"run_command","value":"/getinfo ' + p.getName() + '"},"hoverEvent":{"action":"show_text","value":{"text":"","extra":[{"text":"To search for ' + p.getName() + ' in the database, simply click the name!","color":"gold"}]}}}]')
else:
msg(sender,p.getName() + " - (uuid: " + uuids[i] + ")")
else:
target = Bukkit.getOfflinePlayer(args[0])
uuid = target.getUniqueId()
conn = zxJDBC.connect(mysql_database, mysql_user, mysql_pass, "com.mysql.jdbc.Driver")
curs = conn.cursor()
curs.execute("SELECT ips FROM uuid2ips WHERE uuid = ?", (uuid.toString(), ))
results = curs.fetchall()
curs.close()
conn.close()
if len(results) == 0:
msg(sender, "Player " + args[0] + " is not registered in the database, maybe you misspelled the name?")
else:
ips = json.loads(results[0][0])
msg(sender, "Player " + args[0] + " was seen with " + str(len(ips)) + " different IPs:")
for i in range(0, len(ips)):
if is_player(sender):
send_JSON_message(sender.getName(), '["",{"text":"' + ips[i] + '","color":"gold","clickEvent":{"action":"run_command","value":"/getinfo ' + ips[i] + '"},"hoverEvent":{"action":"show_text","value":{"text":"","extra":[{"text":"To search for the IP ' + ips[i] + ' in the database, simply click the IP!","color":"gold"}]}}}]')
else:
msg(sender,ips[i])
else:
noperm(sender)
return True
| mit |
HeraclesHX/scikit-learn | sklearn/datasets/california_housing.py | 198 | 3877 | """California housing dataset.
The original database is available from StatLib
http://lib.stat.cmu.edu/
The data contains 20,640 observations on 9 variables.
This dataset contains the average house value as target variable
and the following input variables (features): average income,
housing average age, average rooms, average bedrooms, population,
average occupation, latitude, and longitude in that order.
References
----------
Pace, R. Kelley and Ronald Barry, Sparse Spatial Autoregressions,
Statistics and Probability Letters, 33 (1997) 291-297.
"""
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from io import BytesIO
from os.path import join, exists
from os import makedirs
from zipfile import ZipFile
try:
# Python 2
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.request import urlopen
import numpy as np
from .base import get_data_home, Bunch
from ..externals import joblib
DATA_URL = "http://lib.stat.cmu.edu/modules.php?op=modload&name=Downloads&"\
"file=index&req=getit&lid=83"
TARGET_FILENAME = "cal_housing.pkz"
# Grab the module-level docstring to use as a description of the
# dataset
MODULE_DOCS = __doc__
def fetch_california_housing(data_home=None, download_if_missing=True):
"""Loader for the California housing dataset from StatLib.
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : ndarray, shape [20640, 8]
Each row corresponding to the 8 feature values in order.
dataset.target : numpy array of shape (20640,)
Each value corresponds to the average house value in units of 100,000.
dataset.feature_names : array of length 8
Array of ordered feature names used in the dataset.
dataset.DESCR : string
Description of the California housing dataset.
Notes
------
This dataset consists of 20,640 samples and 9 features.
"""
data_home = get_data_home(data_home=data_home)
if not exists(data_home):
makedirs(data_home)
if not exists(join(data_home, TARGET_FILENAME)):
print('downloading Cal. housing from %s to %s' % (DATA_URL, data_home))
fhandle = urlopen(DATA_URL)
buf = BytesIO(fhandle.read())
zip_file = ZipFile(buf)
try:
cadata_fd = zip_file.open('cadata.txt', 'r')
cadata = BytesIO(cadata_fd.read())
# skip the first 27 lines (documentation)
cal_housing = np.loadtxt(cadata, skiprows=27)
joblib.dump(cal_housing, join(data_home, TARGET_FILENAME),
compress=6)
finally:
zip_file.close()
else:
cal_housing = joblib.load(join(data_home, TARGET_FILENAME))
feature_names = ["MedInc", "HouseAge", "AveRooms", "AveBedrms",
"Population", "AveOccup", "Latitude", "Longitude"]
target, data = cal_housing[:, 0], cal_housing[:, 1:]
# avg rooms = total rooms / households
data[:, 2] /= data[:, 5]
# avg bed rooms = total bed rooms / households
data[:, 3] /= data[:, 5]
    # avg occupancy = population / households
data[:, 5] = data[:, 4] / data[:, 5]
# target in units of 100,000
target = target / 100000.0
return Bunch(data=data,
target=target,
feature_names=feature_names,
DESCR=MODULE_DOCS)
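# --- illustrative usage sketch (editor's addition, not part of scikit-learn) ---
# Fetching the dataset and inspecting it; assumes network access on first use
# (or a cached copy under the scikit-learn data home).
if __name__ == '__main__':
    housing = fetch_california_housing()
    print(housing.data.shape)        # (20640, 8)
    print(housing.feature_names)
    print(housing.target[:5])        # house values in units of 100,000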
| bsd-3-clause |
PythonScientists/Shape | env/lib/python3.5/site-packages/click/__init__.py | 135 | 2858 | # -*- coding: utf-8 -*-
"""
click
~~~~~
Click is a simple Python module that wraps the stdlib's optparse to make
writing command line scripts fun. Unlike other modules, it's based around
a simple API that does not come with too much magic and is composable.
In case optparse ever gets removed from the stdlib, it will be shipped by
this module.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
# Core classes
from .core import Context, BaseCommand, Command, MultiCommand, Group, \
CommandCollection, Parameter, Option, Argument
# Globals
from .globals import get_current_context
# Decorators
from .decorators import pass_context, pass_obj, make_pass_decorator, \
command, group, argument, option, confirmation_option, \
password_option, version_option, help_option
# Types
from .types import ParamType, File, Path, Choice, IntRange, Tuple, \
STRING, INT, FLOAT, BOOL, UUID, UNPROCESSED
# Utilities
from .utils import echo, get_binary_stream, get_text_stream, open_file, \
format_filename, get_app_dir, get_os_args
# Terminal functions
from .termui import prompt, confirm, get_terminal_size, echo_via_pager, \
progressbar, clear, style, unstyle, secho, edit, launch, getchar, \
pause
# Exceptions
from .exceptions import ClickException, UsageError, BadParameter, \
FileError, Abort, NoSuchOption, BadOptionUsage, BadArgumentUsage, \
MissingParameter
# Formatting
from .formatting import HelpFormatter, wrap_text
# Parsing
from .parser import OptionParser
__all__ = [
# Core classes
'Context', 'BaseCommand', 'Command', 'MultiCommand', 'Group',
'CommandCollection', 'Parameter', 'Option', 'Argument',
# Globals
'get_current_context',
# Decorators
'pass_context', 'pass_obj', 'make_pass_decorator', 'command', 'group',
'argument', 'option', 'confirmation_option', 'password_option',
'version_option', 'help_option',
# Types
'ParamType', 'File', 'Path', 'Choice', 'IntRange', 'Tuple', 'STRING',
'INT', 'FLOAT', 'BOOL', 'UUID', 'UNPROCESSED',
# Utilities
'echo', 'get_binary_stream', 'get_text_stream', 'open_file',
'format_filename', 'get_app_dir', 'get_os_args',
# Terminal functions
'prompt', 'confirm', 'get_terminal_size', 'echo_via_pager',
'progressbar', 'clear', 'style', 'unstyle', 'secho', 'edit', 'launch',
'getchar', 'pause',
# Exceptions
'ClickException', 'UsageError', 'BadParameter', 'FileError',
'Abort', 'NoSuchOption', 'BadOptionUsage', 'BadArgumentUsage',
'MissingParameter',
# Formatting
'HelpFormatter', 'wrap_text',
# Parsing
'OptionParser',
]
# Controls if click should emit the warning about the use of unicode
# literals.
disable_unicode_literals_warning = False
__version__ = '6.7'
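# --- Illustrative usage sketch (added; not part of click itself). A minimal
# command built from the names re-exported above; the command and its option
# are hypothetical, shown only to make the exported API concrete, and guarded
# so nothing runs on import.
if __name__ == '__main__':  # pragma: no cover
    @command()
    @option('--count', default=1, help='Number of greetings.')
    def _hello(count):
        """Echo a greeting `count` times."""
        for _ in range(count):
            echo('Hello from click %s' % __version__)
    _hello()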
| apache-2.0 |
elopio/snapcraft | snapcraft/cli/version.py | 1 | 1079 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2017 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import click
import snapcraft
SNAPCRAFT_VERSION_TEMPLATE = 'snapcraft, version %(version)s'
@click.group()
def versioncli():
"""Version commands"""
pass
@versioncli.command('version')
def version():
"""Obtain snapcraft's version number.
Examples:
snapcraft version
snapcraft --version
"""
click.echo(SNAPCRAFT_VERSION_TEMPLATE % {'version': snapcraft.__version__})
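# --- Illustrative sketch (added; not part of snapcraft). One way to exercise
# the group above without a terminal, using click's test runner; guarded so it
# never runs on import.
if __name__ == '__main__':  # pragma: no cover
    from click.testing import CliRunner
    _result = CliRunner().invoke(versioncli, ['version'])
    print(_result.output)  # e.g. "snapcraft, version 2.xx"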
| gpl-3.0 |
titom1986/CouchPotatoServer | libs/tmdb3/util.py | 32 | 16758 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------
# Name: util.py Assorted utilities used in tmdb_api
# Python Library
# Author: Raymond Wagner
#-----------------------
from copy import copy
from locales import get_locale
from tmdb_auth import get_session
class NameRepr(object):
"""Mixin for __repr__ methods using 'name' attribute."""
def __repr__(self):
return u"<{0.__class__.__name__} '{0.name}'>"\
.format(self).encode('utf-8')
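# --- Illustrative sketch (added; not part of tmdb3). Any class mixing in
# NameRepr only needs a 'name' attribute for the repr above to work; the
# class below is hypothetical.
class _ExampleNamed(NameRepr):
    def __init__(self, name):
        self.name = name
# repr(_ExampleNamed(u'Inception')) -> "<_ExampleNamed 'Inception'>"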
class SearchRepr(object):
"""
Mixin for __repr__ methods for classes with '_name' and
'_request' attributes.
"""
def __repr__(self):
name = self._name if self._name else self._request._kwargs['query']
return u"<Search Results: {0}>".format(name).encode('utf-8')
class Poller(object):
"""
Wrapper for an optional callable to populate an Element derived
class with raw data, or data from a Request.
"""
def __init__(self, func, lookup, inst=None):
self.func = func
self.lookup = lookup
self.inst = inst
if func:
# with function, this allows polling data from the API
self.__doc__ = func.__doc__
self.__name__ = func.__name__
self.__module__ = func.__module__
else:
# without function, this is just a dummy poller used for applying
# raw data to a new Element class with the lookup table
self.__name__ = '_populate'
def __get__(self, inst, owner):
# normal decorator stuff
# return self for a class
# return instantiated copy of self for an object
if inst is None:
return self
func = None
if self.func:
func = self.func.__get__(inst, owner)
return self.__class__(func, self.lookup, inst)
def __call__(self):
# retrieve data from callable function, and apply
if not callable(self.func):
raise RuntimeError('Poller object called without a source function')
req = self.func()
        if (('language' in req._kwargs) or ('country' in req._kwargs)) \
                and self.inst._locale.fallthrough:
# request specifies a locale filter, and fallthrough is enabled
# run a first pass with specified filter
if not self.apply(req.readJSON(), False):
return
# if first pass results in missed data, run a second pass to
# fill in the gaps
self.apply(req.new(language=None, country=None).readJSON())
# re-apply the filtered first pass data over top the second
# unfiltered set. this is to work around the issue that the
# properties have no way of knowing when they should or
# should not overwrite existing data. the cache engine will
# take care of the duplicate query
self.apply(req.readJSON())
def apply(self, data, set_nones=True):
# apply data directly, bypassing callable function
unfilled = False
for k, v in self.lookup.items():
if (k in data) and \
((data[k] is not None) if callable(self.func) else True):
# argument received data, populate it
setattr(self.inst, v, data[k])
elif v in self.inst._data:
# argument did not receive data, but Element already contains
# some value, so skip this
continue
elif set_nones:
# argument did not receive data, so fill it with None
# to indicate such and prevent a repeat scan
setattr(self.inst, v, None)
else:
# argument does not need data, so ignore it allowing it to
# trigger a later poll. this is intended for use when
# initializing a class with raw data, or when performing a
# first pass through when performing locale fall through
unfilled = True
return unfilled
class Data(object):
"""
Basic response definition class
This maps to a single key in a JSON dictionary received from the API
"""
def __init__(self, field, initarg=None, handler=None, poller=None,
raw=True, default=u'', lang=None, passthrough={}):
"""
This defines how the dictionary value is to be processed by the
poller
field -- defines the dictionary key that filters what data
this uses
initarg -- (optional) specifies that this field must be
supplied when creating a new instance of the Element
class this definition is mapped to. Takes an integer
for the order it should be used in the input
arguments
handler -- (optional) callable used to process the received
value before being stored in the Element object.
poller -- (optional) callable to be used if data is requested
and this value has not yet been defined. the
callable should return a dictionary of data from a
JSON query. many definitions may share a single
                   poller, which will be called once, and the data used to populate
all referenced definitions based off their defined
field
raw -- (optional) if the specified handler is an Element
class, the data will be passed into it using the
'raw' keyword attribute. setting this to false
will force the data to instead be passed in as the
first argument
"""
self.field = field
self.initarg = initarg
self.poller = poller
self.raw = raw
self.default = default
self.sethandler(handler)
self.passthrough = passthrough
def __get__(self, inst, owner):
if inst is None:
return self
if self.field not in inst._data:
if self.poller is None:
return None
self.poller.__get__(inst, owner)()
return inst._data[self.field]
def __set__(self, inst, value):
if (value is not None) and (value != ''):
value = self.handler(value)
else:
value = self.default
if isinstance(value, Element):
value._locale = inst._locale
value._session = inst._session
            for source, dest in self.passthrough.items():
setattr(value, dest, getattr(inst, source))
inst._data[self.field] = value
def sethandler(self, handler):
# ensure handler is always callable, even for passthrough data
if handler is None:
self.handler = lambda x: x
elif isinstance(handler, ElementType) and self.raw:
self.handler = lambda x: handler(raw=x)
else:
self.handler = lambda x: handler(x)
class Datapoint(Data):
pass
class Datalist(Data):
"""
Response definition class for list data
This maps to a key in a JSON dictionary storing a list of data
"""
def __init__(self, field, handler=None, poller=None, sort=None, raw=True, passthrough={}):
"""
This defines how the dictionary value is to be processed by the
poller
field -- defines the dictionary key that filters what data
this uses
handler -- (optional) callable used to process the received
value before being stored in the Element object.
poller -- (optional) callable to be used if data is requested
and this value has not yet been defined. the
callable should return a dictionary of data from a
JSON query. many definitions may share a single
                   poller, which will be called once, and the data used to populate
all referenced definitions based off their defined
field
sort -- (optional) name of attribute in resultant data to be
used to sort the list after processing. this
effectively requires a handler be defined to process
the data into something that has attributes
raw -- (optional) if the specified handler is an Element
class, the data will be passed into it using the
'raw' keyword attribute. setting this to false will
force the data to instead be passed in as the first
argument
"""
super(Datalist, self).__init__(field, None, handler, poller, raw, passthrough=passthrough)
self.sort = sort
def __set__(self, inst, value):
data = []
if value:
for val in value:
val = self.handler(val)
if isinstance(val, Element):
val._locale = inst._locale
val._session = inst._session
for source, dest in self.passthrough.items():
setattr(val, dest, getattr(inst, source))
data.append(val)
if self.sort:
if self.sort is True:
data.sort()
else:
data.sort(key=lambda x: getattr(x, self.sort))
inst._data[self.field] = data
class Datadict(Data):
"""
Response definition class for dictionary data
This maps to a key in a JSON dictionary storing a dictionary of data
"""
def __init__(self, field, handler=None, poller=None, raw=True,
key=None, attr=None, passthrough={}):
"""
This defines how the dictionary value is to be processed by the
poller
field -- defines the dictionary key that filters what data
this uses
handler -- (optional) callable used to process the received
value before being stored in the Element object.
poller -- (optional) callable to be used if data is requested
and this value has not yet been defined. the
callable should return a dictionary of data from a
JSON query. many definitions may share a single
                   poller, which will be called once, and the data used to populate
all referenced definitions based off their defined
field
key -- (optional) name of key in resultant data to be used
                   as the key in the stored dictionary. if this is not
                   defined, the field name from the source data is used instead
attr -- (optional) name of attribute in resultant data to be
                   used as the key in the stored dictionary. if this is
                   not defined, the field name from the source data is
                   used instead
raw -- (optional) if the specified handler is an Element
class, the data will be passed into it using the
'raw' keyword attribute. setting this to false will
force the data to instead be passed in as the first
argument
"""
if key and attr:
raise TypeError("`key` and `attr` cannot both be defined")
super(Datadict, self).__init__(field, None, handler, poller, raw, passthrough=passthrough)
if key:
self.getkey = lambda x: x[key]
elif attr:
self.getkey = lambda x: getattr(x, attr)
else:
raise TypeError("Datadict requires `key` or `attr` be defined " +
"for populating the dictionary")
def __set__(self, inst, value):
data = {}
if value:
for val in value:
val = self.handler(val)
if isinstance(val, Element):
val._locale = inst._locale
val._session = inst._session
for source, dest in self.passthrough.items():
setattr(val, dest, getattr(inst, source))
data[self.getkey(val)] = val
inst._data[self.field] = data
class ElementType(type):
"""
MetaClass used to pre-process Element-derived classes and set up the
Data definitions
"""
def __new__(mcs, name, bases, attrs):
# any Data or Poller object defined in parent classes must be cloned
# and processed in this class to function properly
# scan through available bases for all such definitions and insert
# a copy into this class's attributes
# run in reverse order so higher priority values overwrite lower ones
data = {}
pollers = {'_populate':None}
for base in reversed(bases):
if isinstance(base, mcs):
for k, attr in base.__dict__.items():
if isinstance(attr, Data):
# extract copies of each defined Data element from
# parent classes
attr = copy(attr)
attr.poller = attr.poller.func
data[k] = attr
elif isinstance(attr, Poller):
# extract copies of each defined Poller function
# from parent classes
pollers[k] = attr.func
for k, attr in attrs.items():
if isinstance(attr, Data):
data[k] = attr
if '_populate' in attrs:
pollers['_populate'] = attrs['_populate']
        # process all defined Data attributes, testing for use as an initial
# argument, and building a list of what Pollers are used to populate
# which Data points
pollermap = dict([(k, []) for k in pollers])
initargs = []
for k, v in data.items():
v.name = k
if v.initarg:
initargs.append(v)
if v.poller:
pn = v.poller.__name__
if pn not in pollermap:
pollermap[pn] = []
if pn not in pollers:
pollers[pn] = v.poller
pollermap[pn].append(v)
else:
pollermap['_populate'].append(v)
# wrap each used poller function with a Poller class, and push into
# the new class attributes
for k, v in pollermap.items():
if len(v) == 0:
continue
lookup = dict([(attr.field, attr.name) for attr in v])
poller = Poller(pollers[k], lookup)
attrs[k] = poller
# backfill wrapped Poller into each mapped Data object, and ensure
# the data elements are defined for this new class
for attr in v:
attr.poller = poller
attrs[attr.name] = attr
        # build sorted list of arguments used for initialization
attrs['_InitArgs'] = tuple(
[a.name for a in sorted(initargs, key=lambda x: x.initarg)])
return type.__new__(mcs, name, bases, attrs)
def __call__(cls, *args, **kwargs):
obj = cls.__new__(cls)
if ('locale' in kwargs) and (kwargs['locale'] is not None):
obj._locale = kwargs['locale']
else:
obj._locale = get_locale()
if 'session' in kwargs:
obj._session = kwargs['session']
else:
obj._session = get_session()
obj._data = {}
if 'raw' in kwargs:
# if 'raw' keyword is supplied, create populate object manually
if len(args) != 0:
raise TypeError(
'__init__() takes exactly 2 arguments (1 given)')
obj._populate.apply(kwargs['raw'], False)
else:
# if not, the number of input arguments must exactly match that
# defined by the Data definitions
if len(args) != len(cls._InitArgs):
raise TypeError(
'__init__() takes exactly {0} arguments ({1} given)'\
.format(len(cls._InitArgs)+1, len(args)+1))
for a, v in zip(cls._InitArgs, args):
setattr(obj, a, v)
obj.__init__()
return obj
class Element(object):
__metaclass__ = ElementType
_lang = 'en'
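# --- Illustrative sketch (added; not part of tmdb3). How the machinery above
# is typically consumed: an Element subclass declares Data descriptors and the
# ElementType metaclass wires them to pollers. The field names below are
# hypothetical; instantiation is left in a comment because it needs a live
# locale and session from the surrounding package.
class _ExampleMovie(Element):
    title = Datapoint('title')
    year = Datapoint('release_year', handler=int)
# _ExampleMovie(raw={'title': u'Brazil', 'release_year': '1985'}) would give
# an object with .title == u'Brazil' and .year == 1985.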
| gpl-3.0 |
tobegit3hub/deep_cnn | java_predict_client/src/main/proto/tensorflow/contrib/slim/python/slim/learning_test.py | 9 | 31311 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.learning."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
from numpy import testing as np_testing
import tensorflow as tf
slim = tf.contrib.slim
class ClipGradientNormsTest(tf.test.TestCase):
def clip_values(self, arr):
norm = np.sqrt(np.sum(arr**2))
if norm > self._max_norm:
return self._max_norm * arr / np.sqrt(np.sum(arr**2))
return arr
def setUp(self):
np.random.seed(0)
self._max_norm = 1.0
self._grad_vec = np.array([1., 2., 3.])
self._clipped_grad_vec = self.clip_values(self._grad_vec)
self._zero_vec = np.zeros(self._grad_vec.size)
def testOrdinaryGradIsClippedCorrectly(self):
gradient = tf.constant(self._grad_vec, dtype=tf.float32)
variable = tf.Variable(self._zero_vec, dtype=tf.float32)
gradients_to_variables = (gradient, variable)
[gradients_to_variables] = slim.learning.clip_gradient_norms(
[gradients_to_variables], self._max_norm)
# Ensure the variable passed through.
self.assertEqual(gradients_to_variables[1], variable)
with self.test_session() as sess:
actual_gradient = sess.run(gradients_to_variables[0])
np_testing.assert_almost_equal(actual_gradient, self._clipped_grad_vec)
def testNoneGradPassesThroughCorrectly(self):
gradient = None
variable = tf.Variable(self._zero_vec, dtype=tf.float32)
gradients_to_variables = (gradient, variable)
[gradients_to_variables] = slim.learning.clip_gradient_norms(
[gradients_to_variables], self._max_norm)
self.assertEqual(gradients_to_variables[0], None)
self.assertEqual(gradients_to_variables[1], variable)
def testIndexedSlicesGradIsClippedCorrectly(self):
sparse_grad_indices = np.array([0, 1, 4])
sparse_grad_dense_shape = [self._grad_vec.size]
values = tf.constant(self._grad_vec, dtype=tf.float32)
indices = tf.constant(sparse_grad_indices, dtype=tf.int32)
dense_shape = tf.constant(sparse_grad_dense_shape, dtype=tf.int32)
gradient = tf.IndexedSlices(values, indices, dense_shape)
variable = tf.Variable(self._zero_vec, dtype=tf.float32)
gradients_to_variables = (gradient, variable)
gradients_to_variables = slim.learning.clip_gradient_norms(
[gradients_to_variables], self._max_norm)[0]
# Ensure the built IndexedSlice has the right form.
self.assertEqual(gradients_to_variables[1], variable)
self.assertEqual(gradients_to_variables[0].indices, indices)
self.assertEqual(gradients_to_variables[0].dense_shape, dense_shape)
with tf.Session() as sess:
actual_gradient = sess.run(gradients_to_variables[0].values)
np_testing.assert_almost_equal(actual_gradient, self._clipped_grad_vec)
class MultiplyGradientsTest(tf.test.TestCase):
def setUp(self):
np.random.seed(0)
self._multiplier = 3.7
self._grad_vec = np.array([1., 2., 3.])
self._multiplied_grad_vec = np.multiply(self._grad_vec, self._multiplier)
def testNonListGradsRaisesError(self):
gradient = tf.constant(self._grad_vec, dtype=tf.float32)
variable = tf.Variable(tf.zeros_like(gradient))
grad_to_var = (gradient, variable)
gradient_multipliers = {variable: self._multiplier}
with self.assertRaises(ValueError):
slim.learning.multiply_gradients(grad_to_var, gradient_multipliers)
def testEmptyMultiplesRaisesError(self):
gradient = tf.constant(self._grad_vec, dtype=tf.float32)
variable = tf.Variable(tf.zeros_like(gradient))
grad_to_var = (gradient, variable)
with self.assertRaises(ValueError):
slim.learning.multiply_gradients([grad_to_var], {})
def testNonDictMultiplierRaisesError(self):
gradient = tf.constant(self._grad_vec, dtype=tf.float32)
variable = tf.Variable(tf.zeros_like(gradient))
grad_to_var = (gradient, variable)
with self.assertRaises(ValueError):
slim.learning.multiply_gradients([grad_to_var], 3)
def testMultipleOfNoneGradRaisesError(self):
gradient = tf.constant(self._grad_vec, dtype=tf.float32)
variable = tf.Variable(tf.zeros_like(gradient))
grad_to_var = (None, variable)
gradient_multipliers = {variable: self._multiplier}
with self.assertRaises(ValueError):
slim.learning.multiply_gradients(grad_to_var, gradient_multipliers)
def testMultipleGradientsWithVariables(self):
gradient = tf.constant(self._grad_vec, dtype=tf.float32)
variable = tf.Variable(tf.zeros_like(gradient))
grad_to_var = (gradient, variable)
gradient_multipliers = {variable: self._multiplier}
[grad_to_var] = slim.learning.multiply_gradients(
[grad_to_var],
gradient_multipliers)
# Ensure the variable passed through.
self.assertEqual(grad_to_var[1], variable)
with self.test_session() as sess:
actual_gradient = sess.run(grad_to_var[0])
np_testing.assert_almost_equal(actual_gradient,
self._multiplied_grad_vec, 5)
def testIndexedSlicesGradIsMultiplied(self):
values = tf.constant(self._grad_vec, dtype=tf.float32)
indices = tf.constant([0, 1, 2], dtype=tf.int32)
dense_shape = tf.constant([self._grad_vec.size], dtype=tf.int32)
gradient = tf.IndexedSlices(values, indices, dense_shape)
variable = tf.Variable(tf.zeros((1, 3)))
grad_to_var = (gradient, variable)
gradient_multipliers = {variable: self._multiplier}
[grad_to_var] = slim.learning.multiply_gradients(
[grad_to_var],
gradient_multipliers)
# Ensure the built IndexedSlice has the right form.
self.assertEqual(grad_to_var[1], variable)
self.assertEqual(grad_to_var[0].indices, indices)
self.assertEqual(grad_to_var[0].dense_shape, dense_shape)
with self.test_session() as sess:
actual_gradient = sess.run(grad_to_var[0].values)
np_testing.assert_almost_equal(actual_gradient,
self._multiplied_grad_vec, 5)
def LogisticClassifier(inputs):
return slim.fully_connected(
inputs, 1, activation_fn=tf.sigmoid)
def BatchNormClassifier(inputs):
inputs = slim.batch_norm(inputs, decay=0.1)
return slim.fully_connected(inputs, 1, activation_fn=tf.sigmoid)
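# --- Illustrative sketch (added; not part of the test suite). The training
# recipe the tests below repeat: build a loss, wrap an optimizer into a
# train_op, then hand it to slim.learning.train. The arguments are
# hypothetical placeholders.
def _example_training_run(inputs, labels, logdir, steps=10):
  predictions = LogisticClassifier(inputs)
  slim.losses.log_loss(predictions, labels)
  total_loss = slim.losses.get_total_loss()
  optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
  train_op = slim.learning.create_train_op(total_loss, optimizer)
  return slim.learning.train(train_op, logdir, number_of_steps=steps)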
class TrainBNClassifierTest(tf.test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
logdir = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
'tmp_logs')
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
tf_predictions = BatchNormClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(
total_loss, optimizer)
loss = slim.learning.train(
train_op, logdir, number_of_steps=300, log_every_n_steps=10)
self.assertLess(loss, .1)
class CreateTrainOpTest(tf.test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.random.rand(16, 4).astype(np.float32)
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
def testUseUpdateOps(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
expected_mean = np.mean(self._inputs, axis=(0))
expected_var = np.var(self._inputs, axis=(0))
tf_predictions = BatchNormClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(total_loss, optimizer)
moving_mean = tf.contrib.framework.get_variables_by_name('moving_mean')[0]
moving_variance = tf.contrib.framework.get_variables_by_name(
'moving_variance')[0]
with tf.Session() as sess:
# Initialize all variables
sess.run(tf.global_variables_initializer())
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 4)
self.assertAllClose(variance, [1] * 4)
for _ in range(10):
sess.run([train_op])
mean = moving_mean.eval()
variance = moving_variance.eval()
# After 10 updates with decay 0.1 moving_mean == expected_mean and
# moving_variance == expected_var.
self.assertAllClose(mean, expected_mean)
self.assertAllClose(variance, expected_var)
def testEmptyUpdateOps(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
tf_predictions = BatchNormClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(total_loss, optimizer,
update_ops=[])
moving_mean = tf.contrib.framework.get_variables_by_name('moving_mean')[0]
moving_variance = tf.contrib.framework.get_variables_by_name(
'moving_variance')[0]
with tf.Session() as sess:
# Initialize all variables
sess.run(tf.global_variables_initializer())
mean, variance = sess.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 4)
self.assertAllClose(variance, [1] * 4)
for _ in range(10):
sess.run([train_op])
mean = moving_mean.eval()
variance = moving_variance.eval()
# Since we skip update_ops the moving_vars are not updated.
self.assertAllClose(mean, [0] * 4)
self.assertAllClose(variance, [1] * 4)
def testRecordTrainOpInCollection(self):
with tf.Graph().as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
tf_predictions = LogisticClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Make sure the training op was recorded in the proper collection
self.assertTrue(train_op in tf.get_collection(tf.GraphKeys.TRAIN_OP))
class TrainTest(tf.test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def testTrainWithNonDefaultGraph(self):
logdir = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
'tmp_logs')
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
tf_predictions = LogisticClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(total_loss, optimizer)
loss = slim.learning.train(
train_op, logdir, number_of_steps=300, log_every_n_steps=10, graph=g)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testTrainWithNoneAsLogdir(self):
with tf.Graph().as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
tf_predictions = LogisticClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(total_loss, optimizer)
loss = slim.learning.train(
train_op, None, number_of_steps=300, log_every_n_steps=10)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testTrainWithSessionConfig(self):
with tf.Graph().as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
tf_predictions = LogisticClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(total_loss, optimizer)
session_config = tf.ConfigProto(allow_soft_placement=True)
loss = slim.learning.train(
train_op,
None,
number_of_steps=300,
log_every_n_steps=10,
session_config=session_config)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testTrainWithTrace(self):
logdir = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
'tmp_logs')
with tf.Graph().as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
tf_predictions = LogisticClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
tf.summary.scalar('total_loss', total_loss)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(total_loss, optimizer)
loss = slim.learning.train(
train_op,
logdir,
number_of_steps=300,
log_every_n_steps=10,
trace_every_n_steps=100)
self.assertIsNotNone(loss)
for trace_step in [1, 101, 201]:
trace_filename = 'tf_trace-%d.json' % trace_step
self.assertTrue(
os.path.isfile(os.path.join(logdir, trace_filename)))
def testTrainWithNoneAsLogdirWhenUsingSummariesRaisesError(self):
with tf.Graph().as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
tf_predictions = LogisticClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
tf.summary.scalar('total_loss', total_loss)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(total_loss, optimizer)
summary_op = tf.summary.merge_all()
with self.assertRaises(ValueError):
slim.learning.train(
train_op, None, number_of_steps=300, summary_op=summary_op)
def testTrainWithNoneAsLogdirWhenUsingTraceRaisesError(self):
with tf.Graph().as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
tf_predictions = LogisticClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(total_loss, optimizer)
with self.assertRaises(ValueError):
slim.learning.train(
train_op, None, number_of_steps=300, trace_every_n_steps=10)
def testTrainWithNoneAsLogdirWhenUsingSaverRaisesError(self):
with tf.Graph().as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
tf_predictions = LogisticClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(total_loss, optimizer)
saver = tf.train.Saver()
with self.assertRaises(ValueError):
slim.learning.train(
train_op, None, init_op=None, number_of_steps=300, saver=saver)
def testTrainWithNoneAsInitWhenUsingVarsRaisesError(self):
logdir = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
'tmp_logs')
with tf.Graph().as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
tf_predictions = LogisticClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(
total_loss, optimizer)
with self.assertRaises(RuntimeError):
slim.learning.train(
train_op, logdir, init_op=None, number_of_steps=300)
def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
logdir = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
'tmp_logs')
with tf.Graph().as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
tf_predictions = LogisticClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(total_loss, optimizer)
loss = slim.learning.train(
train_op, logdir, number_of_steps=300, log_every_n_steps=10)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testTrainWithLocalVariable(self):
logdir = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
'tmp_logs')
with tf.Graph().as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
local_multiplier = slim.local_variable(1.0)
tf_predictions = LogisticClassifier(tf_inputs) * local_multiplier
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(
total_loss, optimizer)
loss = slim.learning.train(
train_op, logdir, number_of_steps=300, log_every_n_steps=10)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testResumeTrainAchievesRoughlyTheSameLoss(self):
logdir = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
'tmp_logs')
number_of_steps = [300, 301, 305]
for i in range(len(number_of_steps)):
with tf.Graph().as_default():
tf.set_random_seed(i)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
tf_predictions = LogisticClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(
total_loss, optimizer)
loss = slim.learning.train(
train_op, logdir, number_of_steps=number_of_steps[i],
log_every_n_steps=10)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
tf_predictions = LogisticClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
total_loss = slim.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(
learning_rate=learning_rate)
if gradient_multiplier != 1.0:
variables = tf.trainable_variables()
gradient_multipliers = {var: gradient_multiplier for var in variables}
else:
gradient_multipliers = None
return slim.learning.create_train_op(
total_loss, optimizer,
gradient_multipliers=gradient_multipliers)
def testTrainWithInitFromCheckpoint(self):
logdir1 = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
'tmp_logs1')
logdir2 = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
'tmp_logs2')
# First, train the model one step (make sure the error is high).
with tf.Graph().as_default():
tf.set_random_seed(0)
train_op = self.create_train_op()
loss = slim.learning.train(
train_op, logdir1, number_of_steps=1)
self.assertGreater(loss, .5)
# Next, train the model to convergence.
with tf.Graph().as_default():
tf.set_random_seed(1)
train_op = self.create_train_op()
loss = slim.learning.train(
train_op, logdir1, number_of_steps=300, log_every_n_steps=10)
self.assertIsNotNone(loss)
self.assertLess(loss, .02)
# Finally, advance the model a single step and validate that the loss is
# still low.
with tf.Graph().as_default():
tf.set_random_seed(2)
train_op = self.create_train_op()
model_variables = tf.all_variables()
model_path = os.path.join(logdir1, 'model.ckpt-300')
init_op = tf.global_variables_initializer()
op, init_feed_dict = slim.assign_from_checkpoint(
model_path, model_variables)
def InitAssignFn(sess):
sess.run(op, init_feed_dict)
loss = slim.learning.train(
train_op,
logdir2,
number_of_steps=1,
init_op=init_op,
init_fn=InitAssignFn)
self.assertIsNotNone(loss)
self.assertLess(loss, .02)
def testTrainWithInitFromFn(self):
logdir1 = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
'tmp_logs1')
logdir2 = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
'tmp_logs2')
# First, train the model one step (make sure the error is high).
with tf.Graph().as_default():
tf.set_random_seed(0)
train_op = self.create_train_op()
loss = slim.learning.train(
train_op, logdir1, number_of_steps=1)
self.assertGreater(loss, .5)
# Next, train the model to convergence.
with tf.Graph().as_default():
tf.set_random_seed(1)
train_op = self.create_train_op()
loss = slim.learning.train(
train_op, logdir1, number_of_steps=300, log_every_n_steps=10)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
# Finally, advance the model a single step and validate that the loss is
# still low.
with tf.Graph().as_default():
tf.set_random_seed(2)
train_op = self.create_train_op()
model_variables = tf.all_variables()
model_path = os.path.join(logdir1, 'model.ckpt-300')
saver = tf.train.Saver(model_variables)
def RestoreFn(sess):
saver.restore(sess, model_path)
loss = slim.learning.train(
train_op,
logdir2,
number_of_steps=1,
init_fn=RestoreFn)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def ModelLoss(self):
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
tf_predictions = LogisticClassifier(tf_inputs)
slim.losses.log_loss(tf_predictions, tf_labels)
return slim.losses.get_total_loss()
def testTrainAllVarsHasLowerLossThanTrainSubsetOfVars(self):
logdir1 = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
'tmp_logs1')
# First, train only the weights of the model.
with tf.Graph().as_default():
tf.set_random_seed(0)
total_loss = self.ModelLoss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
weights = slim.get_variables_by_name('weights')
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
variables_to_train=weights)
loss = slim.learning.train(
train_op, logdir1, number_of_steps=200, log_every_n_steps=10)
self.assertGreater(loss, .015)
self.assertLess(loss, .05)
# Next, train the biases of the model.
with tf.Graph().as_default():
tf.set_random_seed(1)
total_loss = self.ModelLoss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
biases = slim.get_variables_by_name('biases')
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
variables_to_train=biases)
loss = slim.learning.train(
train_op, logdir1, number_of_steps=300, log_every_n_steps=10)
self.assertGreater(loss, .015)
self.assertLess(loss, .05)
# Finally, train both weights and bias to get lower loss.
with tf.Graph().as_default():
tf.set_random_seed(2)
total_loss = self.ModelLoss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
train_op = slim.learning.create_train_op(total_loss, optimizer)
loss = slim.learning.train(
train_op, logdir1, number_of_steps=400, log_every_n_steps=10)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testTrainingSubsetsOfVariablesOnlyUpdatesThoseVariables(self):
# First, train only the weights of the model.
with tf.Graph().as_default():
tf.set_random_seed(0)
total_loss = self.ModelLoss()
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
weights, biases = slim.get_variables()
train_op = slim.learning.create_train_op(total_loss, optimizer)
train_weights = slim.learning.create_train_op(
total_loss, optimizer, variables_to_train=[weights])
train_biases = slim.learning.create_train_op(
total_loss, optimizer, variables_to_train=[biases])
with tf.Session() as sess:
# Initialize the variables.
sess.run(tf.global_variables_initializer())
        # Get the initial weights and biases values.
weights_values, biases_values = sess.run([weights, biases])
self.assertGreater(np.linalg.norm(weights_values), 0)
self.assertAlmostEqual(np.linalg.norm(biases_values), 0)
# Update weights and biases.
loss = sess.run(train_op)
self.assertGreater(loss, .5)
new_weights, new_biases = sess.run([weights, biases])
# Check that the weights and biases have been updated.
self.assertGreater(np.linalg.norm(weights_values - new_weights), 0)
self.assertGreater(np.linalg.norm(biases_values - new_biases), 0)
weights_values, biases_values = new_weights, new_biases
# Update only weights.
loss = sess.run(train_weights)
self.assertGreater(loss, .5)
new_weights, new_biases = sess.run([weights, biases])
# Check that the weights have been updated, but biases have not.
self.assertGreater(np.linalg.norm(weights_values - new_weights), 0)
self.assertAlmostEqual(np.linalg.norm(biases_values - new_biases), 0)
weights_values = new_weights
# Update only biases.
loss = sess.run(train_biases)
self.assertGreater(loss, .5)
new_weights, new_biases = sess.run([weights, biases])
# Check that the biases have been updated, but weights have not.
self.assertAlmostEqual(np.linalg.norm(weights_values - new_weights), 0)
self.assertGreater(np.linalg.norm(biases_values - new_biases), 0)
def testTrainWithAlteredGradients(self):
# Use the same learning rate but different gradient multipliers
# to train two models. Model with equivalently larger learning
# rate (i.e., learning_rate * gradient_multiplier) has smaller
# training loss.
logdir1 = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
'tmp_logs1')
logdir2 = os.path.join(tempfile.mkdtemp(prefix=self.get_temp_dir()),
'tmp_logs2')
multipliers = [1., 1000.]
number_of_steps = 10
losses = []
learning_rate = 0.001
# First, train the model with equivalently smaller learning rate.
with tf.Graph().as_default():
tf.set_random_seed(0)
train_op = self.create_train_op(
learning_rate=learning_rate,
gradient_multiplier=multipliers[0])
loss = slim.learning.train(
train_op, logdir1, number_of_steps=number_of_steps)
losses.append(loss)
self.assertGreater(loss, .5)
# Second, train the model with equivalently larger learning rate.
with tf.Graph().as_default():
tf.set_random_seed(0)
train_op = self.create_train_op(
learning_rate=learning_rate,
gradient_multiplier=multipliers[1])
loss = slim.learning.train(
train_op, logdir2, number_of_steps=number_of_steps)
losses.append(loss)
self.assertIsNotNone(loss)
self.assertLess(loss, .5)
# The loss of the model trained with larger learning rate should
# be smaller.
self.assertGreater(losses[0], losses[1])
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
edry/edx-platform | lms/djangoapps/courseware/tests/test_word_cloud.py | 134 | 8327 | # -*- coding: utf-8 -*-
"""Word cloud integration tests using mongo modulestore."""
import json
from operator import itemgetter
from nose.plugins.attrib import attr
from . import BaseTestXmodule
from xmodule.x_module import STUDENT_VIEW
@attr('shard_1')
class TestWordCloud(BaseTestXmodule):
"""Integration test for word cloud xmodule."""
CATEGORY = "word_cloud"
def _get_users_state(self):
"""Return current state for each user:
{username: json_state}
"""
# check word cloud response for every user
users_state = {}
for user in self.users:
response = self.clients[user.username].post(self.get_url('get_state'))
users_state[user.username] = json.loads(response.content)
return users_state
def _post_words(self, words):
"""Post `words` and return current state for each user:
{username: json_state}
"""
users_state = {}
for user in self.users:
response = self.clients[user.username].post(
self.get_url('submit'),
{'student_words[]': words},
HTTP_X_REQUESTED_WITH='XMLHttpRequest'
)
users_state[user.username] = json.loads(response.content)
return users_state
def _check_response(self, response_contents, correct_jsons):
"""Utility function that compares correct and real responses."""
for username, content in response_contents.items():
# Used in debugger for comparing objects.
# self.maxDiff = None
            # We should compare top_words manually,
# because they are unsorted.
keys_to_compare = set(content.keys()).difference(set(['top_words']))
self.assertDictEqual(
{k: content[k] for k in keys_to_compare},
{k: correct_jsons[username][k] for k in keys_to_compare})
# comparing top_words:
top_words_content = sorted(
content['top_words'],
key=itemgetter('text')
)
top_words_correct = sorted(
correct_jsons[username]['top_words'],
key=itemgetter('text')
)
self.assertListEqual(top_words_content, top_words_correct)
def test_initial_state(self):
"""Inital state of word cloud is correct. Those state that
is sended from server to frontend, when students load word
cloud page.
"""
users_state = self._get_users_state()
self.assertEqual(
''.join(set([
content['status']
for _, content in users_state.items()
])),
'success')
# correct initial data:
correct_initial_data = {
u'status': u'success',
u'student_words': {},
u'total_count': 0,
u'submitted': False,
u'top_words': {},
u'display_student_percents': False
}
for _, response_content in users_state.items():
self.assertEquals(response_content, correct_initial_data)
def test_post_words(self):
"""Students can submit data succesfully.
Word cloud data properly updates after students submit.
"""
input_words = [
"small",
"BIG",
" Spaced ",
" few words",
]
correct_words = [
u"small",
u"big",
u"spaced",
u"few words",
]
users_state = self._post_words(input_words)
self.assertEqual(
''.join(set([
content['status']
for _, content in users_state.items()
])),
'success')
correct_state = {}
for index, user in enumerate(self.users):
correct_state[user.username] = {
u'status': u'success',
u'submitted': True,
u'display_student_percents': True,
u'student_words': {word: 1 + index for word in correct_words},
u'total_count': len(input_words) * (1 + index),
u'top_words': [
{
u'text': word, u'percent': 100 / len(input_words),
u'size': (1 + index)
}
for word in correct_words
]
}
self._check_response(users_state, correct_state)
def test_collective_users_submits(self):
"""Test word cloud data flow per single and collective users submits.
        Make sure that:
        1. The initial state of the word cloud is correct: the state that
        is sent from the server to the frontend when students load the
        word cloud page.
        2. Students can submit data successfully.
        3. Subsequent submits produce an "already voted" error. Repeat submits
        are not allowed by the user interface, but technically they are
        possible, and word_cloud should react properly.
4. State of word cloud after #3 is still as after #2.
"""
# 1.
users_state = self._get_users_state()
self.assertEqual(
''.join(set([
content['status']
for _, content in users_state.items()
])),
'success')
# 2.
        # Incremental state per user.
users_state_after_post = self._post_words(['word1', 'word2'])
self.assertEqual(
''.join(set([
content['status']
for _, content in users_state_after_post.items()
])),
'success')
# Final state after all posts.
users_state_before_fail = self._get_users_state()
# 3.
users_state_after_post = self._post_words(
['word1', 'word2', 'word3'])
self.assertEqual(
''.join(set([
content['status']
for _, content in users_state_after_post.items()
])),
'fail')
# 4.
current_users_state = self._get_users_state()
self._check_response(users_state_before_fail, current_users_state)
def test_unicode(self):
input_words = [u" this is unicode Юникод"]
correct_words = [u"this is unicode юникод"]
users_state = self._post_words(input_words)
self.assertEqual(
''.join(set([
content['status']
for _, content in users_state.items()
])),
'success')
for user in self.users:
self.assertListEqual(
users_state[user.username]['student_words'].keys(),
correct_words)
def test_handle_ajax_incorrect_dispatch(self):
responses = {
user.username: self.clients[user.username].post(
self.get_url('whatever'),
{},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
for user in self.users
}
status_codes = {response.status_code for response in responses.values()}
self.assertEqual(status_codes.pop(), 200)
for user in self.users:
self.assertDictEqual(
json.loads(responses[user.username].content),
{
'status': 'fail',
'error': 'Unknown Command!'
}
)
def test_word_cloud_constructor(self):
"""Make sure that all parameters extracted correclty from xml"""
fragment = self.runtime.render(self.item_descriptor, STUDENT_VIEW)
expected_context = {
'ajax_url': self.item_descriptor.xmodule_runtime.ajax_url,
'element_class': self.item_descriptor.location.category,
'element_id': self.item_descriptor.location.html_id(),
'num_inputs': 5, # default value
'submitted': False # default value
}
self.assertEqual(fragment.content, self.runtime.render_template('word_cloud.html', expected_context))
| agpl-3.0 |
collinss/Cinnamon | files/usr/share/cinnamon/cinnamon-settings/cinnamon-settings.py | 1 | 27007 | #!/usr/bin/python3
import sys
import os
import glob
import gettext
import time
import traceback
import locale
import urllib.request as urllib
from functools import cmp_to_key
import unicodedata
import config
from setproctitle import setproctitle
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('XApp', '1.0')
from gi.repository import Gio, Gtk, Pango, Gdk, XApp
sys.path.append(config.currentPath + "/modules")
sys.path.append(config.currentPath + "/bin")
import capi
import proxygsettings
import SettingsWidgets
# i18n
gettext.install("cinnamon", "/usr/share/locale", names="ngettext")
# Standard setting pages... this can be expanded to include applet dirs maybe?
mod_files = glob.glob(config.currentPath + "/modules/*.py")
mod_files.sort()
if len(mod_files) == 0:
print("No settings modules found!!")
sys.exit(1)
mod_files = [x.split('/')[-1].split('.')[0] for x in mod_files]
for mod_file in mod_files:
if mod_file[0:3] != "cs_":
raise Exception("Settings modules must have a prefix of 'cs_' !!")
modules = map(__import__, mod_files)
# i18n for menu item
menuName = _("System Settings")
menuComment = _("Control Center")
WIN_WIDTH = 800
WIN_HEIGHT = 600
WIN_H_PADDING = 20
MIN_LABEL_WIDTH = 16
MAX_LABEL_WIDTH = 25
MIN_PIX_WIDTH = 100
MAX_PIX_WIDTH = 160
MOUSE_BACK_BUTTON = 8
CATEGORIES = [
# Display name ID Show it? Always False to start Icon
{"label": _("Appearance"), "id": "appear", "show": False, "icon": "cs-cat-appearance"},
{"label": _("Preferences"), "id": "prefs", "show": False, "icon": "cs-cat-prefs"},
{"label": _("Hardware"), "id": "hardware", "show": False, "icon": "cs-cat-hardware"},
{"label": _("Administration"), "id": "admin", "show": False, "icon": "cs-cat-admin"}
]
CONTROL_CENTER_MODULES = [
# Label Module ID Icon Category Keywords for filter
[_("Network"), "network", "cs-network", "hardware", _("network, wireless, wifi, ethernet, broadband, internet")],
[_("Display"), "display", "cs-display", "hardware", _("display, screen, monitor, layout, resolution, dual, lcd")],
[_("Color"), "color", "cs-color", "hardware", _("color, profile, display, printer, output")],
[_("Graphics Tablet"), "wacom", "cs-tablet", "hardware", _("wacom, digitize, tablet, graphics, calibrate, stylus")]
]
STANDALONE_MODULES = [
# Label Executable Icon Category Keywords for filter
[_("Printers"), "system-config-printer", "cs-printer", "hardware", _("printers, laser, inkjet")],
[_("Firewall"), "gufw", "cs-firewall", "admin", _("firewall, block, filter, programs")],
[_("Firewall"), "firewall-config", "cs-firewall", "admin", _("firewall, block, filter, programs")],
[_("Languages"), "mintlocale", "cs-language", "prefs", _("language, install, foreign")],
[_("Input Method"), "mintlocale-im", "cs-input-method", "prefs", _("language, install, foreign, input, method, chinese, korean, japanese, typing")],
[_("Login Window"), "pkexec lightdm-settings", "cs-login", "admin", _("login, lightdm, mdm, gdm, manager, user, password, startup, switch")],
[_("Login Window"), "lightdm-gtk-greeter-settings-pkexec", "cs-login", "admin", _("login, lightdm, manager, settings, editor")],
[_("Driver Manager"), "pkexec driver-manager", "cs-drivers", "admin", _("video, driver, wifi, card, hardware, proprietary, nvidia, radeon, nouveau, fglrx")],
[_("Nvidia Settings"), "nvidia-settings", "cs-drivers", "admin", _("video, driver, proprietary, nvidia, settings")],
[_("Software Sources"), "pkexec mintsources", "cs-sources", "admin", _("ppa, repository, package, source, download")],
[_("Package Management"), "dnfdragora", "cs-sources", "admin", _("update, install, repository, package, source, download")],
[_("Package Management"), "yumex-dnf", "cs-sources", "admin", _("update, install, repository, package, source, download")],
[_("Users and Groups"), "cinnamon-settings-users", "cs-user-accounts", "admin", _("user, users, account, accounts, group, groups, password")],
[_("Bluetooth"), "blueberry", "cs-bluetooth", "hardware", _("bluetooth, dongle, transfer, mobile")],
[_("Manage Services and Units"), "systemd-manager-pkexec", "cs-sources", "admin", _("systemd, units, services, systemctl, init")]
]
def print_timing(func):
# decorate functions with @print_timing to output how long they take to run.
def wrapper(*arg):
t1 = time.time()
res = func(*arg)
t2 = time.time()
        print('%s took %0.3f ms' % (func.__name__, (t2-t1)*1000.0))
return res
return wrapper
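# --- Illustrative sketch (added; not used by cinnamon-settings itself).
# Example of the decorator above: the wrapped function logs its wall-clock
# duration on every call. The function below is hypothetical and never called.
@print_timing
def _timed_example():
    time.sleep(0.01)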
def touch(fname, times=None):
    with open(fname, 'a'):
os.utime(fname, times)
class MainWindow:
# Change pages
def side_view_nav(self, side_view, path, cat):
selected_items = side_view.get_selected_items()
if len(selected_items) > 0:
self.deselect(cat)
filtered_path = side_view.get_model().convert_path_to_child_path(selected_items[0])
if filtered_path is not None:
self.go_to_sidepage(cat, filtered_path, user_action=True)
def _on_sidepage_hide_stack(self):
self.stack_switcher.set_opacity(0)
def _on_sidepage_show_stack(self):
self.stack_switcher.set_opacity(1)
def go_to_sidepage(self, cat, path, user_action=True):
iterator = self.store[cat].get_iter(path)
sidePage = self.store[cat].get_value(iterator,2)
if not sidePage.is_standalone:
if not user_action:
self.window.set_title(sidePage.name)
self.window.set_icon_name(sidePage.icon)
sidePage.build()
if sidePage.stack:
current_page = sidePage.stack.get_visible_child_name()
self.stack_switcher.set_stack(sidePage.stack)
l = sidePage.stack.get_children()
if len(l) > 0:
sidePage.stack.set_visible_child(l[0])
if sidePage.stack.get_visible():
self.stack_switcher.set_opacity(1)
else:
self.stack_switcher.set_opacity(0)
if hasattr(sidePage, "connect_proxy"):
sidePage.connect_proxy("hide_stack", self._on_sidepage_hide_stack)
sidePage.connect_proxy("show_stack", self._on_sidepage_show_stack)
else:
self.stack_switcher.set_opacity(0)
else:
self.stack_switcher.set_opacity(0)
if user_action:
self.main_stack.set_visible_child_name("content_box_page")
self.header_stack.set_visible_child_name("content_box")
else:
self.main_stack.set_visible_child_full("content_box_page", Gtk.StackTransitionType.NONE)
self.header_stack.set_visible_child_full("content_box", Gtk.StackTransitionType.NONE)
self.current_sidepage = sidePage
width = 0
for widget in self.top_bar:
m, n = widget.get_preferred_width()
width += n
self.top_bar.set_size_request(width + 20, -1)
self.maybe_resize(sidePage)
else:
sidePage.build()
def maybe_resize(self, sidePage):
m, n = self.content_box.get_preferred_size()
# Resize vertically depending on the height requested by the module
use_height = WIN_HEIGHT
total_height = n.height + self.bar_heights + WIN_H_PADDING
if not sidePage.size:
# No height requested, resize vertically if the module is taller than the window
if total_height > WIN_HEIGHT:
use_height = total_height
elif sidePage.size > 0:
# Height hardcoded by the module
use_height = sidePage.size + self.bar_heights + WIN_H_PADDING
elif sidePage.size == -1:
# Module requested the window to fit it (i.e. shrink the window if necessary)
use_height = total_height
self.window.resize(WIN_WIDTH, use_height)
def deselect(self, cat):
for key in self.side_view:
if key is not cat:
self.side_view[key].unselect_all()
''' Create the UI '''
def __init__(self):
self.builder = Gtk.Builder()
self.builder.add_from_file(config.currentPath + "/cinnamon-settings.ui")
self.window = XApp.GtkWindow(window_position=Gtk.WindowPosition.CENTER,
default_width=800, default_height=600)
main_box = self.builder.get_object("main_box")
self.window.add(main_box)
self.top_bar = self.builder.get_object("top_bar")
self.side_view = {}
self.main_stack = self.builder.get_object("main_stack")
self.main_stack.set_transition_type(Gtk.StackTransitionType.CROSSFADE)
self.main_stack.set_transition_duration(150)
self.header_stack = self.builder.get_object("header_stack")
self.header_stack.set_transition_type(Gtk.StackTransitionType.CROSSFADE)
self.header_stack.set_transition_duration(150)
self.side_view_container = self.builder.get_object("category_box")
self.side_view_sw = self.builder.get_object("side_view_sw")
context = self.side_view_sw.get_style_context()
context.add_class("cs-category-view")
context.add_class("view")
self.side_view_sw.show_all()
self.content_box = self.builder.get_object("content_box")
self.content_box_sw = self.builder.get_object("content_box_sw")
self.content_box_sw.show_all()
self.button_back = self.builder.get_object("button_back")
self.button_back.set_tooltip_text(_("Back to all settings"))
button_image = self.builder.get_object("image1")
button_image.props.icon_size = Gtk.IconSize.MENU
self.stack_switcher = self.builder.get_object("stack_switcher")
m, n = self.button_back.get_preferred_width()
self.stack_switcher.set_margin_end(n)
self.search_entry = self.builder.get_object("search_box")
self.search_entry.set_placeholder_text(_("Search"))
self.search_entry.connect("changed", self.onSearchTextChanged)
self.search_entry.connect("icon-press", self.onClearSearchBox)
self.window.connect("destroy", self.quit)
self.builder.connect_signals(self)
self.unsortedSidePages = []
self.sidePages = []
self.settings = Gio.Settings.new("org.cinnamon")
self.current_cat_widget = None
self.current_sidepage = None
self.c_manager = capi.CManager()
self.content_box.c_manager = self.c_manager
self.bar_heights = 0
for module in modules:
try:
mod = module.Module(self.content_box)
if self.loadCheck(mod) and self.setParentRefs(mod):
self.unsortedSidePages.append((mod.sidePage, mod.name, mod.category))
except:
print("Failed to load module %s" % module)
traceback.print_exc()
for item in CONTROL_CENTER_MODULES:
ccmodule = SettingsWidgets.CCModule(item[0], item[1], item[2], item[3], item[4], self.content_box)
if ccmodule.process(self.c_manager):
self.unsortedSidePages.append((ccmodule.sidePage, ccmodule.name, ccmodule.category))
for item in STANDALONE_MODULES:
samodule = SettingsWidgets.SAModule(item[0], item[1], item[2], item[3], item[4], self.content_box)
if samodule.process():
self.unsortedSidePages.append((samodule.sidePage, samodule.name, samodule.category))
# sort the modules alphabetically according to the current locale
localeStrKey = cmp_to_key(locale.strcoll)
# Apply locale key to the field name of each side page.
sidePagesKey = lambda m : localeStrKey(m[0].name)
self.sidePages = sorted(self.unsortedSidePages, key=sidePagesKey)
# create the backing stores for the side nav-view.
sidePagesIters = {}
self.store = {}
self.storeFilter = {}
for sidepage in self.sidePages:
sp, sp_id, sp_cat = sidepage
if sp_cat not in self.store: # Label Icon sidePage Category
self.store[sidepage[2]] = Gtk.ListStore(str, str, object, str)
for category in CATEGORIES:
if category["id"] == sp_cat:
category["show"] = True
# Don't allow item names (and their translations) to be more than 30 chars long. It looks ugly and it creates huge gaps in the icon views
name = sp.name
if len(name) > 30:
name = "%s..." % name[:30]
sidePagesIters[sp_id] = (self.store[sp_cat].append([name, sp.icon, sp, sp_cat]), sp_cat)
self.min_label_length = 0
self.min_pix_length = 0
for key in self.store:
char, pix = self.get_label_min_width(self.store[key])
self.min_label_length = max(char, self.min_label_length)
self.min_pix_length = max(pix, self.min_pix_length)
self.storeFilter[key] = self.store[key].filter_new()
self.storeFilter[key].set_visible_func(self.filter_visible_function)
self.min_label_length += 2
self.min_pix_length += 4
self.min_label_length = max(self.min_label_length, MIN_LABEL_WIDTH)
self.min_pix_length = max(self.min_pix_length, MIN_PIX_WIDTH)
self.min_label_length = min(self.min_label_length, MAX_LABEL_WIDTH)
self.min_pix_length = min(self.min_pix_length, MAX_PIX_WIDTH)
self.displayCategories()
# set up larger components.
self.window.set_title(_("System Settings"))
self.button_back.connect('clicked', self.back_to_icon_view)
self.calculate_bar_heights()
# Select the first sidePage
if len(sys.argv) > 1 and sys.argv[1] in sidePagesIters:
# If we're launching a module directly, set the WM class so GWL
# can consider it as a standalone app and give it its own
# group.
wm_class = "cinnamon-settings %s" % sys.argv[1]
self.window.set_wmclass(wm_class, wm_class)
self.button_back.hide()
(iter, cat) = sidePagesIters[sys.argv[1]]
path = self.store[cat].get_path(iter)
if path:
self.go_to_sidepage(cat, path, user_action=False)
else:
self.search_entry.grab_focus()
else:
self.search_entry.grab_focus()
self.window.connect("key-press-event", self.on_keypress)
self.window.connect("button-press-event", self.on_buttonpress)
self.window.show()
def on_keypress(self, widget, event):
grab = False
device = Gtk.get_current_event_device()
if device.get_source() == Gdk.InputSource.KEYBOARD:
grab = Gdk.Display.get_default().device_is_grabbed(device)
if not grab and event.keyval == Gdk.KEY_BackSpace and (type(self.window.get_focus()) not in
(Gtk.TreeView, Gtk.Entry, Gtk.SpinButton, Gtk.TextView)):
self.back_to_icon_view(None)
return True
return False
def on_buttonpress(self, widget, event):
if event.button == MOUSE_BACK_BUTTON:
self.back_to_icon_view(None)
return True
return False
def calculate_bar_heights(self):
h = 0
m, n = self.top_bar.get_preferred_size()
h += n.height
self.bar_heights = h
def onSearchTextChanged(self, widget):
self.displayCategories()
def onClearSearchBox(self, widget, position, event):
if position == Gtk.EntryIconPosition.SECONDARY:
self.search_entry.set_text("")
def strip_accents(self, text):
try:
text = unicode(text, 'utf-8')
except NameError:
# unicode is default in Python 3
pass
text = unicodedata.normalize('NFD', text)
text = text.encode('ascii', 'ignore')
text = text.decode("utf-8")
return str(text)
def filter_visible_function(self, model, iter, user_data = None):
sidePage = model.get_value(iter, 2)
text = self.strip_accents(self.search_entry.get_text().lower())
if self.strip_accents(sidePage.name.lower()).find(text) > -1 or \
self.strip_accents(sidePage.keywords.lower()).find(text) > -1:
return True
else:
return False
def displayCategories(self):
widgets = self.side_view_container.get_children()
for widget in widgets:
widget.destroy()
self.first_category_done = False # This is just to prevent an extra separator showing up before the first category
for category in CATEGORIES:
if category["show"] is True:
self.prepCategory(category)
self.side_view_container.show_all()
def get_label_min_width(self, model):
min_width_chars = 0
min_width_pixels = 0
icon_view = Gtk.IconView()
iter = model.get_iter_first()
while iter != None:
string = model.get_value(iter, 0)
split_by_word = string.split(" ")
for word in split_by_word:
layout = icon_view.create_pango_layout(word)
item_width, item_height = layout.get_pixel_size()
if item_width > min_width_pixels:
min_width_pixels = item_width
if len(word) > min_width_chars:
min_width_chars = len(word)
iter = model.iter_next(iter)
return min_width_chars, min_width_pixels
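# Editor's note: the widths returned above are measured per whitespace-separated
# word on a throwaway Pango layout, so they track the single longest word in any
# label; with word wrapping enabled this keeps labels from being clipped mid-word.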
def pixbuf_data_func(self, column, cell, model, iter, data=None):
wrapper = model.get_value(iter, 1)
if wrapper:
cell.set_property('surface', wrapper.surface)
def prepCategory(self, category):
self.storeFilter[category["id"]].refilter()
if not self.anyVisibleInCategory(category):
return
if self.first_category_done:
widget = Gtk.Separator.new(Gtk.Orientation.HORIZONTAL)
self.side_view_container.pack_start(widget, False, False, 10)
box = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 4)
img = Gtk.Image.new_from_icon_name(category["icon"], Gtk.IconSize.BUTTON)
box.pack_start(img, False, False, 4)
widget = Gtk.Label(yalign=0.5)
widget.set_use_markup(True)
widget.set_markup('<span size="12000">%s</span>' % category["label"])
box.pack_start(widget, False, False, 1)
self.side_view_container.pack_start(box, False, False, 0)
widget = Gtk.IconView.new_with_model(self.storeFilter[category["id"]])
area = widget.get_area()
widget.set_item_width(self.min_pix_length)
widget.set_item_padding(0)
widget.set_column_spacing(18)
widget.set_row_spacing(18)
widget.set_margin(20)
pixbuf_renderer = Gtk.CellRendererPixbuf()
text_renderer = Gtk.CellRendererText(ellipsize=Pango.EllipsizeMode.NONE, wrap_mode=Pango.WrapMode.WORD_CHAR, wrap_width=0, width_chars=self.min_label_length, alignment=Pango.Alignment.CENTER, xalign=0.5)
area.pack_start(pixbuf_renderer, True, True, False)
area.pack_start(text_renderer, True, True, False)
area.add_attribute(pixbuf_renderer, "icon-name", 1)
pixbuf_renderer.set_property("stock-size", Gtk.IconSize.DIALOG)
pixbuf_renderer.set_property("follow-state", True)
area.add_attribute(text_renderer, "text", 0)
self.side_view[category["id"]] = widget
self.side_view_container.pack_start(self.side_view[category["id"]], False, False, 0)
self.first_category_done = True
self.side_view[category["id"]].connect("item-activated", self.side_view_nav, category["id"])
self.side_view[category["id"]].connect("button-release-event", self.button_press, category["id"])
self.side_view[category["id"]].connect("keynav-failed", self.on_keynav_failed, category["id"])
self.side_view[category["id"]].connect("selection-changed", self.on_selection_changed, category["id"])
def bring_selection_into_view(self, iconview):
sel = iconview.get_selected_items()
if sel:
path = sel[0]
found, rect = iconview.get_cell_rect(path, None)
cw = self.side_view_container.get_window()
cw_x, cw_y = cw.get_position()
ivw = iconview.get_window()
iv_x, iv_y = ivw.get_position()
final_y = rect.y + (rect.height / 2) + cw_y + iv_y
adj = self.side_view_sw.get_vadjustment()
page = adj.get_page_size()
current_pos = adj.get_value()
if final_y > current_pos + page:
adj.set_value(iv_y + rect.y)
elif final_y < current_pos:
adj.set_value(iv_y + rect.y)
def on_selection_changed(self, widget, category):
sel = widget.get_selected_items()
if len(sel) > 0:
self.current_cat_widget = widget
self.bring_selection_into_view(widget)
for iv in self.side_view:
if self.side_view[iv] == self.current_cat_widget:
continue
self.side_view[iv].unselect_all()
def get_cur_cat_index(self, category):
i = 0
for cat in CATEGORIES:
if category == cat["id"]:
return i
i += 1
def get_cur_column(self, iconview):
s, path, cell = iconview.get_cursor()
if path:
col = iconview.get_item_column(path)
return col
def reposition_new_cat(self, sel, iconview):
iconview.set_cursor(sel, None, False)
iconview.select_path(sel)
iconview.grab_focus()
def on_keynav_failed(self, widget, direction, category):
num_cats = len(CATEGORIES)
current_idx = self.get_cur_cat_index(category)
new_cat = CATEGORIES[current_idx]
ret = False
dist = 1000
sel = None
if direction == Gtk.DirectionType.DOWN and current_idx < num_cats - 1:
new_cat = CATEGORIES[current_idx + 1]
col = self.get_cur_column(widget)
new_cat_view = self.side_view[new_cat["id"]]
model = new_cat_view.get_model()
iter = model.get_iter_first()
while iter is not None:
path = model.get_path(iter)
c = new_cat_view.get_item_column(path)
d = abs(c - col)
if d < dist:
sel = path
dist = d
iter = model.iter_next(iter)
self.reposition_new_cat(sel, new_cat_view)
ret = True
elif direction == Gtk.DirectionType.UP and current_idx > 0:
new_cat = CATEGORIES[current_idx - 1]
col = self.get_cur_column(widget)
new_cat_view = self.side_view[new_cat["id"]]
model = new_cat_view.get_model()
iter = model.get_iter_first()
while iter is not None:
path = model.get_path(iter)
c = new_cat_view.get_item_column(path)
d = abs(c - col)
if d <= dist:
sel = path
dist = d
iter = model.iter_next(iter)
self.reposition_new_cat(sel, new_cat_view)
ret = True
return ret
def button_press(self, widget, event, category):
if event.button == 1:
self.side_view_nav(widget, None, category)
def anyVisibleInCategory(self, category):
id = category["id"]
iter = self.storeFilter[id].get_iter_first()
visible = False
while iter is not None:
cat = self.storeFilter[id].get_value(iter, 3)
visible = cat == category["id"]
iter = self.storeFilter[id].iter_next(iter)
return visible
def setParentRefs (self, mod):
try:
mod._setParentRef(self.window)
except AttributeError:
pass
return True
def loadCheck (self, mod):
try:
return mod._loadCheck()
except:
return True
def back_to_icon_view(self, widget):
self.window.set_title(_("System Settings"))
self.window.set_icon_name("preferences-system")
self.window.resize(WIN_WIDTH, WIN_HEIGHT)
children = self.content_box.get_children()
for child in children:
child.hide()
if child.get_name() == "c_box":
c_widgets = child.get_children()
for c_widget in c_widgets:
c_widget.hide()
self.main_stack.set_visible_child_name("side_view_page")
self.header_stack.set_visible_child_name("side_view")
self.search_entry.grab_focus()
self.current_sidepage = None
def quit(self, *args):
self.window.destroy()
Gtk.main_quit()
if __name__ == "__main__":
setproctitle("cinnamon-settings")
import signal
ps = proxygsettings.get_proxy_settings()
if ps:
proxy = urllib.ProxyHandler(ps)
else:
proxy = urllib.ProxyHandler()
urllib.install_opener(urllib.build_opener(proxy))
window = MainWindow()
signal.signal(signal.SIGINT, window.quit)
Gtk.main()
| gpl-2.0 |
RPGOne/Skynet | scikit-learn-0.18.1/sklearn/linear_model/tests/test_ransac.py | 52 | 17482 | from scipy import sparse
import numpy as np
from scipy import sparse
from numpy.testing import assert_equal, assert_raises
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_almost_equal
from sklearn.linear_model import LinearRegression, RANSACRegressor, Lasso
from sklearn.linear_model.ransac import _dynamic_max_trials
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data
outliers = np.array((10, 30, 200))
data[outliers[0], :] = (1000, 1000)
data[outliers[1], :] = (-1000, -1000)
data[outliers[2], :] = (-100, -50)
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_is_data_valid():
def is_data_valid(X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
X = np.random.rand(10, 2)
y = np.random.rand(10, 1)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_data_valid=is_data_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_is_model_valid():
def is_model_valid(estimator, X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_model_valid=is_model_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_max_trials():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=0,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=11,
random_state=0)
assert getattr(ransac_estimator, 'n_trials_', None) is None
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 2)
def test_ransac_stop_n_inliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_n_inliers=2,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_stop_score():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_score=0,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_score():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.score(X[2:], y[2:]), 1)
assert_less(ransac_estimator.score(X[:2], y[:2]), 1)
def test_ransac_predict():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.predict(X), np.zeros(100))
def test_ransac_resid_thresh_no_inliers():
# When residual_threshold=0.0 there are no inliers and a
# ValueError with a message should be raised
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.0, random_state=0)
assert_raises_regexp(ValueError,
"No inliers.*residual_threshold.*0\.0",
ransac_estimator.fit, X, y)
def test_ransac_sparse_coo():
X_sparse = sparse.coo_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csr():
X_sparse = sparse.csr_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csc():
X_sparse = sparse.csc_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_none_estimator():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0)
ransac_estimator.fit(X, y)
ransac_none_estimator.fit(X, y)
assert_array_almost_equal(ransac_estimator.predict(X),
ransac_none_estimator.predict(X))
def test_ransac_min_n_samples():
base_estimator = LinearRegression()
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator2 = RANSACRegressor(base_estimator,
min_samples=2. / X.shape[0],
residual_threshold=5, random_state=0)
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1,
residual_threshold=5, random_state=0)
ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2,
residual_threshold=5, random_state=0)
ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0,
residual_threshold=5, random_state=0)
ransac_estimator6 = RANSACRegressor(base_estimator,
residual_threshold=5, random_state=0)
ransac_estimator7 = RANSACRegressor(base_estimator,
min_samples=X.shape[0] + 1,
residual_threshold=5, random_state=0)
ransac_estimator1.fit(X, y)
ransac_estimator2.fit(X, y)
ransac_estimator5.fit(X, y)
ransac_estimator6.fit(X, y)
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator2.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator5.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator6.predict(X))
assert_raises(ValueError, ransac_estimator3.fit, X, y)
assert_raises(ValueError, ransac_estimator4.fit, X, y)
assert_raises(ValueError, ransac_estimator7.fit, X, y)
def test_ransac_multi_dimensional_targets():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# 3-D target values
yyy = np.column_stack([y, y, y])
# Estimate parameters of corrupted data
ransac_estimator.fit(X, yyy)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
# XXX: Remove in 0.20
def test_ransac_residual_metric():
residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1)
residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
assert_warns(DeprecationWarning, ransac_estimator1.fit, X, yyy)
assert_warns(DeprecationWarning, ransac_estimator2.fit, X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
assert_warns(DeprecationWarning, ransac_estimator2.fit, X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
def test_ransac_residual_loss():
loss_multi1 = lambda y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1)
loss_multi2 = lambda y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1)
loss_mono = lambda y_true, y_pred : np.abs(y_true - y_pred)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
loss=loss_multi1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
loss=loss_multi2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
ransac_estimator1.fit(X, yyy)
ransac_estimator2.fit(X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
ransac_estimator2.loss = loss_mono
ransac_estimator2.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
loss="squared_loss")
ransac_estimator3.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
def test_ransac_default_residual_threshold():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_dynamic_max_trials():
# Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
# Hartley, R.~I. and Zisserman, A., 2004,
# Multiple View Geometry in Computer Vision, Second Edition,
# Cambridge University Press, ISBN: 0521540518
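# Editor's note: the expected trial counts below follow the usual RANSAC bound
# (assumed to be what _dynamic_max_trials implements):
#     N = ceil(log(1 - p) / log(1 - (1 - e) ** m))
# e.g. e = 50%, m = 2, p = 0.99: log(0.01) / log(1 - 0.25) ~= 16.01, so 17 trials.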
# e = 0%, min_samples = X
assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
# e = 5%, min_samples = 2
assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
# e = 10%, min_samples = 2
assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
# e = 30%, min_samples = 2
assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
# e = 50%, min_samples = 2
assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
# e = 5%, min_samples = 8
assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
# e = 10%, min_samples = 8
assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
# e = 30%, min_samples = 8
assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
# e = 50%, min_samples = 8
assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
# e = 0%, min_samples = 10
assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)
assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf'))
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=-0.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=1.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_fit_sample_weight():
ransac_estimator = RANSACRegressor(random_state=0)
n_samples = y.shape[0]
weights = np.ones(n_samples)
ransac_estimator.fit(X, y, weights)
# sanity check
assert_equal(ransac_estimator.inlier_mask_.shape[0], n_samples)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
# check that mask is correct
assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
X_ = random_state.randint(0, 200, [10, 1])
y_ = np.ndarray.flatten(0.2 * X_ + 2)
sample_weight = random_state.randint(0, 10, 10)
outlier_X = random_state.randint(0, 1000, [1, 1])
outlier_weight = random_state.randint(0, 10, 1)
outlier_y = random_state.randint(-1000, 0, 1)
X_flat = np.append(np.repeat(X_, sample_weight, axis=0),
np.repeat(outlier_X, outlier_weight, axis=0), axis=0)
y_flat = np.ndarray.flatten(np.append(np.repeat(y_, sample_weight, axis=0),
np.repeat(outlier_y, outlier_weight, axis=0),
axis=0))
ransac_estimator.fit(X_flat, y_flat)
ref_coef_ = ransac_estimator.estimator_.coef_
sample_weight = np.append(sample_weight, outlier_weight)
X_ = np.append(X_, outlier_X, axis=0)
y_ = np.append(y_, outlier_y)
ransac_estimator.fit(X_, y_, sample_weight)
assert_almost_equal(ransac_estimator.estimator_.coef_, ref_coef_)
# check that if base_estimator.fit doesn't support
# sample_weight, raises error
base_estimator = Lasso()
ransac_estimator = RANSACRegressor(base_estimator)
assert_raises(ValueError, ransac_estimator.fit, X, y, weights)
| bsd-3-clause |
heeraj123/oh-mainline | vendor/packages/django-assets/django_assets/jinja2/extension.py | 16 | 2628 | from jinja2.ext import Extension
from jinja2 import nodes
from django_assets.conf import settings
from django_assets.merge import process
from django_assets.bundle import Bundle
from django_assets import registry
__all__ = ('assets',)
class AssetsExtension(Extension):
"""
As opposed to the Django tag, this tag is slightly more capable due
to the expressive powers inherited from Jinja. For example:
{% assets "src1.js", "src2.js", get_src3(),
filter=("jsmin", "gzip"), output=get_output() %}
{% endassets %}
"""
tags = set(['assets'])
def parse(self, parser):
lineno = parser.stream.next().lineno
files = []
output = nodes.Const(None)
filter = nodes.Const(None)
# parse the arguments
first = True
while parser.stream.current.type is not 'block_end':
if not first:
parser.stream.expect('comma')
first = False
# lookahead to see if this is an assignment (an option)
if parser.stream.current.test('name') and parser.stream.look().test('assign'):
name = parser.stream.next().value
parser.stream.skip()
value = parser.parse_expression()
if name == 'filter':
filter = value
elif name == 'output':
output = value
else:
parser.fail('Invalid keyword argument: %s' % name)
# otherwise assume a source file is given, which may
# be any expression, except note that strings are handled
# separately above
else:
files.append(parser.parse_expression())
# parse the contents of this tag, and return a block
body = parser.parse_statements(['name:endassets'], drop_needle=True)
return nodes.CallBlock(
self.call_method('_render_assets',
args=[filter, output, nodes.List(files)]),
[nodes.Name('ASSET_URL', 'store')], [], body).\
set_lineno(lineno)
def _render_assets(self, filter, output, files, caller=None):
# resolve bundle names
registry.autoload()
files = [registry.get(f) or f for f in files]
result = u""
urls = process(Bundle(*files, **{'output': output, 'filters': filter}))
for f in urls:
result += caller(f)
return result
assets = AssetsExtension # nicer import name | agpl-3.0 |
meteorcloudy/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/cauchy_test.py | 33 | 16857 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Cauchy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import numpy as np
from tensorflow.contrib.distributions.python.ops import cauchy as cauchy_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
stats = try_import("scipy.stats")
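# Editor's note: scipy.stats is treated as an optional dependency; when the import
# above fails, the tests still check shapes and sampling but return early before
# the scipy-based reference comparisons (the "if not stats: return" guards below).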
class CauchyTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(123)
def assertAllFinite(self, tensor):
is_finite = np.isfinite(tensor.eval())
all_true = np.ones_like(is_finite, dtype=np.bool)
self.assertAllEqual(all_true, is_finite)
def _testParamShapes(self, sample_shape, expected):
with self.test_session():
param_shapes = cauchy_lib.Cauchy.param_shapes(sample_shape)
loc_shape, scale_shape = param_shapes["loc"], param_shapes["scale"]
self.assertAllEqual(expected, loc_shape.eval())
self.assertAllEqual(expected, scale_shape.eval())
loc = array_ops.zeros(loc_shape)
scale = array_ops.ones(scale_shape)
self.assertAllEqual(expected,
array_ops.shape(
cauchy_lib.Cauchy(loc, scale).sample()).eval())
def _testParamStaticShapes(self, sample_shape, expected):
param_shapes = cauchy_lib.Cauchy.param_static_shapes(sample_shape)
loc_shape, scale_shape = param_shapes["loc"], param_shapes["scale"]
self.assertEqual(expected, loc_shape)
self.assertEqual(expected, scale_shape)
def testParamShapes(self):
sample_shape = [10, 3, 4]
self._testParamShapes(sample_shape, sample_shape)
self._testParamShapes(constant_op.constant(sample_shape), sample_shape)
def testParamStaticShapes(self):
sample_shape = [10, 3, 4]
self._testParamStaticShapes(sample_shape, sample_shape)
self._testParamStaticShapes(
tensor_shape.TensorShape(sample_shape), sample_shape)
def testCauchyLogPDF(self):
with self.test_session():
batch_size = 6
loc = constant_op.constant([3.0] * batch_size)
scale = constant_op.constant([np.sqrt(10.0)] * batch_size)
x = np.array([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0], dtype=np.float32)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
log_pdf = cauchy.log_prob(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), log_pdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(),
log_pdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, log_pdf.shape)
self.assertAllEqual(cauchy.batch_shape, log_pdf.eval().shape)
pdf = cauchy.prob(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), pdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), pdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, pdf.shape)
self.assertAllEqual(cauchy.batch_shape, pdf.eval().shape)
if not stats:
return
expected_log_pdf = stats.cauchy(loc.eval(), scale.eval()).logpdf(x)
self.assertAllClose(expected_log_pdf, log_pdf.eval())
self.assertAllClose(np.exp(expected_log_pdf), pdf.eval())
def testCauchyLogPDFMultidimensional(self):
with self.test_session():
batch_size = 6
loc = constant_op.constant([[3.0, -3.0]] * batch_size)
scale = constant_op.constant(
[[np.sqrt(10.0), np.sqrt(15.0)]] * batch_size)
x = np.array([[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]], dtype=np.float32).T
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
log_pdf = cauchy.log_prob(x)
log_pdf_values = log_pdf.eval()
self.assertEqual(log_pdf.shape, (6, 2))
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), log_pdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(),
log_pdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, log_pdf.shape)
self.assertAllEqual(cauchy.batch_shape, log_pdf.eval().shape)
pdf = cauchy.prob(x)
pdf_values = pdf.eval()
self.assertEqual(pdf.shape, (6, 2))
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), pdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), pdf_values.shape)
self.assertAllEqual(cauchy.batch_shape, pdf.shape)
self.assertAllEqual(cauchy.batch_shape, pdf_values.shape)
if not stats:
return
expected_log_pdf = stats.cauchy(loc.eval(), scale.eval()).logpdf(x)
self.assertAllClose(expected_log_pdf, log_pdf_values)
self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
def testCauchyCDF(self):
with self.test_session():
batch_size = 50
loc = self._rng.randn(batch_size)
scale = self._rng.rand(batch_size) + 1.0
x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
cdf = cauchy.cdf(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), cdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), cdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, cdf.shape)
self.assertAllEqual(cauchy.batch_shape, cdf.eval().shape)
if not stats:
return
expected_cdf = stats.cauchy(loc, scale).cdf(x)
self.assertAllClose(expected_cdf, cdf.eval(), atol=0)
def testCauchySurvivalFunction(self):
with self.test_session():
batch_size = 50
loc = self._rng.randn(batch_size)
scale = self._rng.rand(batch_size) + 1.0
x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
sf = cauchy.survival_function(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), sf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), sf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, sf.shape)
self.assertAllEqual(cauchy.batch_shape, sf.eval().shape)
if not stats:
return
expected_sf = stats.cauchy(loc, scale).sf(x)
self.assertAllClose(expected_sf, sf.eval(), atol=0)
def testCauchyLogCDF(self):
with self.test_session():
batch_size = 50
loc = self._rng.randn(batch_size)
scale = self._rng.rand(batch_size) + 1.0
x = np.linspace(-100.0, 10.0, batch_size).astype(np.float64)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
cdf = cauchy.log_cdf(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), cdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), cdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, cdf.shape)
self.assertAllEqual(cauchy.batch_shape, cdf.eval().shape)
if not stats:
return
expected_cdf = stats.cauchy(loc, scale).logcdf(x)
self.assertAllClose(expected_cdf, cdf.eval(), atol=0, rtol=1e-5)
def testFiniteGradientAtDifficultPoints(self):
for dtype in [np.float32, np.float64]:
g = ops.Graph()
with g.as_default():
loc = variables.Variable(dtype(0.0))
scale = variables.Variable(dtype(1.0))
dist = cauchy_lib.Cauchy(loc=loc, scale=scale)
x = np.array([-100., -20., -5., 0., 5., 20., 100.]).astype(dtype)
for func in [
dist.cdf, dist.log_cdf, dist.survival_function,
dist.log_survival_function, dist.log_prob, dist.prob
]:
value = func(x)
grads = gradients_impl.gradients(value, [loc, scale])
with self.test_session(graph=g):
variables.global_variables_initializer().run()
self.assertAllFinite(value)
self.assertAllFinite(grads[0])
self.assertAllFinite(grads[1])
def testCauchyLogSurvivalFunction(self):
with self.test_session():
batch_size = 50
loc = self._rng.randn(batch_size)
scale = self._rng.rand(batch_size) + 1.0
x = np.linspace(-10.0, 100.0, batch_size).astype(np.float64)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
sf = cauchy.log_survival_function(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), sf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), sf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, sf.shape)
self.assertAllEqual(cauchy.batch_shape, sf.eval().shape)
if not stats:
return
expected_sf = stats.cauchy(loc, scale).logsf(x)
self.assertAllClose(expected_sf, sf.eval(), atol=0, rtol=1e-5)
def testCauchyEntropy(self):
with self.test_session():
loc = np.array([1.0, 1.0, 1.0])
scale = np.array([[1.0, 2.0, 3.0]])
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
entropy = cauchy.entropy()
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), entropy.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(),
entropy.eval().shape)
self.assertAllEqual(cauchy.batch_shape, entropy.shape)
self.assertAllEqual(cauchy.batch_shape, entropy.eval().shape)
if not stats:
return
expected_entropy = stats.cauchy(loc, scale[0]).entropy().reshape((1, 3))
self.assertAllClose(expected_entropy, entropy.eval())
def testCauchyMode(self):
with self.test_session():
# Mu will be broadcast to [7, 7, 7].
loc = [7.]
scale = [11., 12., 13.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
self.assertAllEqual((3,), cauchy.mode().shape)
self.assertAllEqual([7., 7, 7], cauchy.mode().eval())
def testCauchyMean(self):
with self.test_session():
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
self.assertAllEqual((3,), cauchy.mean().shape)
self.assertAllEqual([np.nan] * 3, cauchy.mean().eval())
def testCauchyNanMean(self):
with self.test_session():
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale, allow_nan_stats=False)
with self.assertRaises(ValueError):
cauchy.mean().eval()
def testCauchyQuantile(self):
with self.test_session():
batch_size = 50
loc = self._rng.randn(batch_size)
scale = self._rng.rand(batch_size) + 1.0
p = np.linspace(0.000001, 0.999999, batch_size).astype(np.float64)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
x = cauchy.quantile(p)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), x.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), x.eval().shape)
self.assertAllEqual(cauchy.batch_shape, x.shape)
self.assertAllEqual(cauchy.batch_shape, x.eval().shape)
if not stats:
return
expected_x = stats.cauchy(loc, scale).ppf(p)
self.assertAllClose(expected_x, x.eval(), atol=0.)
def testCauchyVariance(self):
with self.test_session():
# scale will be broadcast to [7, 7, 7]
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
self.assertAllEqual((3,), cauchy.variance().shape)
self.assertAllEqual([np.nan] * 3, cauchy.variance().eval())
def testCauchyNanVariance(self):
with self.test_session():
# scale will be broadcast to [7, 7, 7]
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale, allow_nan_stats=False)
with self.assertRaises(ValueError):
cauchy.variance().eval()
def testCauchyStandardDeviation(self):
with self.test_session():
# scale will be broadcast to [7, 7, 7]
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
self.assertAllEqual((3,), cauchy.stddev().shape)
self.assertAllEqual([np.nan] * 3, cauchy.stddev().eval())
def testCauchyNanStandardDeviation(self):
with self.test_session():
# scale will be broadcast to [7, 7, 7]
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale, allow_nan_stats=False)
with self.assertRaises(ValueError):
cauchy.stddev().eval()
def testCauchySample(self):
with self.test_session():
loc = constant_op.constant(3.0)
scale = constant_op.constant(1.0)
loc_v = 3.0
n = constant_op.constant(100000)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
samples = cauchy.sample(n)
sample_values = samples.eval()
self.assertEqual(sample_values.shape, (100000,))
self.assertAllClose(np.median(sample_values), loc_v, atol=1e-1)
expected_shape = tensor_shape.TensorShape([n.eval()]).concatenate(
tensor_shape.TensorShape(cauchy.batch_shape_tensor().eval()))
self.assertAllEqual(expected_shape, samples.shape)
self.assertAllEqual(expected_shape, sample_values.shape)
expected_shape = (
tensor_shape.TensorShape([n.eval()]).concatenate(cauchy.batch_shape))
self.assertAllEqual(expected_shape, samples.shape)
self.assertAllEqual(expected_shape, sample_values.shape)
def testCauchySampleMultiDimensional(self):
with self.test_session():
batch_size = 2
loc = constant_op.constant([[3.0, -3.0]] * batch_size)
scale = constant_op.constant([[0.5, 1.0]] * batch_size)
loc_v = [3.0, -3.0]
n = constant_op.constant(100000)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
samples = cauchy.sample(n)
sample_values = samples.eval()
self.assertEqual(samples.shape, (100000, batch_size, 2))
self.assertAllClose(
np.median(sample_values[:, 0, 0]), loc_v[0], atol=1e-1)
self.assertAllClose(
np.median(sample_values[:, 0, 1]), loc_v[1], atol=1e-1)
expected_shape = tensor_shape.TensorShape([n.eval()]).concatenate(
tensor_shape.TensorShape(cauchy.batch_shape_tensor().eval()))
self.assertAllEqual(expected_shape, samples.shape)
self.assertAllEqual(expected_shape, sample_values.shape)
expected_shape = (
tensor_shape.TensorShape([n.eval()]).concatenate(cauchy.batch_shape))
self.assertAllEqual(expected_shape, samples.shape)
self.assertAllEqual(expected_shape, sample_values.shape)
def testCauchyNegativeLocFails(self):
with self.test_session():
cauchy = cauchy_lib.Cauchy(loc=[1.], scale=[-5.], validate_args=True)
with self.assertRaisesOpError("Condition x > 0 did not hold"):
cauchy.mode().eval()
def testCauchyShape(self):
with self.test_session():
loc = constant_op.constant([-3.0] * 5)
scale = constant_op.constant(11.0)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
self.assertEqual(cauchy.batch_shape_tensor().eval(), [5])
self.assertEqual(cauchy.batch_shape, tensor_shape.TensorShape([5]))
self.assertAllEqual(cauchy.event_shape_tensor().eval(), [])
self.assertEqual(cauchy.event_shape, tensor_shape.TensorShape([]))
def testCauchyShapeWithPlaceholders(self):
loc = array_ops.placeholder(dtype=dtypes.float32)
scale = array_ops.placeholder(dtype=dtypes.float32)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
with self.test_session() as sess:
# get_batch_shape should return an "<unknown>" tensor.
self.assertEqual(cauchy.batch_shape, tensor_shape.TensorShape(None))
self.assertEqual(cauchy.event_shape, ())
self.assertAllEqual(cauchy.event_shape_tensor().eval(), [])
self.assertAllEqual(
sess.run(
cauchy.batch_shape_tensor(),
feed_dict={
loc: 5.0,
scale: [1.0, 2.0]
}), [2])
if __name__ == "__main__":
test.main()
| apache-2.0 |
kailIII/emaresa | aeroo/report_aeroo/barcode/EANBarCode.py | 13 | 5228 | # Copyright (c) 2009-2011 Alistek Ltd (http://www.alistek.com) All Rights Reserved.
# General contacts <[email protected]>
from tools import config, ustr
fontsize = 15
"""
This class generates EAN bar codes; it requires PIL (the Python Imaging Library)
to be installed.
If the code has no checksum (i.e. only 12 digits), the checksum is added automatically.
Create bar code sample :
from EANBarCode import EanBarCode
bar = EanBarCode()
bar.getImage("9782212110708",50,"gif")
"""
class EanBarCode:
""" Compute the EAN bar code """
def __init__(self):
A = {0 : "0001101", 1 : "0011001", 2 : "0010011", 3 : "0111101", 4 : "0100011",
5 : "0110001", 6 : "0101111", 7 : "0111011", 8 : "0110111", 9 : "0001011"}
B = {0 : "0100111", 1 : "0110011", 2 : "0011011", 3 : "0100001", 4 : "0011101",
5 : "0111001", 6 : "0000101", 7 : "0010001", 8 : "0001001", 9 : "0010111"}
C = {0 : "1110010", 1 : "1100110", 2 : "1101100", 3 : "1000010", 4 : "1011100",
5 : "1001110", 6 : "1010000", 7 : "1000100", 8 : "1001000", 9 : "1110100"}
self.groupC = C
self.family = {0 : (A,A,A,A,A,A), 1 : (A,A,B,A,B,B), 2 : (A,A,B,B,A,B), 3 : (A,A,B,B,B,A), 4 : (A,B,A,A,B,B),
5 : (A,B,B,A,A,B), 6 : (A,B,B,B,A,A), 7 : (A,B,A,B,A,B), 8 : (A,B,A,B,B,A), 9 : (A,B,B,A,B,A)}
def makeCode(self, code):
""" Create the binary code
return a string which contains "0" for white bar, "1" for black bar, "L" for long bar """
# Convert code string in integer list
self.EAN13 = []
for digit in code:
self.EAN13.append(int(digit))
# If the code has already a checksum
if len(self.EAN13) == 13:
# Verify checksum
self.verifyChecksum(self.EAN13)
# If the code has not yet checksum
elif len(self.EAN13) == 12:
# Add checksum value
self.EAN13.append(self.computeChecksum(self.EAN13))
# Get the left codage class
left = self.family[self.EAN13[0]]
# Add start separator
strCode = 'L0L'
# Compute the left part of bar code
for i in range(0,6):
strCode += left[i][self.EAN13[i+1]]
# Add middle separator
strCode += '0L0L0'
# Compute the right codage class
for i in range (7,13):
strCode += self.groupC[self.EAN13[i]]
# Add stop separator
strCode += 'L0L'
return strCode
def computeChecksum(self, arg):
""" Compute the checksum of bar code """
# UPCA/EAN13
weight=[1,3]*6
magic=10
sum = 0
for i in range(12): # checksum based on first 12 digits.
sum = sum + int(arg[i]) * weight[i]
z = ( magic - (sum % magic) ) % magic
if z < 0 or z >= magic:
return None
return z
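# Worked example (editor's note): for the 12 leading digits of "9782212110708"
# the weighted sum with weights 1,3,1,3,... is
#     9 + 21 + 8 + 6 + 2 + 3 + 2 + 3 + 1 + 0 + 7 + 0 = 62
# and (10 - 62 % 10) % 10 = 8, which matches the sample code's final check digit.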
def verifyChecksum(self, bits):
""" Verify the checksum """
computedChecksum = self.computeChecksum(bits[:12])
codeBarChecksum = bits[12]
if codeBarChecksum != computedChecksum:
raise Exception ("Bad checksum is %s and should be %s"%(codeBarChecksum, computedChecksum))
def getImage(self, value, height = 50, xw=1, rotate=None, extension = "PNG"):
""" Get an image with PIL library
value code barre value
height height in pixel of the bar code
extension image file extension"""
from PIL import Image, ImageFont, ImageDraw
import os
from string import lower, upper
# Get the bar code list
bits = self.makeCode(value)
# Get the bar code with the checksum added
code = ""
for digit in self.EAN13:
code += "%d"%digit
# Create a new image
position = 8
im = Image.new("1",(len(bits)+position,height))
# Load font
ad = os.path.abspath(os.path.join(ustr(config['root_path']), u'addons'))
mod_path_list = map(lambda m: os.path.abspath(ustr(m.strip())), config['addons_path'].split(','))
mod_path_list.append(ad)
for mod_path in mod_path_list:
font_file = mod_path+os.path.sep+ \
"report_aeroo"+os.path.sep+"barcode"+os.path.sep+"FreeMonoBold.ttf"
if os.path.lexists(font_file):
font = ImageFont.truetype(font_file, fontsize)
# Create drawer
draw = ImageDraw.Draw(im)
# Erase image
draw.rectangle(((0,0),(im.size[0],im.size[1])),fill=256)
# Draw the leading digit to the left of the bars
draw.text((0, height-9), code[0], font=font, fill=0)
# Draw the first group of six digits
draw.text((position+7, height-9), code[1:7], font=font, fill=0)
# Draw second part of number
draw.text((len(bits)/2+6+position, height-9), code[7:], font=font, fill=0)
# Draw the bar codes
for bit in range(len(bits)):
# Draw normal bar
if bits[bit] == '1':
draw.rectangle(((bit+position,0),(bit+position,height-10)),fill=0)
# Draw long bar
elif bits[bit] == 'L':
draw.rectangle(((bit+position,0),(bit+position,height-3)),fill=0)
# Save the result image
return im
| agpl-3.0 |
jmighion/ansible | lib/ansible/utils/listify.py | 118 | 1462 | # (c) 2014 Michael DeHaan, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from collections import Iterable
from ansible.module_utils.six import string_types
from ansible.template.safe_eval import safe_eval
__all__ = ['listify_lookup_plugin_terms']
def listify_lookup_plugin_terms(terms, templar, loader, fail_on_undefined=True, convert_bare=False):
if isinstance(terms, string_types):
terms = templar.template(terms.strip(), convert_bare=convert_bare, fail_on_undefined=fail_on_undefined)
else:
terms = templar.template(terms, fail_on_undefined=fail_on_undefined)
if isinstance(terms, string_types) or not isinstance(terms, Iterable):
terms = [terms]
return terms
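# Illustrative usage (editor's sketch, names are hypothetical):
#   terms = listify_lookup_plugin_terms("{{ my_items }}", templar, loader)
# A templated string is rendered first; any scalar or non-iterable result is then
# wrapped in a single-element list so lookup plugins always receive a list.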
| gpl-3.0 |
dou800/php-buildpack-legacy | builds/runtimes/python-2.7.6/lib/python2.7/lib2to3/fixes/fix_types.py | 304 | 1806 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for removing uses of the types module.
These work only for the known names in the types module. The handled forms
can include the "types." prefix or not, i.e. it is assumed the module is imported either as:
import types
from types import ... # either * or specific types
The import statements are not modified.
There should be another fixer that handles at least the following constants:
type([]) -> list
type(()) -> tuple
type('') -> str
"""
# Local imports
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name
_TYPE_MAPPING = {
'BooleanType' : 'bool',
'BufferType' : 'memoryview',
'ClassType' : 'type',
'ComplexType' : 'complex',
'DictType': 'dict',
'DictionaryType' : 'dict',
'EllipsisType' : 'type(Ellipsis)',
#'FileType' : 'io.IOBase',
'FloatType': 'float',
'IntType': 'int',
'ListType': 'list',
'LongType': 'int',
'ObjectType' : 'object',
'NoneType': 'type(None)',
'NotImplementedType' : 'type(NotImplemented)',
'SliceType' : 'slice',
'StringType': 'bytes', # XXX ?
'StringTypes' : 'str', # XXX ?
'TupleType': 'tuple',
'TypeType' : 'type',
'UnicodeType': 'str',
'XRangeType' : 'range',
}
_pats = ["power< 'types' trailer< '.' name='%s' > >" % t for t in _TYPE_MAPPING]
class FixTypes(fixer_base.BaseFix):
BM_compatible = True
PATTERN = '|'.join(_pats)
def transform(self, node, results):
new_value = unicode(_TYPE_MAPPING.get(results["name"].value))
if new_value:
return Name(new_value, prefix=node.prefix)
return None
| mit |
lmazuel/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/v2017_12_01/models/virtual_machine_status_code_count_py3.py | 5 | 1325 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualMachineStatusCodeCount(Model):
"""The status code and count of the virtual machine scale set instance view
status summary.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar code: The instance view status code.
:vartype code: str
:ivar count: The number of instances having a particular status code.
:vartype count: int
"""
_validation = {
'code': {'readonly': True},
'count': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'count': {'key': 'count', 'type': 'int'},
}
def __init__(self, **kwargs) -> None:
super(VirtualMachineStatusCodeCount, self).__init__(**kwargs)
self.code = None
self.count = None
| mit |
xXminiWHOOPERxX/xXminiWHOOPERxX-Kernel-ZaraCL- | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
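# Illustrative sketch (not part of the original script): each scheduler event
# handled above produces a *new* RunqueueSnapshot rather than mutating the old
# one. The pids used here are made up and this helper is never called by perf.
def _demo_runqueue_model():
    rq = RunqueueSnapshot()          # only the idle task (pid 0) at first
    rq = rq.wake_up(1234)            # task 1234 becomes runnable on this rq
    rq = rq.migrate_in(5678)         # task 5678 migrates onto this rq
    assert rq.load() == 2            # load() does not count the idle task
    rq = rq.migrate_out(5678)        # task 5678 leaves again
    assert rq.load() == 1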
| gpl-2.0 |
adamhajari/spyre | tests/test_app.py | 1 | 5046 | # from spyre import server
from spyre import server
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from numpy import pi
class TestApp(server.App):
colors = [
{"label": "Green", "value": 'g'},
{"label": "Red", "value": 'r'},
{"label": "Blue", "value": 'b'},
{"label": "Yellow", "value": 'y'},
]
on_demand_streaming_services = [
{"label": "Spotify", "value": 's'},
{"label": "Apple Music", "value": 'a'},
]
title = "Simple Sine Wave"
inputs = [
{
"input_type": 'text',
"label": 'Title',
"value": 'Simple Sine Wave',
"variable_name": 'title',
"action_id": "plot",
}, {
"input_type": 'radiobuttons',
"label": 'Function',
"options": [
{"label": "Sine", "value": "sin", "checked": True},
{"label": "Cosine", "value": "cos"}
],
"variable_name": 'func_type',
"action_id": "plot",
}, {
"input_type": 'checkboxgroup',
"label": 'Axis Labels',
"options": [
{"label": "x-axis", "value": 1, "checked": True},
{"label": "y-axis", "value": 2}
],
"variable_name": 'axis_label',
"action_id": "plot",
}, {
"input_type": 'dropdown',
"label": 'Line Color',
"options": colors,
"variable_name": 'color',
"value": "b",
"action_id": "plot",
}, {
"input_type": 'dropdown',
"label": 'On-Demand Streaming Service',
"options": on_demand_streaming_services,
"variable_name": 'on_demand_streaming_service',
"action_id": "plot",
}, {
"input_type": 'slider',
"label": 'frequency',
"variable_name": 'freq',
"value": 2,
"min": 1,
"max": 30,
"action_id": "plot",
}
]
controls = [
{
"control_type": "button",
"control_id": "button1",
"label": "plot",
}, {
"control_type": "button",
"control_id": "button2",
"label": "download",
}
]
outputs = [
{
"output_type": "html",
"output_id": "html1",
"control_id": "button1",
"on_page_load": True,
}, {
"output_type": "plot",
"output_id": "plot",
"control_id": "button1",
"on_page_load": True,
}, {
"output_type": "plot",
"output_id": "plot2",
"control_id": "button1",
"on_page_load": True,
}, {
"output_type": "table",
"output_id": "table_id",
"control_id": "button1",
"sortable": True,
"on_page_load": True,
}, {
"output_type": "download",
"output_id": "download_id",
"control_id": "button2",
}
]
def plot1(self, params):
fig = plt.figure() # make figure object
splt = fig.add_subplot(1, 1, 1)
f = float(params['freq'])
title = params['title']
axis_label = map(int, params['axis_label'])
color = params['color']
func_type = params['func_type']
x = np.arange(0, 6 * pi, pi / 50)
splt.set_title(title)
for axis in axis_label:
if axis == 1:
splt.set_xlabel('x axis')
if axis == 2:
splt.set_ylabel('y axis')
if func_type == 'cos':
y = np.cos(f * x)
else:
y = np.sin(f * x)
splt.plot(x, y, color=color) # sine wave
return fig
def plot2(self, params):
data = self.getData(params)
fig = plt.figure() # make figure object
splt = fig.add_subplot(1, 1, 1)
ind = np.arange(len(data['name']))
width = 0.85
splt.bar(ind, data['count'], width)
splt.set_xticks(ind + width / 2)
splt.set_xticklabels(["A", "B", "C"])
return fig
def html1(self, params):
return "hello world"
def html2(self, params):
func_type = params['func_type']
axis_label = params['axis_label']
color = params['color']
freq = params['freq']
html = (
"function type: {} <br>axis label: {}<br>color: {}<br>frequency: {}"
.format(func_type, axis_label, color, freq)
)
return html
def getJsonData(self, params):
count = [1, 4, 3]
name = ['<a href="http://adamhajari.com">A</a>', 'B', 'C']
return {'name': name, 'count': count}
def getData(self, params):
data = self.getJsonData(params)
df = pd.DataFrame(data)
return df
def noOutput(self, input_params):
return 0
# app = TestApp()
# app.launch()
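# Illustrative launch sketch (assumption: spyre's server.App exposes
# launch(port=...)); kept commented out, like the lines above, so importing
# this module for tests does not start a web server.
#
# if __name__ == '__main__':
#     app = TestApp()
#     app.launch(port=9093)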
| mit |
stargaser/astropy | astropy/table/info.py | 3 | 7420 | """
Table property for providing information about table.
"""
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import sys
import os
from contextlib import contextmanager
from inspect import isclass
import numpy as np
from astropy.utils.data_info import DataInfo
__all__ = ['table_info', 'TableInfo', 'serialize_method_as']
def table_info(tbl, option='attributes', out=''):
"""
Write summary information about column to the ``out`` filehandle.
By default this prints to standard output via sys.stdout.
The ``option`` argument specifies what type of information
to include. This can be a string, a function, or a list of
strings or functions. Built-in options are:
- ``attributes``: basic column meta data like ``dtype`` or ``format``
- ``stats``: basic statistics: minimum, mean, and maximum
If a function is specified then that function will be called with the
column as its single argument. The function must return an OrderedDict
containing the information attributes.
If a list is provided then the information attributes will be
appended for each of the options, in order.
Examples
--------
>>> from astropy.table.table_helpers import simple_table
>>> t = simple_table(size=2, kinds='if')
>>> t['a'].unit = 'm'
>>> t.info()
<Table length=2>
name dtype unit
---- ------- ----
a int64 m
b float64
>>> t.info('stats')
<Table length=2>
name mean std min max
---- ---- --- --- ---
a 1.5 0.5 1 2
b 1.5 0.5 1.0 2.0
Parameters
----------
option : str, function, list of (str or function)
Info option, defaults to 'attributes'.
out : file-like object, None
Output destination, default is sys.stdout. If None then a
Table with information attributes is returned
Returns
-------
info : `~astropy.table.Table` if out==None else None
"""
from .table import Table
if out == '':
out = sys.stdout
descr_vals = [tbl.__class__.__name__]
if tbl.masked:
descr_vals.append('masked=True')
descr_vals.append('length={}'.format(len(tbl)))
outlines = ['<' + ' '.join(descr_vals) + '>']
cols = tbl.columns.values()
if tbl.colnames:
infos = []
for col in cols:
infos.append(col.info(option, out=None))
info = Table(infos, names=list(infos[0]))
else:
info = Table()
if out is None:
return info
# Since info is going to a filehandle for viewing then remove uninteresting
# columns.
if 'class' in info.colnames:
# Remove 'class' info column if all table columns are the same class
# and they are the default column class for that table.
uniq_types = set(type(col) for col in cols)
if len(uniq_types) == 1 and isinstance(cols[0], tbl.ColumnClass):
del info['class']
if 'n_bad' in info.colnames and np.all(info['n_bad'] == 0):
del info['n_bad']
# Standard attributes has 'length' but this is typically redundant
if 'length' in info.colnames and np.all(info['length'] == len(tbl)):
del info['length']
for name in info.colnames:
if info[name].dtype.kind in 'SU' and np.all(info[name] == ''):
del info[name]
if tbl.colnames:
outlines.extend(info.pformat(max_width=-1, max_lines=-1, show_unit=False))
else:
outlines.append('<No columns>')
out.writelines(outline + os.linesep for outline in outlines)
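# Illustrative usage sketch (not part of astropy): pass ``out=None`` to get
# the summary back as a Table instead of writing it to a stream. Shown as a
# comment so nothing runs at import time.
#
# >>> from astropy.table.table_helpers import simple_table
# >>> t = simple_table(size=3)
# >>> info_tbl = table_info(t, option=['attributes', 'stats'], out=None)
# >>> info_tbl['name', 'dtype']  # doctest: +SKIP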
class TableInfo(DataInfo):
def __call__(self, option='attributes', out=''):
return table_info(self._parent, option, out)
__call__.__doc__ = table_info.__doc__
@contextmanager
def serialize_method_as(tbl, serialize_method):
"""Context manager to temporarily override individual
column info.serialize_method dict values. The serialize_method
attribute is an optional dict which might look like ``{'fits':
'jd1_jd2', 'ecsv': 'formatted_value', ..}``.
``serialize_method`` is a str or dict. If str then that value is the
``serialize_method`` that will be used for all formats.
If dict then the key values can be either:
- Column name. This has higher precedence than the second option of
matching class.
- Class (matches any column which is an instance of the class)
This context manager is expected to be used only within ``Table.write``.
It could have been a private method on Table but prefer not to add
clutter to that class.
Parameters
----------
tbl : Table object
Input table
serialize_method : dict, str
Dict with key values of column names or types, or str
Returns
-------
None (context manager)
"""
def get_override_sm(col):
"""
Determine if the ``serialize_method`` str or dict specifies an
override of column presets for ``col``. Returns the matching
serialize_method value or ``None``.
"""
# If a string then all columns match
if isinstance(serialize_method, str):
return serialize_method
# If column name then return that serialize_method
if col.info.name in serialize_method:
return serialize_method[col.info.name]
# Otherwise look for subclass matches
for key in serialize_method:
if isclass(key) and isinstance(col, key):
return serialize_method[key]
return None
# Setup for the context block. Set individual column.info.serialize_method
# values as appropriate and keep a backup copy. If ``serialize_method``
# is None or empty then don't do anything.
if serialize_method:
# Original serialize_method dict, keyed by column name. This only
# gets set if there is an override.
original_sms = {}
# Go through every column and if it has a serialize_method info
# attribute then potentially update it for the duration of the write.
for col in tbl.itercols():
if hasattr(col.info, 'serialize_method'):
override_sm = get_override_sm(col)
if override_sm:
# Make a reference copy of the column serialize_method
# dict which maps format (e.g. 'fits') to the
# appropriate method (e.g. 'data_mask').
original_sms[col.info.name] = col.info.serialize_method
# Set serialize method for *every* available format. This is
# brute force, but at this point the format ('fits', 'ecsv', etc)
# is not actually known (this gets determined by the write function
# in registry.py). Note this creates a new temporary dict object
# so that the restored version is the same original object.
col.info.serialize_method = {fmt: override_sm
for fmt in col.info.serialize_method}
# Finally yield for the context block
try:
yield
finally:
# Teardown (restore) for the context block. Be sure to do this even
# if an exception occurred.
if serialize_method:
for name, original_sm in original_sms.items():
tbl[name].info.serialize_method = original_sm
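# Illustrative usage sketch (not part of astropy): force the formatted-value
# representation for every column only for the duration of one write call.
# The file name and format below are placeholders.
#
# with serialize_method_as(tbl, 'formatted_value'):
#     tbl.write('table.ecsv', format='ascii.ecsv')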
| bsd-3-clause |
vanloswang/linux | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <[email protected]>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r ,g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
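# Illustrative note (not in the original file): RootFrame only needs a tracer
# object providing set_root_win(), interval(), nr_rectangles(), fill_zone()
# and mouse_down(). The TimeSliceList built by sched-migration.py is such a
# tracer, and it is wired up roughly like this:
#
# app = wx.App(False)
# frame = RootFrame(timeslices, "Migration")
# app.MainLoop()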
| gpl-2.0 |
brion/cerbero | cerbero/build/recipe.py | 1 | 15439 | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
import logging
import shutil
import tempfile
import time
from cerbero.build import build, source
from cerbero.build.filesprovider import FilesProvider
from cerbero.config import Platform
from cerbero.errors import FatalError
from cerbero.ide.vs.genlib import GenLib
from cerbero.tools.osxuniversalgenerator import OSXUniversalGenerator
from cerbero.utils import N_, _
from cerbero.utils import shell
from cerbero.utils import messages as m
class MetaRecipe(type):
''' This metaclass modifies the base classes of a Recipe, adding 2 new
base classes based on the class attributes 'stype' and 'btype'.
class NewRecipe(Recipe):
btype = Class1 ------> class NewRecipe(Recipe, Class1, Class2)
stype = Class2
'''
def __new__(cls, name, bases, dct):
clsname = '%s.%s' % (dct['__module__'], name)
recipeclsname = '%s.%s' % (cls.__module__, 'Recipe')
# only modify it for Recipe's subclasses
if clsname != recipeclsname and name == 'Recipe':
# get the default build and source classes from Recipe
# Recipe(DefaultSourceType, DefaultBaseType)
basedict = {'btype': bases[0].btype, 'stype': bases[0].stype}
# if this class defines stype or btype, override the default one
# Recipe(OverriddenSourceType, OverriddenBaseType)
for base in ['stype', 'btype']:
if base in dct:
basedict[base] = dct[base]
# finally add these classes to the Recipe bases
# Recipe(BaseClass, OverriddenSourceType, OverriddenBaseType)
bases = bases + tuple(basedict.values())
return type.__new__(cls, name, bases, dct)
class BuildSteps(object):
'''
Enumeration factory for build steps
'''
FETCH = (N_('Fetch'), 'fetch')
EXTRACT = (N_('Extract'), 'extract')
CONFIGURE = (N_('Configure'), 'configure')
COMPILE = (N_('Compile'), 'compile')
INSTALL = (N_('Install'), 'install')
POST_INSTALL = (N_('Post Install'), 'post_install')
# Not added by default
CHECK = (N_('Check'), 'check')
GEN_LIBFILES = (N_('Gen Library File'), 'gen_library_file')
MERGE = (N_('Merge universal binaries'), 'merge')
def __new__(klass):
return [BuildSteps.FETCH, BuildSteps.EXTRACT,
BuildSteps.CONFIGURE, BuildSteps.COMPILE, BuildSteps.INSTALL,
BuildSteps.POST_INSTALL]
class Recipe(FilesProvider):
'''
Base class for recipes.
A Recipe describes a module and the way it's built.
@cvar name: name of the module
@type name: str
@cvar licenses: recipe licenses
@type licenses: Licenses
@cvar version: version of the module
@type version: str
@cvar sources: url of the sources
@type sources: str
@cvar stype: type of sources
@type stype: L{cerbero.source.SourceType}
@cvar btype: build type
@type btype: L{cerbero.build.BuildType}
@cvar deps: module dependencies
@type deps: list
@cvar platform_deps: platform conditional dependencies
@type platform_deps: dict
@cvar runtime_dep: runtime dep common to all recipes
@type runtime_dep: bool
'''
__metaclass__ = MetaRecipe
name = None
licenses = []
version = None
package_name = None
sources = None
stype = source.SourceType.GIT_TARBALL
btype = build.BuildType.AUTOTOOLS
deps = list()
platform_deps = {}
force = False
runtime_dep = False
_default_steps = BuildSteps()
def __init__(self, config):
self.config = config
if self.package_name is None:
self.package_name = "%s-%s" % (self.name, self.version)
if not hasattr(self, 'repo_dir'):
self.repo_dir = os.path.join(self.config.local_sources,
self.package_name)
self.repo_dir = os.path.abspath(self.repo_dir)
self.build_dir = os.path.join(self.config.sources, self.package_name)
self.build_dir = os.path.abspath(self.build_dir)
self.deps = self.deps or []
self.platform_deps = self.platform_deps or []
self._steps = self._default_steps[:]
if self.config.target_platform == Platform.WINDOWS:
self._steps.append(BuildSteps.GEN_LIBFILES)
FilesProvider.__init__(self, config)
try:
self.stype.__init__(self)
self.btype.__init__(self)
except TypeError:
# should only work with subclasses that really have Build and
# Source in bases
pass
def __str__(self):
return self.name
def prepare(self):
'''
Can be overridden by subclasses to modify the recipe as a function of
the configuration, like modifying steps for a given platform
'''
pass
def post_install(self):
'''
Runs post installation steps
'''
pass
def built_version(self):
'''
Gets the current built version of the recipe.
Sources can override it to provide extended info in the version
such as the commit hash for recipes using git and building against
master: eg (1.2.0~git+2345435)
'''
if hasattr(self.stype, 'built_version'):
return self.stype.built_version(self)
return self.version
def list_deps(self):
'''
List all dependencies including conditional dependencies
'''
deps = []
deps.extend(self.deps)
if self.config.target_platform in self.platform_deps:
deps.extend(self.platform_deps[self.config.target_platform])
if self.config.variants.gi and self.use_gobject_introspection():
if self.name != 'gobject-introspection':
deps.append('gobject-introspection')
return deps
def list_licenses_by_categories(self, categories):
licenses = {}
for c in categories:
if c in licenses:
raise Exception('multiple licenses for the same category %s '
'defined' % c)
if not c:
licenses[None] = self.licenses
continue
attr = 'licenses_' + c
platform_attr = 'platform_licenses_' + c
if hasattr(self, attr):
licenses[c] = getattr(self, attr)
elif hasattr(self, platform_attr):
l = getattr(self, platform_attr)
licenses[c] = l.get(self.platform, [])
else:
licenses[c] = self.licenses
return licenses
def gen_library_file(self, output_dir=None):
'''
Generates library files (.lib) for the dll's provided by this recipe
'''
genlib = GenLib()
for dllpath in self.libraries():
try:
implib = genlib.create(
os.path.join(self.config.prefix, dllpath),
self.config.target_arch,
os.path.join(self.config.prefix, 'lib'))
logging.debug('Created %s' % implib)
except:
m.warning("Could not create .lib, gendef might be missing")
def recipe_dir(self):
'''
Gets the directory path where this recipe is stored
@return: directory path
@rtype: str
'''
return os.path.dirname(self.__file__)
def relative_path(self, path):
'''
Gets a path relative to the recipe's directory
@return: absolute path relative to the package's directory
@rtype: str
'''
return os.path.abspath(os.path.join(self.recipe_dir(), path))
@property
def steps(self):
return self._steps
def _remove_steps(self, steps):
self._steps = [x for x in self._steps if x not in steps]
class MetaUniversalRecipe(type):
'''
Wraps all the build steps for the universal recipe to be called for each
one of the child recipes.
'''
def __init__(cls, name, bases, ns):
step_func = ns.get('_do_step')
for _, step in BuildSteps():
setattr(cls, step, lambda self, name=step: step_func(self, name))
class UniversalRecipe(object):
'''
Stores similar recipe objects that are going to be built together
Useful for the universal architecture, where the same recipe needs
to be built for different architectures before being merged. For the
other targets, it will likely be a unitary group
'''
__metaclass__ = MetaUniversalRecipe
def __init__(self, config):
self._config = config
self._recipes = {}
self._proxy_recipe = None
def __str__(self):
if self._recipes.values():
return str(self._recipes.values()[0])
return super(UniversalRecipe, self).__str__()
def add_recipe(self, recipe):
'''
Adds a new recipe to the group
'''
if self._proxy_recipe is None:
self._proxy_recipe = recipe
else:
if recipe.name != self._proxy_recipe.name:
raise FatalError(_("Recipes must have the same name"))
self._recipes[recipe.config.target_arch] = recipe
def is_empty(self):
return len(self._recipes) == 0
@property
def steps(self):
if self.is_empty():
return []
return self._proxy_recipe.steps[:]
def __getattr__(self, name):
if not self._proxy_recipe:
raise AttributeError(_("Attribute %s was not found in the "
"Universal recipe, which is empty. You might need to add a "
"recipe first."))
return getattr(self._proxy_recipe, name)
def __setattr__(self, name, value):
object.__setattr__(self, name, value)
if name not in ['_config', '_recipes', '_proxy_recipe']:
for o in self._recipes.values():
setattr(o, name, value)
def _do_step(self, step):
if step in BuildSteps.FETCH:
# No, really, let's not download a million times...
stepfunc = getattr(self._recipes.values()[0], step)
stepfunc()
return
for arch, recipe in self._recipes.iteritems():
config = self._config.arch_config[arch]
config.do_setup_env()
stepfunc = getattr(recipe, step)
# Call the step function
stepfunc()
class UniversalFlatRecipe(UniversalRecipe):
'''
Universal recipe for iOS and OS X creating flat libraries
in the target prefix instead of subdirs for each architecture
'''
def __init__(self, config):
UniversalRecipe.__init__(self, config)
@property
def steps(self):
if self.is_empty():
return []
return self._proxy_recipe.steps[:] + [BuildSteps.MERGE]
def merge(self):
arch_inputs = {}
for arch, recipe in self._recipes.iteritems():
# change the prefix temporarily to the arch prefix where files are
# actually installed
recipe.config.prefix = os.path.join(self.config.prefix, arch)
arch_inputs[arch] = set(recipe.files_list())
recipe.config.prefix = self._config.prefix
# merge the common files
inputs = reduce(lambda x, y: x & y, arch_inputs.values())
output = self._config.prefix
generator = OSXUniversalGenerator(output)
generator.merge_files(list(inputs),
[os.path.join(self._config.prefix, arch) for arch in
self._recipes.keys()])
# merge the architecture specific files
for arch in self._recipes.keys():
ainputs = list(inputs ^ arch_inputs[arch])
output = self._config.prefix
generator = OSXUniversalGenerator(output)
generator.merge_files(ainputs,
[os.path.join(self._config.prefix, arch)])
def _do_step(self, step):
if step in BuildSteps.FETCH:
# No, really, let's not download a million times...
stepfunc = getattr(self._recipes.values()[0], step)
stepfunc()
return
# For the universal build we need to configure both architectures with
# with the same final prefix, but we want to install each architecture
# on a different path (eg: /path/to/prefix/x86).
archs_prefix = self._recipes.keys()
for arch, recipe in self._recipes.iteritems():
config = self._config.arch_config[arch]
config.do_setup_env()
stepfunc = getattr(recipe, step)
# Create a stamp file to list installed files based on the
# modification time of this file
if step in [BuildSteps.INSTALL[1], BuildSteps.POST_INSTALL[1]]:
time.sleep(2) #wait 2 seconds to make sure new files get the
#proper time difference, this fixes an issue of
#the next recipe to be built listing the previous
#recipe files as their own
tmp = tempfile.NamedTemporaryFile()
# the modification time resolution depends on the filesystem,
# where FAT32 has a resolution of 2 seconds and ext4 1 second
t = time.time() - 2
os.utime(tmp.name, (t, t))
# Call the step function
stepfunc()
# Move installed files to the architecture prefix
if step in [BuildSteps.INSTALL[1], BuildSteps.POST_INSTALL[1]]:
installed_files = shell.find_newer_files(self._config.prefix,
tmp.name, True)
tmp.close()
for f in installed_files:
def not_in_prefix(src):
for p in archs_prefix + ['Libraries']:
if src.startswith(p):
return True
return False
# skip files that are installed in the arch prefix
if not_in_prefix(f):
continue
src = os.path.join(self._config.prefix, f)
dest = os.path.join(self._config.prefix,
recipe.config.target_arch, f)
if not os.path.exists(os.path.dirname(dest)):
os.makedirs(os.path.dirname(dest))
shutil.move(src, dest)
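# Illustrative sketch (not part of this module): the shape of a concrete
# recipe file built on the classes above, as found in a cerbero recipes
# directory. All field values here are made up, and the code is kept
# commented out so it does not shadow the real Recipe class.
#
# class Recipe(recipe.Recipe):
#     name = 'libexample'
#     version = '1.0'
#     stype = source.SourceType.GIT_TARBALL
#     btype = build.BuildType.AUTOTOOLS
#     deps = ['glib']
#
#     def prepare(self):
#         if self.config.target_platform == Platform.WINDOWS:
#             self.deps.append('zlib')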
| lgpl-2.1 |
kornicameister/ansible-modules-extras | monitoring/pagerduty_alert.py | 121 | 7587 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
module: pagerduty_alert
short_description: Trigger, acknowledge or resolve PagerDuty incidents
description:
- This module will let you trigger, acknowledge or resolve a PagerDuty incident by sending events
version_added: "1.9"
author:
- "Amanpreet Singh (@aps-sids)"
requirements:
- PagerDuty API access
options:
name:
description:
- PagerDuty unique subdomain.
required: true
service_key:
description:
- The GUID of one of your "Generic API" services.
- This is the "service key" listed on a Generic API's service detail page.
required: true
state:
description:
- Type of event to be sent.
required: true
choices:
- 'triggered'
- 'acknowledged'
- 'resolved'
api_key:
description:
- The pagerduty API key (readonly access), generated on the pagerduty site.
required: true
desc:
description:
- For C(triggered) I(state) - Required. Short description of the problem that led to this trigger. This field (or a truncated version) will be used when generating phone calls, SMS messages and alert emails. It will also appear on the incidents tables in the PagerDuty UI. The maximum length is 1024 characters.
- For C(acknowledged) or C(resolved) I(state) - Text that will appear in the incident's log associated with this event.
required: false
default: Created via Ansible
incident_key:
description:
- Identifies the incident to which this I(state) should be applied.
- For C(triggered) I(state) - If there's no open (i.e. unresolved) incident with this key, a new one will be created. If there's already an open incident with a matching key, this event will be appended to that incident's log. The event key provides an easy way to "de-dup" problem reports.
- For C(acknowledged) or C(resolved) I(state) - This should be the incident_key you received back when the incident was first opened by a trigger event. Acknowledge events referencing resolved or nonexistent incidents will be discarded.
required: false
client:
description:
- The name of the monitoring client that is triggering this event.
required: false
client_url:
description:
- The URL of the monitoring client that is triggering this event.
required: false
'''
EXAMPLES = '''
# Trigger an incident with just the basic options
- pagerduty_alert:
name: companyabc
service_key=xxx
api_key:yourapikey
state=triggered
desc="problem that led to this trigger"
# Trigger an incident with more options
- pagerduty_alert:
service_key=xxx
api_key=yourapikey
state=triggered
desc="problem that led to this trigger"
incident_key=somekey
client="Sample Monitoring Service"
client_url=http://service.example.com
# Acknowledge an incident based on incident_key
- pagerduty_alert:
service_key=xxx
api_key=yourapikey
state=acknowledged
incident_key=somekey
desc="some text for incident's log"
# Resolve an incident based on incident_key
- pagerduty_alert:
service_key=xxx
api_key=yourapikey
state=resolved
incident_key=somekey
desc="some text for incident's log"
'''
def check(module, name, state, service_key, api_key, incident_key=None):
url = "https://%s.pagerduty.com/api/v1/incidents" % name
headers = {
"Content-type": "application/json",
"Authorization": "Token token=%s" % api_key
}
data = {
"service_key": service_key,
"incident_key": incident_key,
"sort_by": "incident_number:desc"
}
response, info = fetch_url(module, url, method='get',
headers=headers, data=json.dumps(data))
if info['status'] != 200:
module.fail_json(msg="failed to check current incident status."
"Reason: %s" % info['msg'])
json_out = json.loads(response.read())["incidents"][0]
if state != json_out["status"]:
return json_out, True
return json_out, False
def send_event(module, service_key, event_type, desc,
incident_key=None, client=None, client_url=None):
url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json"
headers = {
"Content-type": "application/json"
}
data = {
"service_key": service_key,
"event_type": event_type,
"incident_key": incident_key,
"description": desc,
"client": client,
"client_url": client_url
}
response, info = fetch_url(module, url, method='post',
headers=headers, data=json.dumps(data))
if info['status'] != 200:
module.fail_json(msg="failed to %s. Reason: %s" %
(event_type, info['msg']))
json_out = json.loads(response.read())
return json_out
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
service_key=dict(required=True),
api_key=dict(required=True),
state=dict(required=True,
choices=['triggered', 'acknowledged', 'resolved']),
client=dict(required=False, default=None),
client_url=dict(required=False, default=None),
desc=dict(required=False, default='Created via Ansible'),
incident_key=dict(required=False, default=None)
),
supports_check_mode=True
)
name = module.params['name']
service_key = module.params['service_key']
api_key = module.params['api_key']
state = module.params['state']
client = module.params['client']
client_url = module.params['client_url']
desc = module.params['desc']
incident_key = module.params['incident_key']
state_event_dict = {
'triggered': 'trigger',
'acknowledged': 'acknowledge',
'resolved': 'resolve'
}
event_type = state_event_dict[state]
if event_type != 'trigger' and incident_key is None:
module.fail_json(msg="incident_key is required for "
"acknowledge or resolve events")
out, changed = check(module, name, state,
service_key, api_key, incident_key)
if not module.check_mode and changed is True:
out = send_event(module, service_key, event_type, desc,
incident_key, client, client_url)
module.exit_json(result=out, changed=changed)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
| gpl-3.0 |
8u1a/plaso | plaso/cli/helpers/interface.py | 3 | 1180 | # -*- coding: utf-8 -*-
"""The arguments helper interface."""
class ArgumentsHelper(object):
"""The CLI arguments helper class."""
NAME = u'baseline'
# Category further divides the registered helpers down after function,
# this can be something like: analysis, output, storage, etc.
CATEGORY = u''
DESCRIPTION = u''
@classmethod
def AddArguments(cls, argument_group):
"""Add command line arguments the helper supports to an argument group.
This function takes an argument parser or an argument group object and adds
to it all the command line arguments this helper supports.
Args:
argument_group: the argparse group (instance of argparse._ArgumentGroup or
or argparse.ArgumentParser).
"""
@classmethod
def ParseOptions(cls, options, config_object):
"""Parses and validates options.
Args:
options: the parser option object (instance of argparse.Namespace).
config_object: an object that is configured by this helper.
Raises:
BadConfigObject: when the output module object is of the wrong type.
BadConfigOption: when a configuration parameter fails validation.
"""
| apache-2.0 |
HyperBaton/ansible | lib/ansible/modules/network/fortios/fortios_switch_controller_sflow.py | 7 | 8728 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_switch_controller_sflow
short_description: Configure FortiSwitch sFlow in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify switch_controller feature and sflow category.
Examples include all parameters, and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
switch_controller_sflow:
description:
- Configure FortiSwitch sFlow.
default: null
type: dict
suboptions:
collector_ip:
description:
- Collector IP.
type: str
collector_port:
description:
- SFlow collector port (0 - 65535).
type: int
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure FortiSwitch sFlow.
fortios_switch_controller_sflow:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
switch_controller_sflow:
collector_ip: "<your_own_value>"
collector_port: "4"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_switch_controller_sflow_data(json):
option_list = ['collector_ip', 'collector_port']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
def switch_controller_sflow(data, fos):
vdom = data['vdom']
switch_controller_sflow_data = data['switch_controller_sflow']
filtered_data = underscore_to_hyphen(filter_switch_controller_sflow_data(switch_controller_sflow_data))
return fos.set('switch-controller',
'sflow',
data=filtered_data,
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_switch_controller(data, fos):
if data['switch_controller_sflow']:
resp = switch_controller_sflow(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"switch_controller_sflow": {
"required": False, "type": "dict", "default": None,
"options": {
"collector_ip": {"required": False, "type": "str"},
"collector_port": {"required": False, "type": "int"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_switch_controller(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_switch_controller(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 |
cmccoy/startreerenaissance | deploy/tag-instances.py | 1 | 1583 | #!/usr/bin/env python
import argparse
import logging
import sys
import boto
def main():
p = argparse.ArgumentParser()
p.add_argument('cluster_name')
p.add_argument('--dry-run', action='store_true')
a = p.parse_args()
conn = boto.connect_ec2()
active = [instance for res in conn.get_all_instances()
for instance in res.instances
if instance.state in set(['pending', 'running', 'stopping', 'stopped'])]
logging.info('%d active instances', len(active))
master_nodes = []
slave_nodes = []
for instance in active:
group_names = [g.name for g in instance.groups]
if group_names == [a.cluster_name + '-master']:
master_nodes.append(instance)
elif group_names == [a.cluster_name + '-slaves']:
slave_nodes.append(instance)
logging.info('%d master, %d slave', len(master_nodes), len(slave_nodes))
if master_nodes:
conn.create_tags([i.id for i in master_nodes],
{'spark_node_type': 'master'})
if slave_nodes:
conn.create_tags([i.id for i in slave_nodes],
{'spark_node_type': 'slave'})
if slave_nodes or master_nodes:
ids = [i.id for l in (master_nodes, slave_nodes) for i in l]
conn.create_tags(ids, {'Owner': 'cmccoy',
'Purpose': 'b-cell-selection',
'spark_cluster_name': a.cluster_name})
logging.info("Tagged nodes.")
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main()
| gpl-3.0 |
vmturbo/nova | nova/policies/certificates.py | 1 | 1520 | # Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
POLICY_ROOT = 'os_compute_api:os-certificates:%s'
certificates_policies = [
policy.RuleDefault(
name=POLICY_ROOT % 'discoverable',
check_str=base.RULE_ANY),
base.create_rule_default(
POLICY_ROOT % 'create',
base.RULE_ADMIN_OR_OWNER,
"Create a root certificate. This API is deprecated.",
[
{
'method': 'POST',
'path': '/os-certificates'
}
]),
base.create_rule_default(
POLICY_ROOT % 'show',
base.RULE_ADMIN_OR_OWNER,
"Show details for a root certificate. This API is deprecated.",
[
{
'method': 'GET',
'path': '/os-certificates/root'
}
])
]
def list_rules():
return certificates_policies
| apache-2.0 |
coxmediagroup/googleads-python-lib | examples/dfp/v201505/custom_field_service/get_all_line_item_custom_fields.py | 3 | 2324 | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all custom fields that apply to line items.
To create custom fields, run create_custom_fields.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: CustomFieldService.getCustomFieldsByStatement
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
custom_field_service = client.GetService(
'CustomFieldService', version='v201505')
# Create statement to select only custom fields that apply to line items.
values = [{
'key': 'entityType',
'value': {
'xsi_type': 'TextValue',
'value': 'LINE_ITEM'
}
}]
query = 'WHERE entityType = :entityType'
# Create a filter statement.
statement = dfp.FilterStatement(query, values)
# Get custom fields by statement.
while True:
response = custom_field_service.getCustomFieldsByStatement(
statement.ToStatement())
if 'results' in response:
# Display results.
for custom_field in response['results']:
print ('Custom field with ID \'%s\' and name \'%s\' was found.'
% (custom_field['id'], custom_field['name']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
| apache-2.0 |
cgarciabarrera/wazztrick | src/Examples/EchoClient.py | 16 | 3147 | '''
Copyright (c) <2012> Tarek Galal <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR
A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import os
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0,parentdir)
import time
from Yowsup.connectionmanager import YowsupConnectionManager
class WhatsappEchoClient:
def __init__(self, target, message, waitForReceipt=False):
self.jids = []
if '-' in target:
self.jids = ["%[email protected]" % target]
else:
self.jids = ["%[email protected]" % t for t in target.split(',')]
self.message = message
self.waitForReceipt = waitForReceipt
connectionManager = YowsupConnectionManager()
self.signalsInterface = connectionManager.getSignalsInterface()
self.methodsInterface = connectionManager.getMethodsInterface()
self.signalsInterface.registerListener("auth_success", self.onAuthSuccess)
self.signalsInterface.registerListener("auth_fail", self.onAuthFailed)
if waitForReceipt:
self.signalsInterface.registerListener("receipt_messageSent", self.onMessageSent)
self.gotReceipt = False
self.signalsInterface.registerListener("disconnected", self.onDisconnected)
self.done = False
def login(self, username, password):
self.username = username
self.methodsInterface.call("auth_login", (username, password))
while not self.done:
time.sleep(0.5)
def onAuthSuccess(self, username):
print("Authed %s" % username)
if self.waitForReceipt:
self.methodsInterface.call("ready")
if len(self.jids) > 1:
self.methodsInterface.call("message_broadcast", (self.jids, self.message))
else:
self.methodsInterface.call("message_send", (self.jids[0], self.message))
print("Sent message")
if self.waitForReceipt:
timeout = 5
t = 0;
while t < timeout and not self.gotReceipt:
time.sleep(0.5)
t+=1
if not self.gotReceipt:
print("print timedout!")
else:
print("Got sent receipt")
self.done = True
def onAuthFailed(self, username, err):
print("Auth Failed!")
def onDisconnected(self, reason):
print("Disconnected because %s" %reason)
def onMessageSent(self, jid, messageId):
self.gotReceipt = True
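# Illustrative usage sketch (not part of the original example): the phone
# number and credentials below are placeholders.
#
# if __name__ == '__main__':
#     client = WhatsappEchoClient('491234567890', 'hello from yowsup',
#                                 waitForReceipt=True)
#     client.login('491234567890', 'base64-encoded-password')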
| mit |
PepperPD/edx-pepper-platform | common/lib/xmodule/xmodule/modulestore/parsers.py | 9 | 4058 | import re
# Prefix for the branch portion of a locator URL
BRANCH_PREFIX = "/branch/"
# Prefix for the block portion of a locator URL
BLOCK_PREFIX = "/block/"
# Prefix for the version portion of a locator URL, when it is preceded by a course ID
VERSION_PREFIX = "/version/"
# Prefix for version when it begins the URL (no course ID).
URL_VERSION_PREFIX = 'version/'
URL_RE = re.compile(r'^edx://(.+)$', re.IGNORECASE)
def parse_url(string):
"""
A url must begin with 'edx://' (case-insensitive match),
followed by either a version_guid or a course_id.
Examples:
'edx://version/0123FFFF'
'edx://mit.eecs.6002x'
'edx://mit.eecs.6002x/branch/published'
'edx://mit.eecs.6002x/branch/published/block/HW3'
'edx://mit.eecs.6002x/branch/published/version/000eee12345/block/HW3'
This returns None if string cannot be parsed.
If it can be parsed as a version_guid with no preceding course_id, returns a dict
with key 'version_guid' and the value,
If it can be parsed as a course_id, returns a dict
with key 'id' and optional keys 'branch' and 'version_guid'.
"""
match = URL_RE.match(string)
if not match:
return None
path = match.group(1)
if path.startswith(URL_VERSION_PREFIX):
return parse_guid(path[len(URL_VERSION_PREFIX):])
return parse_course_id(path)
BLOCK_RE = re.compile(r'^\w+$', re.IGNORECASE)
def parse_block_ref(string):
r"""
A block_ref is a string of word_chars.
<word_chars> matches one or more Unicode word characters; this includes most
characters that can be part of a word in any language, as well as numbers
and the underscore. (see definition of \w in python regular expressions,
at http://docs.python.org/dev/library/re.html)
If string is a block_ref, returns a dict with key 'block_ref' and the value,
otherwise returns None.
"""
if len(string) > 0 and BLOCK_RE.match(string):
return {'block': string}
return None
GUID_RE = re.compile(r'^(?P<version_guid>[A-F0-9]+)(' + BLOCK_PREFIX + r'(?P<block>\w+))?$', re.IGNORECASE)
def parse_guid(string):
"""
A version_guid is a string of hex digits (0-F).
If string is a version_guid, returns a dict with key 'version_guid' and the value,
otherwise returns None.
"""
m = GUID_RE.match(string)
if m is not None:
return m.groupdict()
else:
return None
COURSE_ID_RE = re.compile(
r'^(?P<id>(\w+)(\.\w+\w*)*)(' +
BRANCH_PREFIX + r'(?P<branch>\w+))?(' +
VERSION_PREFIX + r'(?P<version_guid>[A-F0-9]+))?(' +
BLOCK_PREFIX + r'(?P<block>\w+))?$', re.IGNORECASE
)
def parse_course_id(string):
r"""
A course_id has a main id component.
There may also be an optional branch (/branch/published or /branch/draft).
There may also be an optional version (/version/519665f6223ebd6980884f2b).
There may also be an optional block (/block/HW3 or /block/Quiz2).
Examples of valid course_ids:
'mit.eecs.6002x'
'mit.eecs.6002x/branch/published'
'mit.eecs.6002x/block/HW3'
'mit.eecs.6002x/branch/published/block/HW3'
'mit.eecs.6002x/branch/published/version/519665f6223ebd6980884f2b/block/HW3'
Syntax:
course_id = main_id [/branch/ branch] [/version/ version ] [/block/ block]
main_id = name [. name]*
branch = name
block = name
name = <word_chars>
<word_chars> matches one or more Unicode word characters; this includes most
characters that can be part of a word in any language, as well as numbers
and the underscore. (see definition of \w in python regular expressions,
at http://docs.python.org/dev/library/re.html)
If string is a course_id, returns a dict with keys 'id', 'branch', 'version_guid', and 'block'.
Branch is optional: if missing returned_dict['branch'] is None.
Block is optional: if missing returned_dict['block'] is None.
Else returns None.
"""
match = COURSE_ID_RE.match(string)
if not match:
return None
return match.groupdict()
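# Illustrative examples (not part of the original module) of the dicts the
# parsers above return; the course id, version and block names are made up:
#
#   parse_url('edx://mit.eecs.6002x/branch/published/block/HW3')
#   -> {'id': 'mit.eecs.6002x', 'branch': 'published', 'version_guid': None, 'block': 'HW3'}
#
#   parse_url('edx://version/0123FFFF')
#   -> {'version_guid': '0123FFFF', 'block': None}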
| agpl-3.0 |
MER-GROUP/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/db/backends/spatialite/operations.py | 282 | 14391 | import re
from decimal import Decimal
from django.contrib.gis.db.backends.base import BaseSpatialOperations
from django.contrib.gis.db.backends.util import SpatialOperation, SpatialFunction
from django.contrib.gis.db.backends.spatialite.adapter import SpatiaLiteAdapter
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.sqlite3.base import DatabaseOperations
from django.db.utils import DatabaseError
class SpatiaLiteOperator(SpatialOperation):
"For SpatiaLite operators (e.g. `&&`, `~`)."
def __init__(self, operator):
super(SpatiaLiteOperator, self).__init__(operator=operator)
class SpatiaLiteFunction(SpatialFunction):
"For SpatiaLite function calls."
def __init__(self, function, **kwargs):
super(SpatiaLiteFunction, self).__init__(function, **kwargs)
class SpatiaLiteFunctionParam(SpatiaLiteFunction):
"For SpatiaLite functions that take another parameter."
sql_template = '%(function)s(%(geo_col)s, %(geometry)s, %%s)'
class SpatiaLiteDistance(SpatiaLiteFunction):
"For SpatiaLite distance operations."
dist_func = 'Distance'
sql_template = '%(function)s(%(geo_col)s, %(geometry)s) %(operator)s %%s'
def __init__(self, operator):
super(SpatiaLiteDistance, self).__init__(self.dist_func,
operator=operator)
class SpatiaLiteRelate(SpatiaLiteFunctionParam):
"For SpatiaLite Relate(<geom>, <pattern>) calls."
pattern_regex = re.compile(r'^[012TF\*]{9}$')
def __init__(self, pattern):
if not self.pattern_regex.match(pattern):
raise ValueError('Invalid intersection matrix pattern "%s".' % pattern)
super(SpatiaLiteRelate, self).__init__('Relate')
# Valid distance types and substitutions
dtypes = (Decimal, Distance, float, int, long)
def get_dist_ops(operator):
"Returns operations for regular distances; spherical distances are not currently supported."
return (SpatiaLiteDistance(operator),)
class SpatiaLiteOperations(DatabaseOperations, BaseSpatialOperations):
compiler_module = 'django.contrib.gis.db.models.sql.compiler'
name = 'spatialite'
spatialite = True
version_regex = re.compile(r'^(?P<major>\d)\.(?P<minor1>\d)\.(?P<minor2>\d+)')
valid_aggregates = dict([(k, None) for k in ('Extent', 'Union')])
Adapter = SpatiaLiteAdapter
Adaptor = Adapter # Backwards-compatibility alias.
area = 'Area'
centroid = 'Centroid'
contained = 'MbrWithin'
difference = 'Difference'
distance = 'Distance'
envelope = 'Envelope'
intersection = 'Intersection'
length = 'GLength' # OpenGis defines Length, but this conflicts with an SQLite reserved keyword
num_geom = 'NumGeometries'
num_points = 'NumPoints'
point_on_surface = 'PointOnSurface'
scale = 'ScaleCoords'
svg = 'AsSVG'
sym_difference = 'SymDifference'
transform = 'Transform'
translate = 'ShiftCoords'
union = 'GUnion' # OpenGis defines Union, but this conflicts with an SQLite reserved keyword
unionagg = 'GUnion'
from_text = 'GeomFromText'
from_wkb = 'GeomFromWKB'
select = 'AsText(%s)'
geometry_functions = {
'equals' : SpatiaLiteFunction('Equals'),
'disjoint' : SpatiaLiteFunction('Disjoint'),
'touches' : SpatiaLiteFunction('Touches'),
'crosses' : SpatiaLiteFunction('Crosses'),
'within' : SpatiaLiteFunction('Within'),
'overlaps' : SpatiaLiteFunction('Overlaps'),
'contains' : SpatiaLiteFunction('Contains'),
'intersects' : SpatiaLiteFunction('Intersects'),
'relate' : (SpatiaLiteRelate, basestring),
# Returns true if B's bounding box completely contains A's bounding box.
'contained' : SpatiaLiteFunction('MbrWithin'),
# Returns true if A's bounding box completely contains B's bounding box.
'bbcontains' : SpatiaLiteFunction('MbrContains'),
# Returns true if A's bounding box overlaps B's bounding box.
'bboverlaps' : SpatiaLiteFunction('MbrOverlaps'),
# These are implemented here as synonyms for Equals
'same_as' : SpatiaLiteFunction('Equals'),
'exact' : SpatiaLiteFunction('Equals'),
}
distance_functions = {
'distance_gt' : (get_dist_ops('>'), dtypes),
'distance_gte' : (get_dist_ops('>='), dtypes),
'distance_lt' : (get_dist_ops('<'), dtypes),
'distance_lte' : (get_dist_ops('<='), dtypes),
}
geometry_functions.update(distance_functions)
def __init__(self, connection):
super(DatabaseOperations, self).__init__()
self.connection = connection
# Determine the version of the SpatiaLite library.
try:
vtup = self.spatialite_version_tuple()
version = vtup[1:]
if version < (2, 3, 0):
raise ImproperlyConfigured('GeoDjango only supports SpatiaLite versions '
'2.3.0 and above')
self.spatial_version = version
except ImproperlyConfigured:
raise
except Exception, msg:
raise ImproperlyConfigured('Cannot determine the SpatiaLite version for the "%s" '
'database (error was "%s"). Was the SpatiaLite initialization '
'SQL loaded on this database?' %
(self.connection.settings_dict['NAME'], msg))
# Creating the GIS terms dictionary.
gis_terms = ['isnull']
gis_terms += self.geometry_functions.keys()
self.gis_terms = dict([(term, None) for term in gis_terms])
def check_aggregate_support(self, aggregate):
"""
Checks if the given aggregate name is supported (that is, if it's
in `self.valid_aggregates`).
"""
agg_name = aggregate.__class__.__name__
return agg_name in self.valid_aggregates
def convert_geom(self, wkt, geo_field):
"""
Converts geometry WKT returned from a SpatiaLite aggregate.
"""
if wkt:
return Geometry(wkt, geo_field.srid)
else:
return None
def geo_db_type(self, f):
"""
Returns None because geometry columns are added via the
`AddGeometryColumn` stored procedure on SpatiaLite.
"""
return None
def get_distance(self, f, value, lookup_type):
"""
Returns the distance parameters for the given geometry field,
lookup value, and lookup type. SpatiaLite only supports regular
cartesian-based queries (no spheroid/sphere calculations for point
geometries like PostGIS).
"""
if not value:
return []
value = value[0]
if isinstance(value, Distance):
if f.geodetic(self.connection):
raise ValueError('SpatiaLite does not support distance queries on '
'geometry fields with a geodetic coordinate system; '
'pass a numeric value of your distance in degrees '
'instead of a Distance object.')
else:
dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
else:
dist_param = value
return [dist_param]
def get_geom_placeholder(self, f, value):
"""
Provides a proper substitution value for Geometries that are not in the
SRID of the field. Specifically, this routine will substitute in the
Transform() and GeomFromText() function call(s).
"""
def transform_value(value, srid):
return not (value is None or value.srid == srid)
if hasattr(value, 'expression'):
if transform_value(value, f.srid):
placeholder = '%s(%%s, %s)' % (self.transform, f.srid)
else:
placeholder = '%s'
# No geometry value used for F expression, substitute in
# the column name instead.
return placeholder % '%s.%s' % tuple(map(self.quote_name, value.cols[value.expression]))
else:
if transform_value(value, f.srid):
# Adding Transform() to the SQL placeholder.
return '%s(%s(%%s,%s), %s)' % (self.transform, self.from_text, value.srid, f.srid)
else:
return '%s(%%s,%s)' % (self.from_text, f.srid)
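# Illustrative note (not part of the original module): with the defaults above
# (transform='Transform', from_text='GeomFromText'), a geometry whose SRID
# differs from the field's (e.g. value.srid=4326, f.srid=3857) produces the
# placeholder 'Transform(GeomFromText(%s,4326), 3857)', while a matching SRID
# simply yields 'GeomFromText(%s,3857)'.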
def _get_spatialite_func(self, func):
"""
Helper routine for calling SpatiaLite functions and returning
their result.
"""
cursor = self.connection._cursor()
try:
try:
cursor.execute('SELECT %s' % func)
row = cursor.fetchone()
except:
# Responsibility of caller to perform error handling.
raise
finally:
cursor.close()
return row[0]
def geos_version(self):
"Returns the version of GEOS used by SpatiaLite as a string."
return self._get_spatialite_func('geos_version()')
def proj4_version(self):
"Returns the version of the PROJ.4 library used by SpatiaLite."
return self._get_spatialite_func('proj4_version()')
def spatialite_version(self):
"Returns the SpatiaLite library version as a string."
return self._get_spatialite_func('spatialite_version()')
def spatialite_version_tuple(self):
"""
Returns the SpatiaLite version as a tuple (version string, major,
minor, subminor).
"""
# Getting the SpatiaLite version.
try:
version = self.spatialite_version()
except DatabaseError:
# The `spatialite_version` function first appeared in version 2.3.1
# of SpatiaLite, so doing a fallback test for 2.3.0 (which is
# used by popular Debian/Ubuntu packages).
version = None
try:
tmp = self._get_spatialite_func("X(GeomFromText('POINT(1 1)'))")
if tmp == 1.0: version = '2.3.0'
except DatabaseError:
pass
# If no version string defined, then just re-raise the original
# exception.
if version is None: raise
m = self.version_regex.match(version)
if m:
major = int(m.group('major'))
minor1 = int(m.group('minor1'))
minor2 = int(m.group('minor2'))
else:
raise Exception('Could not parse SpatiaLite version string: %s' % version)
return (version, major, minor1, minor2)
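# Illustrative example (not part of the original module): a SpatiaLite build
# reporting '2.3.1' makes the method above return ('2.3.1', 2, 3, 1).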
def spatial_aggregate_sql(self, agg):
"""
Returns the spatial aggregate SQL template and function for the
given Aggregate instance.
"""
agg_name = agg.__class__.__name__
if not self.check_aggregate_support(agg):
raise NotImplementedError('%s spatial aggregate is not implemented for this backend.' % agg_name)
agg_name = agg_name.lower()
if agg_name == 'union': agg_name += 'agg'
sql_template = self.select % '%(function)s(%(field)s)'
sql_function = getattr(self, agg_name)
return sql_template, sql_function
def spatial_lookup_sql(self, lvalue, lookup_type, value, field, qn):
"""
Returns the SpatiaLite-specific SQL for the given lookup value
[a tuple of (alias, column, db_type)], lookup type, lookup
value, the model field, and the quoting function.
"""
alias, col, db_type = lvalue
# Getting the quoted field as `geo_col`.
geo_col = '%s.%s' % (qn(alias), qn(col))
if lookup_type in self.geometry_functions:
# See if a SpatiaLite geometry function matches the lookup type.
tmp = self.geometry_functions[lookup_type]
# Lookup types that are tuples take tuple arguments, e.g., 'relate' and
# distance lookups.
if isinstance(tmp, tuple):
# First element of tuple is the SpatiaLiteOperation instance, and the
# second element is either the type or a tuple of acceptable types
# that may be passed in as further parameters for the lookup type.
op, arg_type = tmp
# Ensuring that a tuple _value_ was passed in from the user
if not isinstance(value, (tuple, list)):
raise ValueError('Tuple required for `%s` lookup type.' % lookup_type)
# Geometry is first element of lookup tuple.
geom = value[0]
# Number of valid tuple parameters depends on the lookup type.
if len(value) != 2:
raise ValueError('Incorrect number of parameters given for `%s` lookup type.' % lookup_type)
# Ensuring the argument type matches what we expect.
if not isinstance(value[1], arg_type):
raise ValueError('Argument type should be %s, got %s instead.' % (arg_type, type(value[1])))
# For lookup type `relate`, the op instance is not yet created (has
# to be instantiated here to check the pattern parameter).
if lookup_type == 'relate':
op = op(value[1])
elif lookup_type in self.distance_functions:
op = op[0]
else:
op = tmp
geom = value
# Calling the `as_sql` function on the operation instance.
return op.as_sql(geo_col, self.get_geom_placeholder(field, geom))
elif lookup_type == 'isnull':
# Handling 'isnull' lookup type
return "%s IS %sNULL" % (geo_col, (not value and 'NOT ' or ''))
raise TypeError("Got invalid lookup_type: %s" % repr(lookup_type))
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
from django.contrib.gis.db.backends.spatialite.models import GeometryColumns
return GeometryColumns
def spatial_ref_sys(self):
from django.contrib.gis.db.backends.spatialite.models import SpatialRefSys
return SpatialRefSys
| apache-2.0 |
albertrdixon/CouchPotatoServer | libs/html5lib/treewalkers/genshistream.py | 1730 | 2278 | from __future__ import absolute_import, division, unicode_literals
from genshi.core import QName
from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT
from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT
from . import _base
from ..constants import voidElements, namespaces
class TreeWalker(_base.TreeWalker):
def __iter__(self):
# Buffer the events so we can pass in the following one
previous = None
for event in self.tree:
if previous is not None:
for token in self.tokens(previous, event):
yield token
previous = event
# Don't forget the final event!
if previous is not None:
for token in self.tokens(previous, None):
yield token
def tokens(self, event, next):
kind, data, pos = event
if kind == START:
tag, attribs = data
name = tag.localname
namespace = tag.namespace
converted_attribs = {}
for k, v in attribs:
if isinstance(k, QName):
converted_attribs[(k.namespace, k.localname)] = v
else:
converted_attribs[(None, k)] = v
if namespace == namespaces["html"] and name in voidElements:
for token in self.emptyTag(namespace, name, converted_attribs,
not next or next[0] != END
or next[1] != tag):
yield token
else:
yield self.startTag(namespace, name, converted_attribs)
elif kind == END:
name = data.localname
namespace = data.namespace
if name not in voidElements:
yield self.endTag(namespace, name)
elif kind == COMMENT:
yield self.comment(data)
elif kind == TEXT:
for token in self.text(data):
yield token
elif kind == DOCTYPE:
yield self.doctype(*data)
elif kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS,
START_CDATA, END_CDATA, PI):
pass
else:
yield self.unknown(kind)
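# Illustrative usage sketch (not part of the original module); it assumes
# genshi is installed and uses a made-up markup snippet:
#
#   from genshi.input import HTML
#   for token in TreeWalker(HTML(u"<p>Hello <br/> world</p>")):
#       print(token)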
| gpl-3.0 |
sestrella/ansible | lib/ansible/modules/network/fortimanager/fmgr_provisioning.py | 52 | 11458 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_provisioning
version_added: "2.7"
author: Andrew Welsh (@Ghilli3)
short_description: Provision devices via FortiManager
description:
- Add model devices to the FortiManager using the JSON-RPC API and have them pre-configured,
so that when central management is configured, the configuration is pushed down to the
registering devices
options:
adom:
description:
- The administrative domain (adom) the configuration belongs to
required: true
vdom:
description:
- The virtual domain (vdom) the configuration belongs to
host:
description:
- The FortiManager's Address.
required: true
username:
description:
- The username to log into the FortiManager
required: true
password:
description:
- The password associated with the username account.
required: false
policy_package:
description:
- The name of the policy package to be assigned to the device.
required: True
name:
description:
- The name of the device to be provisioned.
required: True
group:
description:
- The name of the device group the provisioned device can belong to.
required: False
serial:
description:
- The serial number of the device that will be provisioned.
required: True
platform:
description:
- The platform of the device, such as model number or VM.
required: True
description:
description:
- Description of the device to be provisioned.
required: False
os_version:
description:
- The Fortinet OS version to be used for the device, such as 5.0 or 6.0.
required: True
minor_release:
description:
- The minor release number such as 6.X.1, with X being the minor release.
required: False
patch_release:
description:
- The patch release number such as 6.0.X, with X being the patch release.
required: False
os_type:
description:
- The Fortinet OS type to be pushed to the device, such as 'FOS' for FortiOS.
required: True
'''
EXAMPLES = '''
- name: Create FGT1 Model Device
fmgr_provisioning:
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
adom: "root"
vdom: "root"
policy_package: "default"
name: "FGT1"
group: "Ansible"
serial: "FGVM000000117994"
platform: "FortiGate-VM64"
description: "Provisioned by Ansible"
os_version: '6.0'
minor_release: 0
patch_release: 0
os_type: 'fos'
- name: Create FGT2 Model Device
fmgr_provisioning:
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
adom: "root"
vdom: "root"
policy_package: "test_pp"
name: "FGT2"
group: "Ansible"
serial: "FGVM000000117992"
platform: "FortiGate-VM64"
description: "Provisioned by Ansible"
os_version: '5.0'
minor_release: 6
patch_release: 0
os_type: 'fos'
'''
RETURN = """
api_result:
description: full API response, includes status code and message
returned: always
type: str
"""
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.network.fortimanager.fortimanager import AnsibleFortiManager
# check for pyFMG lib
try:
from pyFMG.fortimgr import FortiManager
HAS_PYFMGR = True
except ImportError:
HAS_PYFMGR = False
def dev_group_exists(fmg, dev_grp_name, adom):
datagram = {
'adom': adom,
'name': dev_grp_name,
}
url = '/dvmdb/adom/{adom}/group/{dev_grp_name}'.format(adom=adom, dev_grp_name=dev_grp_name)
response = fmg.get(url, datagram)
return response
def prov_template_exists(fmg, prov_template, adom, vdom):
datagram = {
'name': prov_template,
'adom': adom,
}
url = '/pm/devprof/adom/{adom}/devprof/{name}'.format(adom=adom, name=prov_template)
response = fmg.get(url, datagram)
return response
def create_model_device(fmg, name, serial, group, platform, os_version,
os_type, minor_release, patch_release=0, adom='root'):
datagram = {
'adom': adom,
'flags': ['create_task', 'nonblocking'],
'groups': [{'name': group, 'vdom': 'root'}],
'device': {
'mr': minor_release,
'name': name,
'sn': serial,
'mgmt_mode': 'fmg',
'device action': 'add_model',
'platform_str': platform,
'os_ver': os_version,
'os_type': os_type,
'patch': patch_release,
'desc': 'Provisioned by Ansible',
}
}
url = '/dvm/cmd/add/device'
response = fmg.execute(url, datagram)
return response
def update_flags(fmg, name):
datagram = {
'flags': ['is_model', 'linked_to_model']
}
url = 'dvmdb/device/{name}'.format(name=name)
response = fmg.update(url, datagram)
return response
def assign_provision_template(fmg, template, adom, target):
datagram = {
'name': template,
'type': 'devprof',
'description': 'Provisioned by Ansible',
'scope member': [{'name': target}]
}
url = "/pm/devprof/adom/{adom}".format(adom=adom)
response = fmg.update(url, datagram)
return response
def set_devprof_scope(self, provisioning_template, adom, provision_targets):
"""
Set the scope (target devices) on the provisioning template (devprof).
"""
fields = dict()
targets = []
fields["name"] = provisioning_template
fields["type"] = "devprof"
fields["description"] = "CreatedByAnsible"
for target in provision_targets.strip().split(","):
# split the comma-separated target list into individual scope members
new_target = {"name": target}
targets.append(new_target)
fields["scope member"] = targets
body = {"method": "set", "params": [{"url": "/pm/devprof/adom/{adom}".format(adom=adom),
"data": fields, "session": self.session}]}
response = self.make_request(body).json()
return response
def assign_dev_grp(fmg, grp_name, device_name, vdom, adom):
datagram = {
'name': device_name,
'vdom': vdom,
}
url = "/dvmdb/adom/{adom}/group/{grp_name}/object member".format(adom=adom, grp_name=grp_name)
response = fmg.set(url, datagram)
return response
def update_install_target(fmg, device, pp='default', vdom='root', adom='root'):
datagram = {
'scope member': [{'name': device, 'vdom': vdom}],
'type': 'pkg'
}
url = '/pm/pkg/adom/{adom}/{pkg_name}'.format(adom=adom, pkg_name=pp)
response = fmg.update(url, datagram)
return response
def install_pp(fmg, device, pp='default', vdom='root', adom='root'):
datagram = {
'adom': adom,
'flags': 'nonblocking',
'pkg': pp,
'scope': [{'name': device, 'vdom': vdom}],
}
url = 'securityconsole/install/package'
response = fmg.execute(url, datagram)
return response
def main():
argument_spec = dict(
adom=dict(required=False, type="str"),
vdom=dict(required=False, type="str"),
host=dict(required=True, type="str"),
password=dict(fallback=(env_fallback, ["ANSIBLE_NET_PASSWORD"]), no_log=True),
username=dict(fallback=(env_fallback, ["ANSIBLE_NET_USERNAME"]), no_log=True),
policy_package=dict(required=False, type="str"),
name=dict(required=False, type="str"),
group=dict(required=False, type="str"),
serial=dict(required=True, type="str"),
platform=dict(required=True, type="str"),
description=dict(required=False, type="str"),
os_version=dict(required=True, type="str"),
minor_release=dict(required=False, type="str"),
patch_release=dict(required=False, type="str"),
os_type=dict(required=False, type="str"),
)
module = AnsibleModule(argument_spec, supports_check_mode=True, )
# check if params are set
if module.params["host"] is None or module.params["username"] is None:
module.fail_json(msg="Host and username are required for connection")
# check if login failed
fmg = AnsibleFortiManager(module, module.params["host"], module.params["username"], module.params["password"])
response = fmg.login()
if "FortiManager instance connnected" not in str(response):
module.fail_json(msg="Connection to FortiManager Failed")
else:
if module.params["policy_package"] is None:
module.params["policy_package"] = 'default'
if module.params["adom"] is None:
module.params["adom"] = 'root'
if module.params["vdom"] is None:
module.params["vdom"] = 'root'
if module.params["platform"] is None:
module.params["platform"] = 'FortiGate-VM64'
if module.params["os_type"] is None:
module.params["os_type"] = 'fos'
results = create_model_device(fmg,
module.params["name"],
module.params["serial"],
module.params["group"],
module.params["platform"],
module.params["os_ver"],
module.params["os_type"],
module.params["minor_release"],
module.params["patch_release"],
module.params["adom"])
if results[0] != 0:
module.fail_json(msg="Create model failed", **results)
results = update_flags(fmg, module.params["name"])
if results[0] != 0:
module.fail_json(msg="Update device flags failed", **results)
# results = assign_dev_grp(fmg, 'Ansible', 'FGVM000000117992', 'root', 'root')
# if not results[0] == 0:
# module.fail_json(msg="Setting device group failed", **results)
results = update_install_target(fmg, module.params["name"], module.params["policy_package"])
if results[0] != 0:
module.fail_json(msg="Adding device target to package failed", **results)
results = install_pp(fmg, module.params["name"], module.params["policy_package"])
if results[0] != 0:
module.fail_json(msg="Installing policy package failed", **results)
fmg.logout()
# results is returned as a tuple
return module.exit_json(**results[1])
if __name__ == "__main__":
main()
| gpl-3.0 |
TheBraveWarrior/pyload | module/plugins/hoster/LinkifierCom.py | 5 | 2486 | # -*- coding: utf-8 -*-
import hashlib
import pycurl
from ..internal.MultiHoster import MultiHoster
from ..internal.misc import json, seconds_to_midnight
class LinkifierCom(MultiHoster):
__name__ = "AlldebridCom"
__type__ = "hoster"
__version__ = "0.02"
__status__ = "testing"
__pattern__ = r'^unmatchable$'
__config__ = [("activated", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("fallback", "bool", "Fallback to free download if premium fails", False),
("chk_filesize", "bool", "Check file size", True),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
("revert_failed", "bool", "Revert to standard download if fails", True)]
__description__ = """Linkifier.com multi-hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]
API_KEY = "d046c4309bb7cabd19f49118a2ab25e0"
API_URL = "https://api.linkifier.com/downloadapi.svc/"
def api_response(self, method, user, password, **kwargs):
post = {'login': user,
'md5Pass': hashlib.md5(password).hexdigest(),
'apiKey': self.API_KEY}
post.update(kwargs)
self.req.http.c.setopt(pycurl.HTTPHEADER, ["Content-Type: application/json; charset=utf-8"])
res = json.loads(self.load(self.API_URL + method,
post=json.dumps(post)))
self.req.http.c.setopt(pycurl.HTTPHEADER, ["Content-Type: text/html; charset=utf-8"])
return res
def setup(self):
self.multiDL = True
def handle_premium(self, pyfile):
json_data = self.api_response("stream",
self.account.user,
self.account.info['login']['password'],
url=pyfile.url)
if json_data['hasErrors']:
error_msg = json_data['ErrorMSG'] or "Unknown error"
if error_msg in ("Customer reached daily limit for current hoster",
"Accounts are maxed out for current hoster"):
self.retry(wait=seconds_to_midnight())
self.fail(error_msg)
self.resume_download = json_data['con_resume']
self.chunk_limit = json_data.get('con_max', 1) or 1
self.download(json_data['url'], fixurl=False)
| gpl-3.0 |
max3903/SFLphone | daemon/libs/pjproject/tests/cdash/cfg_msvc.py | 107 | 2726 | #
# cfg_msvc.py - MSVC/Visual Studio target configurator
#
# Copyright (C) 2008-2009 Teluu Inc. (http://www.teluu.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import builder
import os
import sys
# Each configurator must export this function
def create_builder(args):
usage = """\
Usage:
main.py cfg_msvc [-h|--help] [-t|--target TARGET] [cfg_site]
Arguments:
cfg_site: site configuration module. If not specified, "cfg_site"
is implied
-t,--target TARGET: Visual Studio build configuration to build. Default is
"Release|Win32". Sample values: "Debug|Win32"
-h, --help Show this help screen
"""
cfg_site = "cfg_site"
target = "Release|Win32"
in_option = ""
for arg in args:
if in_option=="-t":
target = arg
in_option = ""
elif arg=="--target" or arg=="-t":
in_option = "-t"
elif arg=="-h" or arg=="--help":
print usage
sys.exit(0)
elif arg[0]=="-":
print usage
sys.exit(1)
else:
cfg_site = arg
if os.access(cfg_site+".py", os.F_OK) == False:
print "Error: file '%s.py' doesn't exist." % (cfg_site)
sys.exit(1)
cfg_site = __import__(cfg_site)
test_cfg = builder.BaseConfig(cfg_site.BASE_DIR, \
cfg_site.URL, \
cfg_site.SITE_NAME, \
cfg_site.GROUP, \
cfg_site.OPTIONS)
config_site = "#define PJ_TODO(x)\n" + cfg_site.CONFIG_SITE
user_mak = cfg_site.USER_MAK
builders = [
builder.MSVCTestBuilder(test_cfg,
target=target,
build_config_name="default",
config_site=config_site,
exclude=cfg_site.EXCLUDE,
not_exclude=cfg_site.NOT_EXCLUDE)
]
return builders
| gpl-3.0 |
jjx02230808/project0223 | benchmarks/bench_lasso.py | 297 | 3305 | """
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
if __name__ == '__main__':
from sklearn.linear_model import Lasso, LassoLars
import pylab as pl
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
pl.figure('scikit-learn LASSO benchmark results')
pl.subplot(211)
pl.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
pl.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
pl.axis('tight')
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
pl.subplot(212)
pl.plot(list_n_features, lasso_results, 'b-', label='Lasso')
pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
pl.title('%d samples, alpha=%s' % (n_samples, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
alvarogzp/telegram-bot | bot/bot.py | 1 | 11029 | import time
from bot import project_info
from bot.action.core.action import Action
from bot.action.core.update import Update
from bot.action.standard.about import VersionAction
from bot.action.standard.admin.config_status import ConfigStatus
from bot.action.util.textformat import FormattedText
from bot.api.api import Api
from bot.api.async import AsyncApi
from bot.api.telegram import TelegramBotApi
from bot.logger.admin_logger import AdminLogger
from bot.logger.worker_logger import WorkerStartStopLogger
from bot.multithreading.scheduler import SchedulerApi
from bot.storage import Config, Cache
from bot.storage import State
CONFIG_DIR = "config"
STATE_DIR = "state"
class Bot:
def __init__(self, project_name: str = ""):
"""
:param project_name: Optional name to be displayed on starting message on admin chat.
"""
self.config = Config(CONFIG_DIR)
self.state = State(STATE_DIR)
self.cache = Cache()
debug = self.config.debug()
telegram_api = TelegramBotApi(self.config.auth_token, self.config.reuse_connections(), debug)
self.api = Api(telegram_api, self.state)
self.cache.bot_info = self.api.getMe()
self.logger = AdminLogger(self.api, self.config.admin_chat_id, debug, self.config.traceback_chat_id())
self.scheduler = self._create_scheduler()
self.starting(project_name)
if self.config.async():
self.scheduler.setup()
self.api.enable_async(AsyncApi(self.api, self.scheduler))
self.action = Action()
self.update_processor = UpdateProcessor(self.action, self.logger)
def _create_scheduler(self):
max_network_workers = int(self.config.max_network_workers)
worker_logger = WorkerStartStopLogger(self.logger.logger)
return SchedulerApi(
max_network_workers, self.logger.work_error, worker_logger.worker_start, worker_logger.worker_stop
)
def set_action(self, action: Action):
action.setup(self.api, self.config, self.state, self.cache, self.scheduler)
self.action = action
self.update_processor = UpdateProcessor(self.action, self.logger)
def run(self):
try:
self.main_loop()
except KeyboardInterrupt:
self.logger.info("KeyboardInterrupt")
except SystemExit as e:
self.logger.info("SystemExit: " + str(e))
raise e
except BaseException as e:
self.logger.error(e, "Fatal error")
raise e
finally:
self.shutdown()
def starting(self, project_name: str):
starting_info = []
if project_name:
version = VersionAction.get_version(project_name)
starting_info.append(
FormattedText()
.normal("Running: {name} {version}")
.start_format()
.bold(name=project_name, version=version)
.end_format()
)
framework_version = VersionAction.get_version(project_info.name)
starting_info.append(
FormattedText()
.normal("Framework: {name} {version}")
.start_format()
.bold(name=project_info.name, version=framework_version)
.end_format()
)
config_status = ConfigStatus(self.config, self.state).get_config_status()
starting_info.extend(config_status)
self.logger.info_formatted_text(
FormattedText().bold("Starting"),
*starting_info
)
def main_loop(self):
while True:
self.process_pending_updates()
self.process_normal_updates()
def process_pending_updates(self):
PendingUpdatesProcessor(self.api.get_pending_updates, self.logger, self.config, self.update_processor).run()
def process_normal_updates(self):
NormalUpdatesProcessor(self.api.get_updates, self.logger, self.config, self.update_processor).run()
def shutdown(self):
self.action.shutdown()
self.scheduler.shutdown()
self.logger.info("Finished")
class UpdateProcessor:
def __init__(self, action: Action, logger: AdminLogger):
self.action = action
self.logger = logger
def process_update(self, update: Update):
try:
self.action.process(update)
except Exception as e:
# As logger errors are probably API failures that the next updates may also get,
# let them be propagated so that no more updates are processed before waiting some time
self.logger.error(e, "process_update")
class UpdatesProcessor:
def __init__(self, get_updates_func: callable, logger: AdminLogger, config: Config,
update_processor: UpdateProcessor):
self.get_updates_func = get_updates_func
self.logger = logger
self.config = config
self.update_processor = update_processor
self.last_error = None
self.number_of_updates_processed = 0
def run(self):
self.processing_starting()
try:
self.__processing_loop()
finally:
self.processing_ended()
self.processing_ended_successfully()
def __processing_loop(self):
while self.should_keep_processing_updates():
self.__get_and_process_handling_errors()
def __get_and_process_handling_errors(self):
try:
self.__get_and_process()
except Exception as e:
self.__handle_error(e)
# notify there has been an error
self.processing_error(e)
else:
# notify successful processing
self.processing_successful()
def __get_and_process(self):
for update in self.get_updates_func():
self.update_processor.process_update(update)
self.number_of_updates_processed += 1
def __handle_error(self, error: Exception):
sleep_seconds = self.config.sleep_seconds_on_get_updates_error
# we do not want to let non-fatal (e.g. API) errors escape from here
self.safe_log_error(error, "get_and_process", "Sleeping for {seconds} seconds.".format(seconds=sleep_seconds))
# there has been an error while getting updates, sleep a little to give a chance
# for the server or the network to recover (if that was the case), and to not to flood the server
time.sleep(int(sleep_seconds))
def safe_log_error(self, error: Exception, *info: str):
"""Log error failing silently on error"""
self.__do_safe(lambda: self.logger.error(error, *info))
def safe_log_info(self, *info: str):
"""Log info failing silently on error"""
self.__do_safe(lambda: self.logger.info(*info))
@staticmethod
def __do_safe(func: callable):
try:
return func()
except Exception:
pass
def should_keep_processing_updates(self):
raise NotImplementedError()
def processing_successful(self):
"""Updates were processed successfully"""
self.last_error = None
def processing_error(self, error: Exception):
"""There has been an error while processing the last updates"""
self.last_error = error
def processing_starting(self):
"""Updates are about to start being processed"""
pass
def processing_ended(self):
"""Processing has ended, we don't know if successfully or caused by an error"""
self.safe_log_info(
"Ending",
"Updates processed: {updates_processed_number}"
.format(updates_processed_number=self.number_of_updates_processed)
)
def processing_ended_successfully(self):
"""Processing has ended successfully"""
pass
class PendingUpdatesProcessor(UpdatesProcessor):
def __init__(self, get_updates_func: callable, logger: AdminLogger, config: Config,
update_processor: UpdateProcessor):
super().__init__(get_updates_func, logger, config, update_processor)
# set to some value other than None to let the processing run the first time
self.last_error = True
def should_keep_processing_updates(self):
# if there has been an error not all pending updates were processed
# so try again until it ends without error
was_error = self.last_error is not None
if was_error and self.last_error is not True:
self.safe_log_info("Restarting", "Recovered from error. Continue processing pending updates...")
return was_error
def processing_starting(self):
self.safe_log_info("Pending", "Processing pending updates...")
def processing_ended_successfully(self):
self.safe_log_info("Continuing", "Pending updates successfully processed.")
class NormalUpdatesProcessor(UpdatesProcessor):
def __init__(self, get_updates_func: callable, logger: AdminLogger, config: Config,
update_processor: UpdateProcessor):
super().__init__(get_updates_func, logger, config, update_processor)
self.last_successful_processing = time.time()
def processing_successful(self):
super().processing_successful()
self.last_successful_processing = time.time()
def should_keep_processing_updates(self):
if self.last_error is None:
# if last processing ended without error, keep going!
return True
error_seconds_in_normal_mode = time.time() - self.last_successful_processing
max_error_seconds_allowed_in_normal_mode = int(self.config.max_error_seconds_allowed_in_normal_mode)
if error_seconds_in_normal_mode > max_error_seconds_allowed_in_normal_mode:
# too much time has passed since the last successful processing;
# although that does not mean no updates have been processed, we are
# having problems and updates are being delayed, so going back to
# process pending updates mode
self.safe_log_info(
"Restarting",
"Exceeded {max_seconds} maximum seconds with errors (current value: {seconds} seconds)"
.format(max_seconds=max_error_seconds_allowed_in_normal_mode,
seconds=int(error_seconds_in_normal_mode)),
"Switching to pending updates mode."
)
return False
else:
self.safe_log_info(
"Restarted",
"Recovered from error (current error burst duration is {seconds} seconds of "
"{max_seconds} maximum seconds allowed)"
.format(seconds=int(error_seconds_in_normal_mode),
max_seconds=max_error_seconds_allowed_in_normal_mode),
"Continuing in normal updates mode."
)
return True
def processing_starting(self):
self.safe_log_info("Started", "Switched to normal updates mode.")
| agpl-3.0 |
jaggu303619/asylum-v2.0 | openerp/addons/point_of_sale/report/pos_payment_report_user.py | 62 | 3251 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
class pos_payment_report_user(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(pos_payment_report_user, self).__init__(cr, uid, name, context=context)
self.total = 0.0
self.localcontext.update({
'time': time,
'pos_payment_user': self.__pos_payment_user__,
'pos_payment_user_total':self.__pos_payment_user__total__,
})
def __pos_payment_user__(self, form):
data={}
ids = form['user_id']
sql = "select pt.name,pp.default_code as code,pol.qty,pu.name as uom,pol.discount,pol.price_unit, " \
"(pol.price_unit * pol.qty * (1 - (pol.discount) / 100.0)) as total " \
"from pos_order as po,pos_order_line as pol,product_product as pp,product_template as pt,product_uom as pu " \
"where pt.id=pp.product_tmpl_id and pp.id=pol.product_id and po.id = pol.order_id and pu.id=pt.uom_id " \
"and po.state in ('paid','invoiced') and to_char(date_trunc('day',po.date_order),'YYYY-MM-DD')::date = current_date " \
"and po.user_id IN %s"
self.cr.execute (sql, (tuple(ids), ))
data=self.cr.dictfetchall()
return data
def __pos_payment_user__total__(self, form):
res=[]
ids = form['user_id']
self.cr.execute ("select sum(pol.price_unit * pol.qty * (1 - (pol.discount) / 100.0)) " \
"from pos_order as po,pos_order_line as pol,product_product as pp,product_template as pt " \
"where pt.id=pp.product_tmpl_id and pp.id=pol.product_id and po.id = pol.order_id " \
"and po.state='paid' and to_char(date_trunc('day',po.date_order),'YYYY-MM-DD')::date = current_date " \
"and po.user_id IN %s",(tuple(ids),))
res=self.cr.fetchone()
res = res and res[0] or 0.0
return res
report_sxw.report_sxw('report.pos.payment.report.user', 'pos.order', 'addons/point_of_sale/report/pos_payment_report_user.rml', parser=pos_payment_report_user,header='internal')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Ruide/angr-dev | angr/angr/misc/autoimport.py | 4 | 2072 | import os
import importlib
import logging
l = logging.getLogger('angr.misc.autoimport')
def auto_import_packages(base_module, base_path, ignore_dirs=(), ignore_files=(), scan_modules=True):
for lib_module_name in os.listdir(base_path):
if lib_module_name in ignore_dirs:
continue
lib_path = os.path.join(base_path, lib_module_name)
if not os.path.isdir(lib_path):
l.debug("Not a dir: %s", lib_module_name)
continue
l.debug("Loading %s.%s", base_module, lib_module_name)
try:
package = importlib.import_module(".%s" % lib_module_name, base_module)
except ImportError:
l.warning("Unable to autoimport package %s.%s", base_module, lib_module_name, exc_info=True)
else:
if scan_modules:
for name, mod in auto_import_modules('%s.%s' % (base_module, lib_module_name), lib_path, ignore_files=ignore_files):
if name not in dir(package):
setattr(package, name, mod)
yield lib_module_name, package
def auto_import_modules(base_module, base_path, ignore_files=()):
for proc_file_name in os.listdir(base_path):
if not proc_file_name.endswith('.py'):
continue
if proc_file_name in ignore_files or proc_file_name == '__init__.py':
continue
proc_module_name = proc_file_name[:-3]
try:
proc_module = importlib.import_module(".%s" % proc_module_name, base_module)
except ImportError:
l.warning("Unable to autoimport module %s.%s", base_module, proc_module_name, exc_info=True)
continue
else:
yield proc_module_name, proc_module
def filter_module(mod, type_req=None, subclass_req=None):
for name in dir(mod):
val = getattr(mod, name)
if type_req is not None and not isinstance(val, type_req):
continue
if subclass_req is not None and not issubclass(val, subclass_req):
continue
yield name, val
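# Illustrative usage sketch (not part of the original module); the package
# name, base path and SimProcedure base class are hypothetical stand-ins:
#
#   for pkg_name, pkg in auto_import_packages('angr.procedures', procedures_path):
#       for cls_name, cls in filter_module(pkg, subclass_req=SimProcedure):
#           registry[pkg_name + '.' + cls_name] = cls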
| bsd-2-clause |
Nu3001/external_chromium_org | chrome/browser/safe_browsing/safe_browsing_testserver.py | 74 | 1483 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wraps the upstream safebrowsing_test_server.py to run in Chrome tests."""
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_DIR, '..', '..', '..', 'net',
'tools', 'testserver'))
import testserver_base
class ServerRunner(testserver_base.TestServerRunner):
"""TestServerRunner for safebrowsing_test_server.py."""
def create_server(self, server_data):
sys.path.append(os.path.join(BASE_DIR, '..', '..', '..', 'third_party',
'safe_browsing', 'testing'))
import safebrowsing_test_server
server = safebrowsing_test_server.SetupServer(
self.options.data_file, self.options.host, self.options.port,
opt_enforce_caching=False, opt_validate_database=True)
print 'Safebrowsing HTTP server started on port %d...' % server.server_port
server_data['port'] = server.server_port
return server
def add_options(self):
testserver_base.TestServerRunner.add_options(self)
self.option_parser.add_option('--data-file', dest='data_file',
help='File containing safebrowsing test '
'data and expectations')
if __name__ == '__main__':
sys.exit(ServerRunner().main())
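# Illustrative invocation (not part of the original script); the data-file path
# is a placeholder and the remaining flags (port, host, ...) come from
# testserver_base:
#
#   python safe_browsing_testserver.py --data-file=/tmp/testing_input_nomac.dat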
| bsd-3-clause |
antivirtel/Flexget | flexget/plugins/input/rottentomatoes_list.py | 13 | 2974 | from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.cached_input import cached
try:
from flexget.plugins.api_rottentomatoes import lists
except ImportError:
raise plugin.DependencyError(issued_by='rottentomatoes_lookup', missing='api_rottentomatoes',
message='rottentomatoes_lookup requires the `api_rottentomatoes` plugin')
log = logging.getLogger('rottentomatoes_list')
class RottenTomatoesList(object):
"""
Emits an entry for each movie in a Rotten Tomatoes list.
Configuration:
dvds:
- top_rentals
- upcoming
movies:
- box_office
Possible lists are
* dvds: top_rentals, current_releases, new_releases, upcoming
* movies: box_office, in_theaters, opening, upcoming
"""
def __init__(self):
# We could pull these from the API through lists.json but that's extra web/API key usage
self.dvd_lists = ['top_rentals', 'current_releases', 'new_releases', 'upcoming']
self.movie_lists = ['box_office', 'in_theaters', 'opening', 'upcoming']
def validator(self):
from flexget import validator
root = validator.factory('dict')
root.accept('list', key='dvds').accept('choice').accept_choices(self.dvd_lists)
root.accept('list', key='movies').accept('choice').accept_choices(self.movie_lists)
root.accept('text', key='api_key')
return root
@cached('rottentomatoes_list', persist='2 hours')
def on_task_input(self, task, config):
entries = []
api_key = config.get('api_key', None)
for l_type, l_names in config.items():
if type(l_names) is not list:
continue
for l_name in l_names:
results = lists(list_type=l_type, list_name=l_name, api_key=api_key)
if results:
for movie in results['movies']:
if [entry for entry in entries if movie['title'] == entry.get('title')]:
continue
imdb_id = movie.get('alternate_ids', {}).get('imdb')
if imdb_id:
imdb_id = 'tt' + str(imdb_id)
entries.append(Entry(title=movie['title'], rt_id=movie['id'],
imdb_id=imdb_id,
rt_name=movie['title'],
url=movie['links']['alternate']))
else:
log.critical('Failed to fetch Rotten tomatoes %s list: %s. List doesn\'t exist?' %
(l_type, l_name))
return entries
@event('plugin.register')
def register_plugin():
plugin.register(RottenTomatoesList, 'rottentomatoes_list', api_ver=2)
| mit |
zhujzhuo/Sahara | sahara/service/edp/oozie/workflow_creator/hive_workflow.py | 14 | 1576 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.service.edp.oozie.workflow_creator import base_workflow
from sahara.utils import xmlutils as x
class HiveWorkflowCreator(base_workflow.OozieWorkflowCreator):
def __init__(self):
super(HiveWorkflowCreator, self).__init__('hive')
hive_elem = self.doc.getElementsByTagName('hive')[0]
hive_elem.setAttribute('xmlns', 'uri:oozie:hive-action:0.2')
def build_workflow_xml(self, script, job_xml, prepare={},
configuration=None, params={},
files=[], archives=[]):
for k in sorted(prepare):
self._add_to_prepare_element(k, prepare[k])
self._add_job_xml_element(job_xml)
self._add_configuration_elements(configuration)
x.add_text_element_to_tag(self.doc, self.tag_name,
'script', script)
x.add_equal_separated_dict(self.doc, self.tag_name, 'param', params)
self._add_files_and_archives(files, archives)
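# Illustrative usage sketch (not part of the original module); the script name
# and parameter values are placeholders. The generated workflow XML accumulates
# on the creator's underlying document:
#
#   creator = HiveWorkflowCreator()
#   creator.build_workflow_xml('query.q', 'hive-site.xml',
#                              params={'INPUT': '/user/in', 'OUTPUT': '/user/out'})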
| apache-2.0 |
edbrannin/library-searcher-python | render_results.py | 1 | 2109 | import click
from jinja2 import Environment, FileSystemLoader
from model import *
session = setup_sqlalchemy()()
env = Environment(loader=FileSystemLoader("."))
template = env.get_template("results.html.j2")
sql = """
select
r.author,
r.title,
h.branch_name,
h.collection_name,
h.call_class,
count(*) as "count"
from
resources r,
resource_holdings h,
resource_status s
where
r.id = h.item_id
and s.resource_id = h.item_id
and s.item_identifier = h.barcode
and position = 0
and branch_name in (
'Rochester Public Library Central',
'Irondequoit Public Library',
'Fairport Public Library'
)
and available = 1
group by
r.author,
r.title,
h.branch_name,
h.collection_name,
h.call_class
order by
author,
title,
branch_name,
collection_name asc,
call_class
"""
<<<<<<< HEAD
unlisted_sql = """
SELECT max(rr.search_query) search_query
, rr.author
, rr.title
FROM resources rr
WHERE NOT EXISTS
(SELECT 1
FROM resources r
, resource_holdings h
, resource_status s
WHERE r.id = h.item_id
AND s.resource_id = h.item_id
AND s.item_identifier = h.barcode
AND POSITION = 0
AND branch_name IN ( 'Rochester Public Library Central'
, 'Irondequoit Public Library' )
AND available = 1
AND r.author = rr.author
AND r.title = rr.title
GROUP BY r.author
, r.title
, h.branch_name
, h.collection_name
, h.call_class
ORDER BY author
, title
, branch_name
, collection_name ASC, call_class)
GROUP BY author
, title
ORDER BY search_query
, author
, title
"""
@click.command()
@click.argument("out_file", type=click.File('wb'))
def render_results(out_file):
rows = session.execute(sql).fetchall()
missing_rows = session.execute(unlisted_sql).fetchall()
text = template.render(rows=rows, missing_rows=missing_rows).encode('utf-8')
out_file.write(text)
if __name__ == '__main__':
render_results()
| mit |
esrf-emotion/emotion | emotion/task_utils.py | 1 | 2961 | import sys
import types
import gevent
import signal
import functools
class cleanup:
def __init__(self, *args, **keys):
self.cleanup_funcs = args
self.keys = keys
def __enter__(self):
pass
def __exit__(self, exc_type, value, traceback):
if self.cleanup_funcs:
for cleanup_func in self.cleanup_funcs:
try:
cleanup_func(**self.keys)
except:
sys.excepthook(*sys.exc_info())
continue
if exc_type is not None:
raise exc_type, value, traceback
class error_cleanup:
def __init__(self, *args, **keys):
self.error_funcs = args
self.keys = keys
def __enter__(self):
pass
def __exit__(self, exc_type, value, traceback):
if exc_type is not None:
if self.error_funcs:
for error_func in self.error_funcs:
try:
error_func(**self.keys)
except:
sys.excepthook(*sys.exc_info())
continue
# the previous try..except is resetting exception,
# so re-raise it from here
raise exc_type, value, traceback
class TaskException:
def __init__(self, exception, error_string, tb):
self.exception = exception
self.error_string = error_string
self.tb = tb
class wrap_errors(object):
def __init__(self, func):
"""Make a new function from `func', such that it catches all exceptions
and returns it as a TaskException object
"""
self.func = func
def __call__(self, *args, **kwargs):
func = self.func
try:
return func(*args, **kwargs)
except:
return TaskException(*sys.exc_info())
def __str__(self):
return str(self.func)
def __repr__(self):
return repr(self.func)
def __getattr__(self, item):
return getattr(self.func, item)
def kill_with_kbint(g):
g.kill(KeyboardInterrupt)
def special_get(self, *args, **kwargs):
sigint_handler = gevent.signal(signal.SIGINT, functools.partial(kill_with_kbint, self))
try:
ret = self._get(*args, **kwargs)
finally:
sigint_handler.cancel()
if isinstance(ret, TaskException):
raise ret.exception, ret.error_string, ret.tb
else:
return ret
def task(func):
def start_task(*args, **kwargs):
wait = kwargs.pop("wait", True)
timeout = kwargs.pop("timeout", None)
t = gevent.spawn(wrap_errors(func), *args, **kwargs)
t._get = t.get
try:
setattr(t, "get", types.MethodType(special_get, t))
if wait:
return t.get(timeout=timeout)
else:
return t
except:
t.kill()
raise
return start_task
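# Illustrative usage sketch (not part of the original module); the decorated
# function and the axis object are hypothetical:
#
#   @task
#   def homing(axis):
#       axis.home()
#
#   t = homing(my_axis, wait=False)   # returns the task object immediately
#   t.get(timeout=10)                 # waits, re-raising any wrapped exception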
| gpl-2.0 |
MaDKaTZe/phantomjs | src/qt/qtwebkit/Tools/gtk/gtkdoc.py | 113 | 17832 | # Copyright (C) 2011 Igalia S.L.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import errno
import logging
import os
import os.path
import subprocess
import sys
class GTKDoc(object):
"""Class that controls a gtkdoc run.
Each instance of this class represents one gtkdoc configuration
and set of documentation. The gtkdoc package is a series of tools
run consecutively which converts inline C/C++ documentation into
docbook files and then into HTML. This class is suitable for
generating documentation or simply verifying correctness.
Keyword arguments:
output_dir -- The path where gtkdoc output should be placed. Generation
may overwrite files in this directory. Required.
module_name -- The name of the documentation module. For libraries this
is typically the library name. Required if no library path
is given.
source_dirs -- A list of paths to the source code to be scanned. Required.
ignored_files -- A list of filenames to ignore in the source directory. It is
only necessary to provide the basenames of these files.
Typically it is important to provide an updated list of
ignored files to prevent warnings about undocumented symbols.
decorator -- If a decorator is used to unhide certain symbols in header
files this parameter is required for successful scanning.
(default '')
deprecation_guard -- gtkdoc tries to ensure that symbols marked as deprecated
are encased in this C preprocessor define. This is required
to avoid gtkdoc warnings. (default '')
cflags -- This parameter specifies any preprocessor flags necessary for
building the scanner binary during gtkdoc-scanobj. Typically
this includes all absolute include paths necessary to resolve
all header dependencies. (default '')
ldflags -- This parameter specifies any linker flags necessary for
building the scanner binary during gtkdoc-scanobj. Typically
this includes "-lyourlibraryname". (default '')
library_path -- This parameter specifies the path to the directory where your
library resides. It is used for building the scanner binary during
gtkdoc-scanobj. (default '')
doc_dir -- The path to other documentation files necessary to build
the documentation. The files in this directory as well as
the files in the 'html' subdirectory will be copied
recursively into the output directory. (default '')
main_sgml_file -- The path or name (if a doc_dir is given) of the SGML file
that is considered the main page of your documentation.
(default: <module_name>-docs.sgml)
version -- The version number of the module. If this is provided,
a version.xml file containing the version will be created
in the output directory during documentation generation.
interactive -- Whether or not errors or warnings should prompt the user
to continue. When this value is false, generation
will continue despite warnings. (default False)
virtual_root -- A temporary installation directory which is used as the root
where the actual installation prefix lives; this is mostly
useful for packagers, and should be set to what is given to
make install as DESTDIR.
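Example (illustrative sketch -- the module name, paths, flags and version
below are hypothetical, not taken from any real project):
gtkdoc = GTKDoc({'output_dir': '/tmp/mylib-docs',
'module_name': 'mylib',
'source_dirs': ['/path/to/mylib/src'],
'doc_dir': '/path/to/mylib/docs',
'cflags': '-I/path/to/mylib',
'ldflags': '-lmylib',
'library_path': '/path/to/mylib/.libs',
'version': '1.0'})
gtkdoc.generate(html=True)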
"""
def __init__(self, args):
# Parameters specific to scanning.
self.module_name = ''
self.source_dirs = []
self.ignored_files = []
self.decorator = ''
self.deprecation_guard = ''
# Parameters specific to gtkdoc-scanobj.
self.cflags = ''
self.ldflags = ''
self.library_path = ''
# Parameters specific to generation.
self.output_dir = ''
self.doc_dir = ''
self.main_sgml_file = ''
# Parameters specific to gtkdoc-fixxref.
self.cross_reference_deps = []
self.interactive = False
self.logger = logging.getLogger('gtkdoc')
for key, value in iter(args.items()):
setattr(self, key, value)
def raise_error_if_not_specified(key):
if not getattr(self, key):
raise Exception('%s not specified.' % key)
raise_error_if_not_specified('output_dir')
raise_error_if_not_specified('source_dirs')
raise_error_if_not_specified('module_name')
# Make all paths absolute in case we were passed relative paths, since
# we change the current working directory when executing subcommands.
self.output_dir = os.path.abspath(self.output_dir)
self.source_dirs = [os.path.abspath(x) for x in self.source_dirs]
if self.library_path:
self.library_path = os.path.abspath(self.library_path)
if not self.main_sgml_file:
self.main_sgml_file = self.module_name + "-docs.sgml"
def generate(self, html=True):
self.saw_warnings = False
self._copy_doc_files_to_output_dir(html)
self._write_version_xml()
self._run_gtkdoc_scan()
self._run_gtkdoc_scangobj()
self._run_gtkdoc_mktmpl()
self._run_gtkdoc_mkdb()
if not html:
return
self._run_gtkdoc_mkhtml()
self._run_gtkdoc_fixxref()
def _delete_file_if_exists(self, path):
if not os.access(path, os.F_OK | os.R_OK):
return
self.logger.debug('deleting %s', path)
os.unlink(path)
def _create_directory_if_nonexistent(self, path):
try:
os.makedirs(path)
except OSError as error:
if error.errno != errno.EEXIST:
raise
def _raise_exception_if_file_inaccessible(self, path):
if not os.path.exists(path) or not os.access(path, os.R_OK):
raise Exception("Could not access file at: %s" % path)
def _output_has_warnings(self, outputs):
for output in outputs:
if output and output.find('warning') != -1:
return True
return False
def _ask_yes_or_no_question(self, question):
if not self.interactive:
return True
question += ' [y/N] '
answer = None
while answer != 'y' and answer != 'n' and answer != '':
answer = raw_input(question).lower()
return answer == 'y'
def _run_command(self, args, env=None, cwd=None, print_output=True, ignore_warnings=False):
if print_output:
self.logger.info("Running %s", args[0])
self.logger.debug("Full command args: %s", str(args))
process = subprocess.Popen(args, env=env, cwd=cwd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = [b.decode("utf-8") for b in process.communicate()]
if print_output:
if stdout:
sys.stdout.write(stdout)
if stderr:
sys.stderr.write(stderr)
if process.returncode != 0:
raise Exception('%s produced a non-zero return code %i'
% (args[0], process.returncode))
if not ignore_warnings and ('warning' in stderr or 'warning' in stdout):
self.saw_warnings = True
if not self._ask_yes_or_no_question('%s produced warnings, '
'try to continue?' % args[0]):
raise Exception('%s step failed' % args[0])
return stdout.strip()
def _copy_doc_files_to_output_dir(self, html=True):
if not self.doc_dir:
self.logger.info('Not copying any files from doc directory,'
' because no doc directory given.')
return
def copy_file_replacing_existing(src, dest):
if os.path.isdir(src):
self.logger.debug('skipped directory %s', src)
return
if not os.access(src, os.F_OK | os.R_OK):
self.logger.debug('skipped unreadable %s', src)
return
self._delete_file_if_exists(dest)
self.logger.debug('created %s', dest)
os.link(src, dest)
def copy_all_files_in_directory(src, dest):
for path in os.listdir(src):
copy_file_replacing_existing(os.path.join(src, path),
os.path.join(dest, path))
self.logger.info('Copying template files to output directory...')
self._create_directory_if_nonexistent(self.output_dir)
copy_all_files_in_directory(self.doc_dir, self.output_dir)
if not html:
return
self.logger.info('Copying HTML files to output directory...')
html_src_dir = os.path.join(self.doc_dir, 'html')
html_dest_dir = os.path.join(self.output_dir, 'html')
self._create_directory_if_nonexistent(html_dest_dir)
if os.path.exists(html_src_dir):
copy_all_files_in_directory(html_src_dir, html_dest_dir)
def _write_version_xml(self):
if not self.version:
self.logger.info('No version specified, so not writing version.xml')
return
version_xml_path = os.path.join(self.output_dir, 'version.xml')
src_version_xml_path = os.path.join(self.doc_dir, 'version.xml')
# Don't overwrite version.xml if it was in the doc directory.
if os.path.exists(version_xml_path) and \
os.path.exists(src_version_xml_path):
return
output_file = open(version_xml_path, 'w')
output_file.write(self.version)
output_file.close()
def _ignored_files_basenames(self):
return ' '.join([os.path.basename(x) for x in self.ignored_files])
def _run_gtkdoc_scan(self):
args = ['gtkdoc-scan',
'--module=%s' % self.module_name,
'--rebuild-types']
# Each source directory should have its own "--source-dir=" prefix.
args.extend(['--source-dir=%s' % path for path in self.source_dirs])
if self.decorator:
args.append('--ignore-decorators=%s' % self.decorator)
if self.deprecation_guard:
args.append('--deprecated-guards=%s' % self.deprecation_guard)
if self.output_dir:
args.append('--output-dir=%s' % self.output_dir)
# gtkdoc-scan wants the basenames of ignored headers, so strip the
# dirname. Different from "--source-dir", the headers should be
# specified as one long string.
ignored_files_basenames = self._ignored_files_basenames()
if ignored_files_basenames:
args.append('--ignore-headers=%s' % ignored_files_basenames)
self._run_command(args)
def _run_gtkdoc_scangobj(self):
env = os.environ
ldflags = self.ldflags
if self.library_path:
ldflags = ' "-L%s" ' % self.library_path + ldflags
current_ld_library_path = env.get('LD_LIBRARY_PATH')
if current_ld_library_path:
env['RUN'] = 'LD_LIBRARY_PATH="%s:%s" ' % (self.library_path, current_ld_library_path)
else:
env['RUN'] = 'LD_LIBRARY_PATH="%s" ' % self.library_path
if ldflags:
env['LDFLAGS'] = '%s %s' % (ldflags, env.get('LDFLAGS', ''))
if self.cflags:
env['CFLAGS'] = '%s %s' % (self.cflags, env.get('CFLAGS', ''))
if 'CFLAGS' in env:
self.logger.debug('CFLAGS=%s', env['CFLAGS'])
if 'LDFLAGS' in env:
self.logger.debug('LDFLAGS %s', env['LDFLAGS'])
if 'RUN' in env:
self.logger.debug('RUN=%s', env['RUN'])
self._run_command(['gtkdoc-scangobj', '--module=%s' % self.module_name],
env=env, cwd=self.output_dir)
def _run_gtkdoc_mktmpl(self):
args = ['gtkdoc-mktmpl', '--module=%s' % self.module_name]
self._run_command(args, cwd=self.output_dir)
def _run_gtkdoc_mkdb(self):
sgml_file = os.path.join(self.output_dir, self.main_sgml_file)
self._raise_exception_if_file_inaccessible(sgml_file)
args = ['gtkdoc-mkdb',
'--module=%s' % self.module_name,
'--main-sgml-file=%s' % sgml_file,
'--source-suffixes=h,c,cpp,cc',
'--output-format=xml',
'--sgml-mode']
ignored_files_basenames = self._ignored_files_basenames()
if ignored_files_basenames:
args.append('--ignore-files=%s' % ignored_files_basenames)
# Each directory should have its own "--source-dir=" prefix.
args.extend(['--source-dir=%s' % path for path in self.source_dirs])
self._run_command(args, cwd=self.output_dir)
def _run_gtkdoc_mkhtml(self):
html_dest_dir = os.path.join(self.output_dir, 'html')
if not os.path.isdir(html_dest_dir):
raise Exception("%s is not a directory, could not generate HTML"
% html_dest_dir)
elif not os.access(html_dest_dir, os.X_OK | os.R_OK | os.W_OK):
raise Exception("Could not access %s to generate HTML"
% html_dest_dir)
# gtkdoc-mkhtml expects the SGML path to be absolute.
sgml_file = os.path.join(os.path.abspath(self.output_dir),
self.main_sgml_file)
self._raise_exception_if_file_inaccessible(sgml_file)
self._run_command(['gtkdoc-mkhtml', self.module_name, sgml_file],
cwd=html_dest_dir)
def _run_gtkdoc_fixxref(self):
args = ['gtkdoc-fixxref',
'--module-dir=html',
'--html-dir=html']
args.extend(['--extra-dir=%s' % extra_dir for extra_dir in self.cross_reference_deps])
self._run_command(args, cwd=self.output_dir, ignore_warnings=True)
def rebase_installed_docs(self):
if not os.path.isdir(self.output_dir):
raise Exception("Tried to rebase documentation before generating it.")
html_dir = os.path.join(self.virtual_root + self.prefix, 'share', 'gtk-doc', 'html', self.module_name)
if not os.path.isdir(html_dir):
return
args = ['gtkdoc-rebase',
'--relative',
'--html-dir=%s' % html_dir]
args.extend(['--other-dir=%s' % extra_dir for extra_dir in self.cross_reference_deps])
if self.virtual_root:
args.extend(['--dest-dir=%s' % self.virtual_root])
self._run_command(args, cwd=self.output_dir)
def api_missing_documentation(self):
unused_doc_file = os.path.join(self.output_dir, self.module_name + "-unused.txt")
if not os.path.exists(unused_doc_file) or not os.access(unused_doc_file, os.R_OK):
return []
return open(unused_doc_file).read().splitlines()
class PkgConfigGTKDoc(GTKDoc):
"""Class reads a library's pkgconfig file to guess gtkdoc parameters.
Some gtkdoc parameters can be guessed by reading a library's pkgconfig
file, including the cflags, ldflags and version parameters. If you
provide these parameters as well, they will be appended to the ones
guessed via the pkgconfig file.
Keyword arguments:
pkg_config_path -- Path to the pkgconfig file for the library. Required.
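Example (illustrative sketch; the .pc path and arguments are hypothetical, and
a real run would typically also supply doc_dir and the main SGML file):
gtkdoc = PkgConfigGTKDoc('/usr/lib/pkgconfig/mylib.pc',
{'output_dir': '/tmp/mylib-docs',
'module_name': 'mylib',
'source_dirs': ['/path/to/mylib/src']})
gtkdoc.generate()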
"""
def __init__(self, pkg_config_path, args):
super(PkgConfigGTKDoc, self).__init__(args)
pkg_config = os.environ.get('PKG_CONFIG', 'pkg-config')
if not os.path.exists(pkg_config_path):
raise Exception('Could not find pkg-config file at: %s'
% pkg_config_path)
self.cflags += " " + self._run_command([pkg_config,
pkg_config_path,
'--cflags'], print_output=False)
self.ldflags += " " + self._run_command([pkg_config,
pkg_config_path,
'--libs'], print_output=False)
self.version = self._run_command([pkg_config,
pkg_config_path,
'--modversion'], print_output=False)
self.prefix = self._run_command([pkg_config,
pkg_config_path,
'--variable=prefix'], print_output=False)
| bsd-3-clause |
github-account-because-they-want-it/django-activity-stream | actstream/south_migrations/0006_auto__add_field_action_data.py | 8 | 5977 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from actstream.compat import user_model_label
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Action.data'
db.add_column('actstream_action', 'data', self.gf('django.db.models.fields.TextField')(null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Action.data'
db.delete_column('actstream_action', 'data')
models = {
'actstream.action': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'Action'},
'action_object_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'action_object'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'action_object_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'actor_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actor'", 'to': "orm['contenttypes.ContentType']"}),
'actor_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'target'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'target_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'verb': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'actstream.follow': {
'Meta': {'unique_together': "(('user', 'content_type', 'object_id'),)", 'object_name': 'Follow'},
'actor_only': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_model_label})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': user_model_label.split('.')[-1]},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['actstream']
| bsd-3-clause |
edx/edx-platform | common/djangoapps/student/role_helpers.py | 5 | 1412 | """
Helpers for student roles
"""
from openedx.core.djangoapps.django_comment_common.models import (
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_GROUP_MODERATOR,
FORUM_ROLE_MODERATOR,
Role
)
from common.djangoapps.student.roles import (
CourseBetaTesterRole,
CourseInstructorRole,
CourseStaffRole,
GlobalStaff,
OrgInstructorRole,
OrgStaffRole
)
def has_staff_roles(user, course_key):
"""
Return true if a user has any of the following roles
Staff, Instructor, Beta Tester, Forum Community TA, Forum Group Moderator, Forum Moderator, Forum Administrator
"""
forum_roles = [FORUM_ROLE_COMMUNITY_TA, FORUM_ROLE_GROUP_MODERATOR,
FORUM_ROLE_MODERATOR, FORUM_ROLE_ADMINISTRATOR]
is_staff = CourseStaffRole(course_key).has_user(user)
is_instructor = CourseInstructorRole(course_key).has_user(user)
is_beta_tester = CourseBetaTesterRole(course_key).has_user(user)
is_org_staff = OrgStaffRole(course_key.org).has_user(user)
is_org_instructor = OrgInstructorRole(course_key.org).has_user(user)
is_global_staff = GlobalStaff().has_user(user)
has_forum_role = Role.user_has_role_for_course(user, course_key, forum_roles)
if any([is_staff, is_instructor, is_beta_tester, is_org_staff,
is_org_instructor, is_global_staff, has_forum_role]):
return True
return False
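# Illustrative usage sketch (hypothetical course id and request object; assumes
# the usual opaque-keys helper for parsing course ids):
#     from opaque_keys.edx.keys import CourseKey
#     course_key = CourseKey.from_string('course-v1:edX+DemoX+Demo_Course')
#     if has_staff_roles(request.user, course_key):
#         pass  # e.g. expose staff-only tooling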
| agpl-3.0 |
ernestask/meson | mesonbuild/backend/xcodebackend.py | 2 | 40565 | # Copyright 2014-2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import backends
from .. import build
from .. import dependencies
from .. import mesonlib
import uuid, os, sys
from ..mesonlib import MesonException
class XCodeBackend(backends.Backend):
def __init__(self, build):
super().__init__(build)
self.name = 'xcode'
self.project_uid = self.environment.coredata.guid.replace('-', '')[:24]
self.project_conflist = self.gen_id()
self.indent = ' '
self.indent_level = 0
self.xcodetypemap = {'c': 'sourcecode.c.c',
'a': 'archive.ar',
'cc': 'sourcecode.cpp.cpp',
'cxx': 'sourcecode.cpp.cpp',
'cpp': 'sourcecode.cpp.cpp',
'c++': 'sourcecode.cpp.cpp',
'm': 'sourcecode.c.objc',
'mm': 'sourcecode.cpp.objcpp',
'h': 'sourcecode.c.h',
'hpp': 'sourcecode.cpp.h',
'hxx': 'sourcecode.cpp.h',
'hh': 'sourcecode.cpp.hh',
'inc': 'sourcecode.c.h',
'dylib': 'compiled.mach-o.dylib',
'o': 'compiled.mach-o.objfile',
}
self.maingroup_id = self.gen_id()
self.all_id = self.gen_id()
self.all_buildconf_id = self.gen_id()
self.buildtypes = ['debug']
self.test_id = self.gen_id()
self.test_command_id = self.gen_id()
self.test_buildconf_id = self.gen_id()
def gen_id(self):
return str(uuid.uuid4()).upper().replace('-', '')[:24]
def get_target_dir(self, target):
dirname = os.path.join(target.get_subdir(), self.environment.coredata.get_builtin_option('buildtype'))
os.makedirs(os.path.join(self.environment.get_build_dir(), dirname), exist_ok=True)
return dirname
def write_line(self, text):
self.ofile.write(self.indent * self.indent_level + text)
if not text.endswith('\n'):
self.ofile.write('\n')
def generate(self, interp):
self.interpreter = interp
test_data = self.serialize_tests()[0]
self.generate_filemap()
self.generate_buildmap()
self.generate_buildstylemap()
self.generate_build_phase_map()
self.generate_build_configuration_map()
self.generate_build_configurationlist_map()
self.generate_project_configurations_map()
self.generate_buildall_configurations_map()
self.generate_test_configurations_map()
self.generate_native_target_map()
self.generate_native_frameworks_map()
self.generate_source_phase_map()
self.generate_target_dependency_map()
self.generate_pbxdep_map()
self.generate_containerproxy_map()
self.proj_dir = os.path.join(self.environment.get_build_dir(), self.build.project_name + '.xcodeproj')
os.makedirs(self.proj_dir, exist_ok=True)
self.proj_file = os.path.join(self.proj_dir, 'project.pbxproj')
with open(self.proj_file, 'w') as self.ofile:
self.generate_prefix()
self.generate_pbx_aggregate_target()
self.generate_pbx_build_file()
self.generate_pbx_build_style()
self.generate_pbx_container_item_proxy()
self.generate_pbx_file_reference()
self.generate_pbx_frameworks_buildphase()
self.generate_pbx_group()
self.generate_pbx_native_target()
self.generate_pbx_project()
self.generate_pbx_shell_build_phase(test_data)
self.generate_pbx_sources_build_phase()
self.generate_pbx_target_dependency()
self.generate_xc_build_configuration()
self.generate_xc_configurationList()
self.generate_suffix()
def get_xcodetype(self, fname):
return self.xcodetypemap[fname.split('.')[-1]]
def generate_filemap(self):
self.filemap = {} # Key is source file relative to src root.
self.target_filemap = {}
for name, t in self.build.targets.items():
for s in t.sources:
if isinstance(s, mesonlib.File):
s = os.path.join(s.subdir, s.fname)
self.filemap[s] = self.gen_id()
for o in t.objects:
if isinstance(o, str):
o = os.path.join(t.subdir, o)
self.filemap[o] = self.gen_id()
self.target_filemap[name] = self.gen_id()
def generate_buildmap(self):
self.buildmap = {}
for t in self.build.targets.values():
for s in t.sources:
s = os.path.join(s.subdir, s.fname)
self.buildmap[s] = self.gen_id()
for o in t.objects:
o = os.path.join(t.subdir, o)
if isinstance(o, str):
self.buildmap[o] = self.gen_id()
def generate_buildstylemap(self):
self.buildstylemap = {'debug': self.gen_id()}
def generate_build_phase_map(self):
for tname, t in self.build.targets.items():
# generate id for our own target-name
t.buildphasemap = {}
t.buildphasemap[tname] = self.gen_id()
# each target can have its own Frameworks/Sources/..., generate ids for those
t.buildphasemap['Frameworks'] = self.gen_id()
t.buildphasemap['Resources'] = self.gen_id()
t.buildphasemap['Sources'] = self.gen_id()
def generate_build_configuration_map(self):
self.buildconfmap = {}
for t in self.build.targets:
bconfs = {'debug': self.gen_id()}
self.buildconfmap[t] = bconfs
def generate_project_configurations_map(self):
self.project_configurations = {'debug': self.gen_id()}
def generate_buildall_configurations_map(self):
self.buildall_configurations = {'debug': self.gen_id()}
def generate_test_configurations_map(self):
self.test_configurations = {'debug': self.gen_id()}
def generate_build_configurationlist_map(self):
self.buildconflistmap = {}
for t in self.build.targets:
self.buildconflistmap[t] = self.gen_id()
def generate_native_target_map(self):
self.native_targets = {}
for t in self.build.targets:
self.native_targets[t] = self.gen_id()
def generate_native_frameworks_map(self):
self.native_frameworks = {}
self.native_frameworks_fileref = {}
for t in self.build.targets.values():
for dep in t.get_external_deps():
if isinstance(dep, dependencies.AppleFrameworks):
for f in dep.frameworks:
self.native_frameworks[f] = self.gen_id()
self.native_frameworks_fileref[f] = self.gen_id()
def generate_target_dependency_map(self):
self.target_dependency_map = {}
for tname, t in self.build.targets.items():
for target in t.link_targets:
self.target_dependency_map[(tname, target.get_basename())] = self.gen_id()
def generate_pbxdep_map(self):
self.pbx_dep_map = {}
for t in self.build.targets:
self.pbx_dep_map[t] = self.gen_id()
def generate_containerproxy_map(self):
self.containerproxy_map = {}
for t in self.build.targets:
self.containerproxy_map[t] = self.gen_id()
def generate_source_phase_map(self):
self.source_phase = {}
for t in self.build.targets:
self.source_phase[t] = self.gen_id()
def generate_pbx_aggregate_target(self):
self.ofile.write('\n/* Begin PBXAggregateTarget section */\n')
self.write_line('%s /* ALL_BUILD */ = {' % self.all_id)
self.indent_level += 1
self.write_line('isa = PBXAggregateTarget;')
self.write_line('buildConfigurationList = %s;' % self.all_buildconf_id)
self.write_line('buildPhases = (')
self.write_line(');')
self.write_line('dependencies = (')
self.indent_level += 1
for t in self.build.targets:
self.write_line('%s /* PBXTargetDependency */,' % self.pbx_dep_map[t])
self.indent_level -= 1
self.write_line(');')
self.write_line('name = ALL_BUILD;')
self.write_line('productName = ALL_BUILD;')
self.indent_level -= 1
self.write_line('};')
self.write_line('%s /* RUN_TESTS */ = {' % self.test_id)
self.indent_level += 1
self.write_line('isa = PBXAggregateTarget;')
self.write_line('buildConfigurationList = %s;' % self.test_buildconf_id)
self.write_line('buildPhases = (')
self.indent_level += 1
self.write_line('%s /* test run command */,' % self.test_command_id)
self.indent_level -= 1
self.write_line(');')
self.write_line('dependencies = (')
self.write_line(');')
self.write_line('name = RUN_TESTS;')
self.write_line('productName = RUN_TESTS;')
self.indent_level -= 1
self.write_line('};')
self.ofile.write('/* End PBXAggregateTarget section */\n')
def generate_pbx_build_file(self):
self.ofile.write('\n/* Begin PBXBuildFile section */\n')
templ = '%s /* %s */ = { isa = PBXBuildFile; fileRef = %s /* %s */; settings = { COMPILER_FLAGS = "%s"; }; };\n'
otempl = '%s /* %s */ = { isa = PBXBuildFile; fileRef = %s /* %s */;};\n'
for t in self.build.targets.values():
for dep in t.get_external_deps():
if isinstance(dep, dependencies.AppleFrameworks):
for f in dep.frameworks:
self.ofile.write('%s /* %s.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = %s /* %s.framework */; };\n' % (self.native_frameworks[f], f, self.native_frameworks_fileref[f], f))
for s in t.sources:
if isinstance(s, mesonlib.File):
s = s.fname
if isinstance(s, str):
s = os.path.join(t.subdir, s)
idval = self.buildmap[s]
fullpath = os.path.join(self.environment.get_source_dir(), s)
fileref = self.filemap[s]
fullpath2 = fullpath
compiler_args = ''
self.ofile.write(templ % (idval, fullpath, fileref, fullpath2, compiler_args))
for o in t.objects:
o = os.path.join(t.subdir, o)
idval = self.buildmap[o]
fileref = self.filemap[o]
fullpath = os.path.join(self.environment.get_source_dir(), o)
fullpath2 = fullpath
self.ofile.write(otempl % (idval, fullpath, fileref, fullpath2))
self.ofile.write('/* End PBXBuildFile section */\n')
def generate_pbx_build_style(self):
self.ofile.write('\n/* Begin PBXBuildStyle section */\n')
for name, idval in self.buildstylemap.items():
self.write_line('%s /* %s */ = {\n' % (idval, name))
self.indent_level += 1
self.write_line('isa = PBXBuildStyle;\n')
self.write_line('buildSettings = {\n')
self.indent_level += 1
self.write_line('COPY_PHASE_STRIP = NO;\n')
self.indent_level -= 1
self.write_line('};\n')
self.write_line('name = "%s";\n' % name)
self.indent_level -= 1
self.write_line('};\n')
self.ofile.write('/* End PBXBuildStyle section */\n')
def generate_pbx_container_item_proxy(self):
self.ofile.write('\n/* Begin PBXContainerItemProxy section */\n')
for t in self.build.targets:
self.write_line('%s /* PBXContainerItemProxy */ = {' % self.containerproxy_map[t])
self.indent_level += 1
self.write_line('isa = PBXContainerItemProxy;')
self.write_line('containerPortal = %s /* Project object */;' % self.project_uid)
self.write_line('proxyType = 1;')
self.write_line('remoteGlobalIDString = %s;' % self.native_targets[t])
self.write_line('remoteInfo = "%s";' % t)
self.indent_level -= 1
self.write_line('};')
self.ofile.write('/* End PBXContainerItemProxy section */\n')
def generate_pbx_file_reference(self):
self.ofile.write('\n/* Begin PBXFileReference section */\n')
for t in self.build.targets.values():
for dep in t.get_external_deps():
if isinstance(dep, dependencies.AppleFrameworks):
for f in dep.frameworks:
self.ofile.write('%s /* %s.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = %s.framework; path = System/Library/Frameworks/%s.framework; sourceTree = SDKROOT; };\n' % (self.native_frameworks_fileref[f], f, f, f))
src_templ = '%s /* %s */ = { isa = PBXFileReference; explicitFileType = "%s"; fileEncoding = 4; name = "%s"; path = "%s"; sourceTree = SOURCE_ROOT; };\n'
for fname, idval in self.filemap.items():
fullpath = os.path.join(self.environment.get_source_dir(), fname)
xcodetype = self.get_xcodetype(fname)
name = os.path.split(fname)[-1]
path = fname
self.ofile.write(src_templ % (idval, fullpath, xcodetype, name, path))
target_templ = '%s /* %s */ = { isa = PBXFileReference; explicitFileType = "%s"; path = %s; refType = %d; sourceTree = BUILT_PRODUCTS_DIR; };\n'
for tname, idval in self.target_filemap.items():
t = self.build.targets[tname]
fname = t.get_filename()
reftype = 0
if isinstance(t, build.Executable):
typestr = 'compiled.mach-o.executable'
path = fname
elif isinstance(t, build.SharedLibrary):
typestr = self.get_xcodetype('dummy.dylib')
path = fname
else:
typestr = self.get_xcodetype(fname)
path = '"%s"' % t.get_filename()
self.ofile.write(target_templ % (idval, tname, typestr, path, reftype))
self.ofile.write('/* End PBXFileReference section */\n')
def generate_pbx_frameworks_buildphase(self):
for tname, t in self.build.targets.items():
self.ofile.write('\n/* Begin PBXFrameworksBuildPhase section */\n')
self.indent_level += 1
self.write_line('%s /* %s */ = {\n' % (t.buildphasemap['Frameworks'], 'Frameworks'))
self.indent_level += 1
self.write_line('isa = PBXFrameworksBuildPhase;\n')
self.write_line('buildActionMask = %s;\n' % (2147483647))
self.write_line('files = (\n')
self.indent_level += 1
for dep in t.get_external_deps():
if isinstance(dep, dependencies.AppleFrameworks):
for f in dep.frameworks:
self.write_line('%s /* %s.framework in Frameworks */,\n' % (self.native_frameworks[f], f))
self.indent_level -= 1
self.write_line(');\n')
self.write_line('runOnlyForDeploymentPostprocessing = 0;\n')
self.indent_level -= 1
self.write_line('};\n')
self.ofile.write('/* End PBXFrameworksBuildPhase section */\n')
def generate_pbx_group(self):
groupmap = {}
target_src_map = {}
for t in self.build.targets:
groupmap[t] = self.gen_id()
target_src_map[t] = self.gen_id()
self.ofile.write('\n/* Begin PBXGroup section */\n')
sources_id = self.gen_id()
resources_id = self.gen_id()
products_id = self.gen_id()
frameworks_id = self.gen_id()
self.write_line('%s = {' % self.maingroup_id)
self.indent_level += 1
self.write_line('isa = PBXGroup;')
self.write_line('children = (')
self.indent_level += 1
self.write_line('%s /* Sources */,' % sources_id)
self.write_line('%s /* Resources */,' % resources_id)
self.write_line('%s /* Products */,' % products_id)
self.write_line('%s /* Frameworks */,' % frameworks_id)
self.indent_level -= 1
self.write_line(');')
self.write_line('sourceTree = "<group>";')
self.indent_level -= 1
self.write_line('};')
# Sources
self.write_line('%s /* Sources */ = {' % sources_id)
self.indent_level += 1
self.write_line('isa = PBXGroup;')
self.write_line('children = (')
self.indent_level += 1
for t in self.build.targets:
self.write_line('%s /* %s */,' % (groupmap[t], t))
self.indent_level -= 1
self.write_line(');')
self.write_line('name = Sources;')
self.write_line('sourceTree = "<group>";')
self.indent_level -= 1
self.write_line('};')
self.write_line('%s /* Resources */ = {' % resources_id)
self.indent_level += 1
self.write_line('isa = PBXGroup;')
self.write_line('children = (')
self.write_line(');')
self.write_line('name = Resources;')
self.write_line('sourceTree = "<group>";')
self.indent_level -= 1
self.write_line('};')
self.write_line('%s /* Frameworks */ = {' % frameworks_id)
self.indent_level += 1
self.write_line('isa = PBXGroup;')
self.write_line('children = (')
# write frameworks
self.indent_level += 1
for t in self.build.targets.values():
for dep in t.get_external_deps():
if isinstance(dep, dependencies.AppleFrameworks):
for f in dep.frameworks:
self.write_line('%s /* %s.framework */,\n' % (self.native_frameworks_fileref[f], f))
self.indent_level -= 1
self.write_line(');')
self.write_line('name = Frameworks;')
self.write_line('sourceTree = "<group>";')
self.indent_level -= 1
self.write_line('};')
# Targets
for t in self.build.targets:
self.write_line('%s /* %s */ = {' % (groupmap[t], t))
self.indent_level += 1
self.write_line('isa = PBXGroup;')
self.write_line('children = (')
self.indent_level += 1
self.write_line('%s /* Source files */,' % target_src_map[t])
self.indent_level -= 1
self.write_line(');')
self.write_line('name = "%s";' % t)
self.write_line('sourceTree = "<group>";')
self.indent_level -= 1
self.write_line('};')
self.write_line('%s /* Source files */ = {' % target_src_map[t])
self.indent_level += 1
self.write_line('isa = PBXGroup;')
self.write_line('children = (')
self.indent_level += 1
for s in self.build.targets[t].sources:
s = os.path.join(s.subdir, s.fname)
if isinstance(s, str):
self.write_line('%s /* %s */,' % (self.filemap[s], s))
for o in self.build.targets[t].objects:
o = os.path.join(self.build.targets[t].subdir, o)
self.write_line('%s /* %s */,' % (self.filemap[o], o))
self.indent_level -= 1
self.write_line(');')
self.write_line('name = "Source files";')
self.write_line('sourceTree = "<group>";')
self.indent_level -= 1
self.write_line('};')
# And finally products
self.write_line('%s /* Products */ = {' % products_id)
self.indent_level += 1
self.write_line('isa = PBXGroup;')
self.write_line('children = (')
self.indent_level += 1
for t in self.build.targets:
self.write_line('%s /* %s */,' % (self.target_filemap[t], t))
self.indent_level -= 1
self.write_line(');')
self.write_line('name = Products;')
self.write_line('sourceTree = "<group>";')
self.indent_level -= 1
self.write_line('};')
self.ofile.write('/* End PBXGroup section */\n')
def generate_pbx_native_target(self):
self.ofile.write('\n/* Begin PBXNativeTarget section */\n')
for tname, idval in self.native_targets.items():
t = self.build.targets[tname]
self.write_line('%s /* %s */ = {' % (idval, tname))
self.indent_level += 1
self.write_line('isa = PBXNativeTarget;')
self.write_line('buildConfigurationList = %s /* Build configuration list for PBXNativeTarget "%s" */;'
% (self.buildconflistmap[tname], tname))
self.write_line('buildPhases = (')
self.indent_level += 1
for bpname, bpval in t.buildphasemap.items():
self.write_line('%s /* %s yyy */,' % (bpval, bpname))
self.indent_level -= 1
self.write_line(');')
self.write_line('buildRules = (')
self.write_line(');')
self.write_line('dependencies = (')
self.indent_level += 1
for lt in self.build.targets[tname].link_targets:
# NOT DOCUMENTED, may need to make different links
# to same target have different targetdependency item.
idval = self.pbx_dep_map[lt.get_id()]
self.write_line('%s /* PBXTargetDependency */,' % idval)
self.indent_level -= 1
self.write_line(");")
self.write_line('name = "%s";' % tname)
self.write_line('productName = "%s";' % tname)
self.write_line('productReference = %s /* %s */;' % (self.target_filemap[tname], tname))
if isinstance(t, build.Executable):
typestr = 'com.apple.product-type.tool'
elif isinstance(t, build.StaticLibrary):
typestr = 'com.apple.product-type.library.static'
elif isinstance(t, build.SharedLibrary):
typestr = 'com.apple.product-type.library.dynamic'
else:
raise MesonException('Unknown target type for %s' % tname)
self.write_line('productType = "%s";' % typestr)
self.indent_level -= 1
self.write_line('};')
self.ofile.write('/* End PBXNativeTarget section */\n')
def generate_pbx_project(self):
self.ofile.write('\n/* Begin PBXProject section */\n')
self.write_line('%s /* Project object */ = {' % self.project_uid)
self.indent_level += 1
self.write_line('isa = PBXProject;')
self.write_line('attributes = {')
self.indent_level += 1
self.write_line('BuildIndependentTargetsInParallel = YES;')
self.indent_level -= 1
self.write_line('};')
conftempl = 'buildConfigurationList = %s /* build configuration list for PBXProject "%s"*/;'
self.write_line(conftempl % (self.project_conflist, self.build.project_name))
self.write_line('buildSettings = {')
self.write_line('};')
self.write_line('buildStyles = (')
self.indent_level += 1
for name, idval in self.buildstylemap.items():
self.write_line('%s /* %s */,' % (idval, name))
self.indent_level -= 1
self.write_line(');')
self.write_line('compatibilityVersion = "Xcode 3.2";')
self.write_line('hasScannedForEncodings = 0;')
self.write_line('mainGroup = %s;' % self.maingroup_id)
self.write_line('projectDirPath = "%s";' % self.build_to_src)
self.write_line('projectRoot = "";')
self.write_line('targets = (')
self.indent_level += 1
self.write_line('%s /* ALL_BUILD */,' % self.all_id)
self.write_line('%s /* RUN_TESTS */,' % self.test_id)
for t in self.build.targets:
self.write_line('%s /* %s */,' % (self.native_targets[t], t))
self.indent_level -= 1
self.write_line(');')
self.indent_level -= 1
self.write_line('};')
self.ofile.write('/* End PBXProject section */\n')
def generate_pbx_shell_build_phase(self, test_data):
self.ofile.write('\n/* Begin PBXShellScriptBuildPhase section */\n')
self.write_line('%s = {' % self.test_command_id)
self.indent_level += 1
self.write_line('isa = PBXShellScriptBuildPhase;')
self.write_line('buildActionMask = 2147483647;')
self.write_line('files = (')
self.write_line(');')
self.write_line('inputPaths = (')
self.write_line(');')
self.write_line('outputPaths = (')
self.write_line(');')
self.write_line('runOnlyForDeploymentPostprocessing = 0;')
self.write_line('shellPath = /bin/sh;')
script_root = self.environment.get_script_dir()
test_script = os.path.join(script_root, 'meson_test.py')
cmd = mesonlib.python_command + [test_script, test_data, '--wd', self.environment.get_build_dir()]
cmdstr = ' '.join(["'%s'" % i for i in cmd])
self.write_line('shellScript = "%s";' % cmdstr)
self.write_line('showEnvVarsInLog = 0;')
self.indent_level -= 1
self.write_line('};')
self.ofile.write('/* End PBXShellScriptBuildPhase section */\n')
def generate_pbx_sources_build_phase(self):
self.ofile.write('\n/* Begin PBXSourcesBuildPhase section */\n')
for name, phase_id in self.source_phase.items():
t = self.build.targets[name]
self.write_line('%s /* Sources */ = {' % (t.buildphasemap[name]))
self.indent_level += 1
self.write_line('isa = PBXSourcesBuildPhase;')
self.write_line('buildActionMask = 2147483647;')
self.write_line('files = (')
self.indent_level += 1
for s in self.build.targets[name].sources:
s = os.path.join(s.subdir, s.fname)
if not self.environment.is_header(s):
self.write_line('%s /* %s */,' % (self.buildmap[s], os.path.join(self.environment.get_source_dir(), s)))
self.indent_level -= 1
self.write_line(');')
self.write_line('runOnlyForDeploymentPostprocessing = 0;')
self.indent_level -= 1
self.write_line('};')
self.ofile.write('/* End PBXSourcesBuildPhase section */\n')
def generate_pbx_target_dependency(self):
self.ofile.write('\n/* Begin PBXTargetDependency section */\n')
for t in self.build.targets:
idval = self.pbx_dep_map[t] # VERIFY: is this correct?
self.write_line('%s /* PBXTargetDependency */ = {' % idval)
self.indent_level += 1
self.write_line('isa = PBXTargetDependency;')
self.write_line('target = %s /* %s */;' % (self.native_targets[t], t))
self.write_line('targetProxy = %s /* PBXContainerItemProxy */;' % self.containerproxy_map[t])
self.indent_level -= 1
self.write_line('};')
self.ofile.write('/* End PBXTargetDependency section */\n')
def generate_xc_build_configuration(self):
self.ofile.write('\n/* Begin XCBuildConfiguration section */\n')
# First the setup for the toplevel project.
for buildtype in self.buildtypes:
self.write_line('%s /* %s */ = {' % (self.project_configurations[buildtype], buildtype))
self.indent_level += 1
self.write_line('isa = XCBuildConfiguration;')
self.write_line('buildSettings = {')
self.indent_level += 1
self.write_line('ARCHS = "$(ARCHS_STANDARD_32_64_BIT)";')
self.write_line('ONLY_ACTIVE_ARCH = YES;')
self.write_line('SDKROOT = "macosx";')
self.write_line('SYMROOT = "%s/build";' % self.environment.get_build_dir())
self.indent_level -= 1
self.write_line('};')
self.write_line('name = "%s";' % buildtype)
self.indent_level -= 1
self.write_line('};')
# Then the all target.
for buildtype in self.buildtypes:
self.write_line('%s /* %s */ = {' % (self.buildall_configurations[buildtype], buildtype))
self.indent_level += 1
self.write_line('isa = XCBuildConfiguration;')
self.write_line('buildSettings = {')
self.indent_level += 1
self.write_line('COMBINE_HIDPI_IMAGES = YES;')
self.write_line('GCC_GENERATE_DEBUGGING_SYMBOLS = NO;')
self.write_line('GCC_INLINES_ARE_PRIVATE_EXTERN = NO;')
self.write_line('GCC_OPTIMIZATION_LEVEL = 0;')
self.write_line('GCC_PREPROCESSOR_DEFINITIONS = ("");')
self.write_line('GCC_SYMBOLS_PRIVATE_EXTERN = NO;')
self.write_line('INSTALL_PATH = "";')
self.write_line('OTHER_CFLAGS = " ";')
self.write_line('OTHER_LDFLAGS = " ";')
self.write_line('OTHER_REZFLAGS = "";')
self.write_line('PRODUCT_NAME = ALL_BUILD;')
self.write_line('SECTORDER_FLAGS = "";')
self.write_line('SYMROOT = "%s";' % self.environment.get_build_dir())
self.write_line('USE_HEADERMAP = NO;')
self.write_line('WARNING_CFLAGS = ("-Wmost", "-Wno-four-char-constants", "-Wno-unknown-pragmas", );')
self.indent_level -= 1
self.write_line('};')
self.write_line('name = "%s";' % buildtype)
self.indent_level -= 1
self.write_line('};')
# Then the test target.
for buildtype in self.buildtypes:
self.write_line('%s /* %s */ = {' % (self.test_configurations[buildtype], buildtype))
self.indent_level += 1
self.write_line('isa = XCBuildConfiguration;')
self.write_line('buildSettings = {')
self.indent_level += 1
self.write_line('COMBINE_HIDPI_IMAGES = YES;')
self.write_line('GCC_GENERATE_DEBUGGING_SYMBOLS = NO;')
self.write_line('GCC_INLINES_ARE_PRIVATE_EXTERN = NO;')
self.write_line('GCC_OPTIMIZATION_LEVEL = 0;')
self.write_line('GCC_PREPROCESSOR_DEFINITIONS = ("");')
self.write_line('GCC_SYMBOLS_PRIVATE_EXTERN = NO;')
self.write_line('INSTALL_PATH = "";')
self.write_line('OTHER_CFLAGS = " ";')
self.write_line('OTHER_LDFLAGS = " ";')
self.write_line('OTHER_REZFLAGS = "";')
self.write_line('PRODUCT_NAME = RUN_TESTS;')
self.write_line('SECTORDER_FLAGS = "";')
self.write_line('SYMROOT = "%s";' % self.environment.get_build_dir())
self.write_line('USE_HEADERMAP = NO;')
self.write_line('WARNING_CFLAGS = ("-Wmost", "-Wno-four-char-constants", "-Wno-unknown-pragmas", );')
self.indent_level -= 1
self.write_line('};')
self.write_line('name = "%s";' % buildtype)
self.indent_level -= 1
self.write_line('};')
# Now finally targets.
langnamemap = {'c': 'C', 'cpp': 'CPLUSPLUS', 'objc': 'OBJC', 'objcpp': 'OBJCPLUSPLUS'}
for target_name, target in self.build.targets.items():
for buildtype in self.buildtypes:
dep_libs = []
links_dylib = False
headerdirs = []
for d in target.include_dirs:
for sd in d.incdirs:
cd = os.path.join(d.curdir, sd)
headerdirs.append(os.path.join(self.environment.get_source_dir(), cd))
headerdirs.append(os.path.join(self.environment.get_build_dir(), cd))
for l in target.link_targets:
abs_path = os.path.join(self.environment.get_build_dir(),
l.subdir, buildtype, l.get_filename())
dep_libs.append("'%s'" % abs_path)
if isinstance(l, build.SharedLibrary):
links_dylib = True
if links_dylib:
dep_libs = ['-Wl,-search_paths_first', '-Wl,-headerpad_max_install_names'] + dep_libs
dylib_version = None
if isinstance(target, build.SharedLibrary):
ldargs = ['-dynamiclib', '-Wl,-headerpad_max_install_names'] + dep_libs
install_path = os.path.join(self.environment.get_build_dir(), target.subdir, buildtype)
dylib_version = target.version
else:
ldargs = dep_libs
install_path = ''
if dylib_version is not None:
product_name = target.get_basename() + '.' + dylib_version
else:
product_name = target.get_basename()
ldargs += target.link_args
ldstr = ' '.join(ldargs)
valid = self.buildconfmap[target_name][buildtype]
langargs = {}
for lang in self.environment.coredata.compilers:
if lang not in langnamemap:
continue
gargs = self.build.global_args.get(lang, [])
targs = target.get_extra_args(lang)
args = gargs + targs
if len(args) > 0:
langargs[langnamemap[lang]] = args
symroot = os.path.join(self.environment.get_build_dir(), target.subdir)
self.write_line('%s /* %s */ = {' % (valid, buildtype))
self.indent_level += 1
self.write_line('isa = XCBuildConfiguration;')
self.write_line('buildSettings = {')
self.indent_level += 1
self.write_line('COMBINE_HIDPI_IMAGES = YES;')
if dylib_version is not None:
self.write_line('DYLIB_CURRENT_VERSION = "%s";' % dylib_version)
self.write_line('EXECUTABLE_PREFIX = "%s";' % target.prefix)
if target.suffix == '':
suffix = ''
else:
suffix = '.' + target.suffix
self.write_line('EXECUTABLE_SUFFIX = "%s";' % suffix)
self.write_line('GCC_GENERATE_DEBUGGING_SYMBOLS = YES;')
self.write_line('GCC_INLINES_ARE_PRIVATE_EXTERN = NO;')
self.write_line('GCC_OPTIMIZATION_LEVEL = 0;')
self.write_line('GCC_PREPROCESSOR_DEFINITIONS = ("");')
self.write_line('GCC_SYMBOLS_PRIVATE_EXTERN = NO;')
if len(headerdirs) > 0:
quotedh = ','.join(['"\\"%s\\""' % i for i in headerdirs])
self.write_line('HEADER_SEARCH_PATHS=(%s);' % quotedh)
self.write_line('INSTALL_PATH = "%s";' % install_path)
self.write_line('LIBRARY_SEARCH_PATHS = "";')
if isinstance(target, build.SharedLibrary):
self.write_line('LIBRARY_STYLE = DYNAMIC;')
for langname, args in langargs.items():
argstr = ' '.join(args)
self.write_line('OTHER_%sFLAGS = "%s";' % (langname, argstr))
self.write_line('OTHER_LDFLAGS = "%s";' % ldstr)
self.write_line('OTHER_REZFLAGS = "";')
self.write_line('PRODUCT_NAME = %s;' % product_name)
self.write_line('SECTORDER_FLAGS = "";')
self.write_line('SYMROOT = "%s";' % symroot)
self.write_line('USE_HEADERMAP = NO;')
self.write_line('WARNING_CFLAGS = ("-Wmost", "-Wno-four-char-constants", "-Wno-unknown-pragmas", );')
self.indent_level -= 1
self.write_line('};')
self.write_line('name = "%s";' % buildtype)
self.indent_level -= 1
self.write_line('};')
self.ofile.write('/* End XCBuildConfiguration section */\n')
def generate_xc_configurationList(self):
self.ofile.write('\n/* Begin XCConfigurationList section */\n')
self.write_line('%s /* Build configuration list for PBXProject "%s" */ = {' % (self.project_conflist, self.build.project_name))
self.indent_level += 1
self.write_line('isa = XCConfigurationList;')
self.write_line('buildConfigurations = (')
self.indent_level += 1
for buildtype in self.buildtypes:
self.write_line('%s /* %s */,' % (self.project_configurations[buildtype], buildtype))
self.indent_level -= 1
self.write_line(');')
self.write_line('defaultConfigurationIsVisible = 0;')
self.write_line('defaultConfigurationName = debug;')
self.indent_level -= 1
self.write_line('};')
# Now the all target
self.write_line('%s /* Build configuration list for PBXAggregateTarget "ALL_BUILD" */ = {' % self.all_buildconf_id)
self.indent_level += 1
self.write_line('isa = XCConfigurationList;')
self.write_line('buildConfigurations = (')
self.indent_level += 1
for buildtype in self.buildtypes:
self.write_line('%s /* %s */,' % (self.buildall_configurations[buildtype], buildtype))
self.indent_level -= 1
self.write_line(');')
self.write_line('defaultConfigurationIsVisible = 0;')
self.write_line('defaultConfigurationName = debug;')
self.indent_level -= 1
self.write_line('};')
# Test target
self.write_line('%s /* Build configuration list for PBXAggregateTarget "ALL_BUILD" */ = {' % self.test_buildconf_id)
self.indent_level += 1
self.write_line('isa = XCConfigurationList;')
self.write_line('buildConfigurations = (')
self.indent_level += 1
for buildtype in self.buildtypes:
self.write_line('%s /* %s */,' % (self.test_configurations[buildtype], buildtype))
self.indent_level -= 1
self.write_line(');')
self.write_line('defaultConfigurationIsVisible = 0;')
self.write_line('defaultConfigurationName = debug;')
self.indent_level -= 1
self.write_line('};')
for target_name in self.build.targets:
listid = self.buildconflistmap[target_name]
self.write_line('%s /* Build configuration list for PBXNativeTarget "%s" */ = {' % (listid, target_name))
self.indent_level += 1
self.write_line('isa = XCConfigurationList;')
self.write_line('buildConfigurations = (')
self.indent_level += 1
typestr = 'debug'
idval = self.buildconfmap[target_name][typestr]
self.write_line('%s /* %s */,' % (idval, typestr))
self.indent_level -= 1
self.write_line(');')
self.write_line('defaultConfigurationIsVisible = 0;')
self.write_line('defaultConfigurationName = "%s";' % typestr)
self.indent_level -= 1
self.write_line('};')
self.ofile.write('/* End XCConfigurationList section */\n')
def generate_prefix(self):
self.ofile.write('// !$*UTF8*$!\n{\n')
self.indent_level += 1
self.write_line('archiveVersion = 1;\n')
self.write_line('classes = {\n')
self.write_line('};\n')
self.write_line('objectVersion = 46;\n')
self.write_line('objects = {\n')
self.indent_level += 1
def generate_suffix(self):
self.indent_level -= 1
self.write_line('};\n')
self.write_line('rootObject = ' + self.project_uid + ';')
self.indent_level -= 1
self.write_line('}\n')
| apache-2.0 |
berfinsari/metricbeat | membeat/vendor/github.com/elastic/beats/packetbeat/tests/system/gen/memcache/mc.py | 15 | 1393 |
from contextlib import (contextmanager)
import argparse
import sys
import pylibmc
def parse_args(args=None):
p = argparse.ArgumentParser()
p.add_argument('--protocol', '-p', default='text',
help="choose protocol type. One of text or bin")
p.add_argument('--remote', '-r', default='127.0.0.1:11211',
help="remote server address")
return p.parse_args(sys.argv[1:] if args is None else args)
def connect_tcp(opts=None):
opts = opts or parse_args()
opts.transport = 'tcp'
return connect(opts)
def connect_udp(opts=None):
opts = opts or parse_args()
opts.transport = 'udp'
return connect(opts)
def connect(opts):
if opts.transport == 'udp':
addr = 'udp:' + opts.remote
else:
addr = opts.remote
return pylibmc.Client([addr],
binary=opts.protocol == 'bin')
def make_connect_cmd(con):
def go(opts=None):
mc = con(opts)
try:
yield mc
finally:
mc.disconnect_all()
return contextmanager(go)
def make_run(con):
def go(fn, opts=None):
with con() as mc:
fn(mc)
return go
connection = make_connect_cmd(connect)
tcp_connection = make_connect_cmd(connect_tcp)
udp_connection = make_connect_cmd(connect_udp)
run_tcp = make_run(tcp_connection)
run_udp = make_run(udp_connection)
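# Illustrative usage sketch (hypothetical callback; assumes a memcached server
# is listening at the --remote address, 127.0.0.1:11211 by default):
#     def set_then_get(mc):
#         mc.set('greeting', 'hello')
#         assert mc.get('greeting') == 'hello'
#     run_tcp(set_then_get)    # run_udp(set_then_get) exercises the UDP transport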
| gpl-3.0 |
tinkerinestudio/Tinkerine-Suite | TinkerineSuite/pypy/lib-python/2.7/lib2to3/pgen2/driver.py | 98 | 4694 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# Modifications:
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Parser driver.
This provides a high-level interface to parse a file into a syntax tree.
"""
__author__ = "Guido van Rossum <[email protected]>"
__all__ = ["Driver", "load_grammar"]
# Python imports
import codecs
import os
import logging
import StringIO
import sys
# Pgen imports
from . import grammar, parse, token, tokenize, pgen
class Driver(object):
def __init__(self, grammar, convert=None, logger=None):
self.grammar = grammar
if logger is None:
logger = logging.getLogger()
self.logger = logger
self.convert = convert
def parse_tokens(self, tokens, debug=False):
"""Parse a series of tokens and return the syntax tree."""
# XXX Move the prefix computation into a wrapper around tokenize.
p = parse.Parser(self.grammar, self.convert)
p.setup()
lineno = 1
column = 0
type = value = start = end = line_text = None
prefix = u""
for quintuple in tokens:
type, value, start, end, line_text = quintuple
if start != (lineno, column):
assert (lineno, column) <= start, ((lineno, column), start)
s_lineno, s_column = start
if lineno < s_lineno:
prefix += "\n" * (s_lineno - lineno)
lineno = s_lineno
column = 0
if column < s_column:
prefix += line_text[column:s_column]
column = s_column
if type in (tokenize.COMMENT, tokenize.NL):
prefix += value
lineno, column = end
if value.endswith("\n"):
lineno += 1
column = 0
continue
if type == token.OP:
type = grammar.opmap[value]
if debug:
self.logger.debug("%s %r (prefix=%r)",
token.tok_name[type], value, prefix)
if p.addtoken(type, value, (prefix, start)):
if debug:
self.logger.debug("Stop.")
break
prefix = ""
lineno, column = end
if value.endswith("\n"):
lineno += 1
column = 0
else:
# We never broke out -- EOF is too soon (how can this happen???)
raise parse.ParseError("incomplete input",
type, value, (prefix, start))
return p.rootnode
def parse_stream_raw(self, stream, debug=False):
"""Parse a stream and return the syntax tree."""
tokens = tokenize.generate_tokens(stream.readline)
return self.parse_tokens(tokens, debug)
def parse_stream(self, stream, debug=False):
"""Parse a stream and return the syntax tree."""
return self.parse_stream_raw(stream, debug)
def parse_file(self, filename, encoding=None, debug=False):
"""Parse a file and return the syntax tree."""
stream = codecs.open(filename, "r", encoding)
try:
return self.parse_stream(stream, debug)
finally:
stream.close()
def parse_string(self, text, debug=False):
"""Parse a string and return the syntax tree."""
tokens = tokenize.generate_tokens(StringIO.StringIO(text).readline)
return self.parse_tokens(tokens, debug)
def load_grammar(gt="Grammar.txt", gp=None,
save=True, force=False, logger=None):
"""Load the grammar (maybe from a pickle)."""
if logger is None:
logger = logging.getLogger()
if gp is None:
head, tail = os.path.splitext(gt)
if tail == ".txt":
tail = ""
gp = head + tail + ".".join(map(str, sys.version_info)) + ".pickle"
if force or not _newer(gp, gt):
logger.info("Generating grammar tables from %s", gt)
g = pgen.generate_grammar(gt)
if save:
logger.info("Writing grammar tables to %s", gp)
try:
g.dump(gp)
except IOError, e:
logger.info("Writing failed:"+str(e))
else:
g = grammar.Grammar()
g.load(gp)
return g
def _newer(a, b):
"""Inquire whether file a was written since file b."""
if not os.path.exists(a):
return False
if not os.path.exists(b):
return True
return os.path.getmtime(a) >= os.path.getmtime(b)
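# Illustrative usage sketch (not part of the original module; assumes a
# Grammar.txt file is available relative to the working directory):
#     g = load_grammar()              # generates or unpickles the parse tables
#     drv = Driver(g)
#     tree = drv.parse_string("x = 1\n")
#     # 'tree' is the root node of the resulting syntax tree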
| agpl-3.0 |
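A minimal usage sketch for the driver above, assuming the same API as the stdlib lib2to3 package (pygram.python_grammar and pytree.convert are its usual companions; the parsed snippet is illustrative):

from lib2to3 import pygram, pytree
from lib2to3.pgen2 import driver

# Build a driver from the bundled Python grammar and parse a snippet.
d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
tree = d.parse_string("x = 1 + 2\n")
print(tree)  # root pytree node of the parsed snippet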
dragonfi/snowfall | pyglet-1.1.4/tests/font/ADD_FONT.py | 12 | 1603 | #!/usr/bin/env python
'''Test that a font distributed with the application can be displayed.
Four lines of text should be displayed, each in a different variant
(bold/italic/regular) of Action Man at 24pt. The Action Man fonts are
included in the test directory (tests/font) as action_man*.ttf.
Press ESC to end the test.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import os
import unittest
from pyglet import font
import base_text
base_path = os.path.dirname(__file__)
class TEST_ADD_FONT(base_text.TextTestBase):
font_name = 'Action Man'
def render(self):
font.add_file(os.path.join(base_path, 'action_man.ttf'))
font.add_file(os.path.join(base_path, 'action_man_bold.ttf'))
font.add_file(os.path.join(base_path, 'action_man_italic.ttf'))
font.add_file(os.path.join(base_path, 'action_man_bold_italic.ttf'))
fnt = font.load('Action Man', self.font_size)
fnt_b = font.load('Action Man', self.font_size, bold=True)
fnt_i = font.load('Action Man', self.font_size, italic=True)
fnt_bi = font.load('Action Man', self.font_size, bold=True, italic=True)
h = fnt.ascent - fnt.descent
self.labels = [
font.Text(fnt, 'Action Man', 10, 10 + 3 * h),
font.Text(fnt_i, 'Action Man Italic', 10, 10 + 2 * h),
font.Text(fnt_b, 'Action Man Bold', 10, 10 + h),
font.Text(fnt_bi, 'Action Man Bold Italic', 10, 10)
]
def draw(self):
for label in self.labels:
label.draw()
if __name__ == '__main__':
unittest.main()
| mit |
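A minimal sketch of the pattern this test exercises, assuming pyglet 1.1's font module and an action_man.ttf file next to the script (the path, point size and text are illustrative):

import os
from pyglet import font

# Register the bundled font file, then load a variant by family name.
font.add_file(os.path.join(os.path.dirname(__file__), "action_man.ttf"))
action_man = font.load("Action Man", 24, bold=True)
label = font.Text(action_man, "Hello from a bundled font", 10, 10)
# call label.draw() from the window's draw handler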
stscieisenhamer/ginga | ginga/tkw/TkHelp.py | 1 | 1380 | #
# TkHelp.py -- help module for Ginga Tk backend
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
class Timer(object):
"""Abstraction of a GUI-toolkit implemented timer."""
def __init__(self, ival_sec, expire_cb, data=None, tkcanvas=None):
"""Create a timer set to expire after `ival_sec` and which will
call the callable `expire_cb` when it expires.
"""
self.ival_sec = ival_sec
self.cb = expire_cb
self.data = data
self.tkcanvas = tkcanvas
self._timer = None
def start(self, ival_sec=None):
"""Start the timer. If `ival_sec` is not None, it should
specify the time to expiration in seconds.
"""
if ival_sec is None:
ival_sec = self.ival_sec
self.cancel()
# Tk timer set in milliseconds
time_ms = int(ival_sec * 1000.0)
self._timer = self.tkcanvas.after(time_ms, self._redirect_cb)
def _redirect_cb(self):
self._timer = None
self.cb(self)
def cancel(self):
"""Cancel this timer. If the timer is not running, there
is no error.
"""
try:
if self._timer is not None:
self.tkcanvas.after_cancel(self._timer)
self._timer = None
except:
pass
| bsd-3-clause |
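A minimal sketch of driving the Timer above from plain Tkinter, assuming the class is importable as ginga.tkw.TkHelp.Timer and that any widget with after()/after_cancel() can serve as tkcanvas (interval and callback are illustrative):

import tkinter as tk
from ginga.tkw.TkHelp import Timer

root = tk.Tk()

def on_expire(timer):
    # the callback receives the Timer instance itself
    print("timer fired after", timer.ival_sec, "seconds")
    root.destroy()

t = Timer(1.5, on_expire, tkcanvas=root)
t.start()
root.mainloop()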
12019/cyberflex-shell | tests/utilstest.py | 2 | 1220 | """Unit test for utils.py"""
import utils
import unittest
class APDUCase1Tests(unittest.TestCase):
def setUp(self):
self.a4 = utils.C_APDU("\x00\xa4\x00\x00")
def tearDown(self):
del self.a4
def testCreation(self):
self.assertEqual(0, self.a4.CLA)
self.assertEqual(0xa4, self.a4.INS)
self.assertEqual(0, self.a4.P1)
self.assertEqual(0, self.a4.P2)
def testRender(self):
self.assertEqual("\x00\xa4\x00\x00", self.a4.render())
def testCopy(self):
b0 = utils.C_APDU(self.a4, INS=0xb0)
self.assertEqual("\x00\xb0\x00\x00", b0.render())
def testAssign(self):
self.a4.p2 = 5
self.assertEqual(5, self.a4.P2)
self.assertEqual("\x00\xa4\x00\x05", self.a4.render())
def testCreateSequence(self):
a4_2 = utils.C_APDU(0, 0xa4, 0, 0)
self.assertEqual(self.a4.render(), a4_2.render())
class APDUChainTests(unittest.TestCase):
def testChain(self):
a = utils.R_APDU("abcd\x61\x04")
b = utils.R_APDU("efgh\x90\x00")
c = a.append(b)
self.assertEqual("abcdefgh\x90\x00", c.render())
| gpl-2.0 |
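A short sketch of the C_APDU/R_APDU behaviour the tests above assert, assuming the cyberflex-shell utils module is importable; this is Python 2 era code, so byte strings are plain str (the byte values are illustrative):

import utils

select = utils.C_APDU(0x00, 0xA4, 0x04, 0x00)       # CLA, INS, P1, P2
assert select.render() == "\x00\xa4\x04\x00"

read_binary = utils.C_APDU(select, INS=0xB0)         # copy with a field override

partial = utils.R_APDU("abcd\x61\x04")               # SW 61 04: more data available
complete = partial.append(utils.R_APDU("efgh\x90\x00"))
assert complete.render() == "abcdefgh\x90\x00"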
thaumos/ansible | test/units/modules/network/slxos/test_slxos_interface.py | 38 | 4938 | #
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from units.compat.mock import patch
from units.modules.utils import set_module_args
from ansible.modules.network.slxos import slxos_interface
from .slxos_module import TestSlxosModule, load_fixture
class TestSlxosInterfaceModule(TestSlxosModule):
module = slxos_interface
def setUp(self):
super(TestSlxosInterfaceModule, self).setUp()
self._patch_get_config = patch(
'ansible.modules.network.slxos.slxos_interface.get_config'
)
self._patch_load_config = patch(
'ansible.modules.network.slxos.slxos_interface.load_config'
)
self._patch_exec_command = patch(
'ansible.modules.network.slxos.slxos_interface.exec_command'
)
self._get_config = self._patch_get_config.start()
self._load_config = self._patch_load_config.start()
self._exec_command = self._patch_exec_command.start()
def tearDown(self):
super(TestSlxosInterfaceModule, self).tearDown()
self._patch_get_config.stop()
self._patch_load_config.stop()
self._patch_exec_command.stop()
def load_fixtures(self, commands=None):
config_file = 'slxos_config_config.cfg'
self._get_config.return_value = load_fixture(config_file)
self._load_config.return_value = None
def test_slxos_interface_description(self, *args, **kwargs):
set_module_args(dict(
name='Ethernet 0/2',
description='show version'
))
result = self.execute_module(changed=True)
self.assertEqual(
result,
{
'commands': [
'interface Ethernet 0/2',
'description show version'
],
'changed': True
}
)
def test_slxos_interface_speed(self, *args, **kwargs):
set_module_args(dict(
name='Ethernet 0/2',
speed=1000
))
result = self.execute_module(changed=True)
self.assertEqual(
result,
{
'commands': [
'interface Ethernet 0/2',
'speed 1000'
],
'changed': True
}
)
def test_slxos_interface_mtu(self, *args, **kwargs):
set_module_args(dict(
name='Ethernet 0/2',
mtu=1548
))
result = self.execute_module(changed=True)
self.assertEqual(
result,
{
'commands': [
'interface Ethernet 0/2',
'mtu 1548'
],
'changed': True
}
)
def test_slxos_interface_mtu_out_of_range(self, *args, **kwargs):
set_module_args(dict(
name='Ethernet 0/2',
mtu=15000
))
result = self.execute_module(failed=True)
self.assertEqual(
result,
{
'msg': 'mtu must be between 1548 and 9216',
'failed': True
}
)
def test_slxos_interface_enabled(self, *args, **kwargs):
set_module_args(dict(
name='Ethernet 0/1',
enabled=True
))
result = self.execute_module(changed=True)
self.assertEqual(
result,
{
'commands': [
'interface Ethernet 0/1',
'no shutdown'
],
'changed': True
}
)
def test_slxos_interface_invalid_argument(self, *args, **kwargs):
set_module_args(dict(
name='Ethernet 0/1',
shawshank='Redemption'
))
result = self.execute_module(failed=True)
self.assertEqual(result['failed'], True)
self.assertTrue(re.match(
r'Unsupported parameters for \((basic.py|basic.pyc)\) module: '
'shawshank Supported parameters include: aggregate, '
'delay, description, enabled, mtu, name, neighbors, '
'rx_rate, speed, state, tx_rate',
result['msg']
))
| gpl-3.0 |
Bismarrck/tensorflow | tensorflow/lite/experimental/examples/lstm/unidirectional_sequence_lstm_test.py | 9 | 8637 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.lite.experimental.examples.lstm.tflite_lstm import TFLiteLSTMCell
from tensorflow.lite.python.op_hint import convert_op_hints_to_stubs
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow.python.tools import optimize_for_inference_lib
# Number of steps to train model.
TRAIN_STEPS = 1
CONFIG = tf.ConfigProto(device_count={"GPU": 0})
class UnidirectionalSequenceLstmTest(test_util.TensorFlowTestCase):
def setUp(self):
tf.reset_default_graph()
# Import MNIST dataset
self.mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Define constants
# Unrolled through 28 time steps
self.time_steps = 28
# Rows of 28 pixels
self.n_input = 28
# Learning rate for Adam optimizer
self.learning_rate = 0.001
# MNIST is meant to be classified in 10 classes (0-9).
self.n_classes = 10
# Batch size
self.batch_size = 16
# Lstm Units.
self.num_units = 16
def buildLstmLayer(self):
return tf.nn.rnn_cell.MultiRNNCell([
TFLiteLSTMCell(
self.num_units, use_peepholes=True, forget_bias=0, name="rnn1"),
TFLiteLSTMCell(self.num_units, num_proj=8, forget_bias=0, name="rnn2"),
TFLiteLSTMCell(
self.num_units // 2,
use_peepholes=True,
num_proj=8,
forget_bias=0,
name="rnn3"),
TFLiteLSTMCell(self.num_units, forget_bias=0, name="rnn4")
])
def buildModel(self, lstm_layer, is_dynamic_rnn, is_train):
# Weights and biases for output softmax layer.
out_weights = tf.Variable(
tf.random_normal([self.num_units, self.n_classes]))
out_bias = tf.Variable(tf.random_normal([self.n_classes]))
# input image placeholder
x = tf.placeholder(
"float", [None, self.time_steps, self.n_input], name="INPUT_IMAGE")
# For dynamic_rnn, train with dynamic_rnn and inference with static_rnn.
# x is shaped [batch_size,time_steps,num_inputs]
if is_dynamic_rnn:
if is_train:
lstm_input = x
outputs, _ = tf.nn.dynamic_rnn(lstm_layer, lstm_input, dtype="float32")
outputs = tf.unstack(outputs, axis=1)
else:
lstm_input = tf.unstack(x, self.time_steps, 1)
outputs, _ = tf.nn.static_rnn(lstm_layer, lstm_input, dtype="float32")
else:
lstm_input = tf.unstack(x, self.time_steps, 1)
outputs, _ = tf.nn.static_rnn(lstm_layer, lstm_input, dtype="float32")
# Compute logits by multiplying outputs[-1] of shape [batch_size,num_units]
# by the softmax layer's out_weight of shape [num_units,n_classes]
# plus out_bias
prediction = tf.matmul(outputs[-1], out_weights) + out_bias
output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")
return x, prediction, output_class
def trainModel(self, x, prediction, output_class, sess):
# input label placeholder
y = tf.placeholder("float", [None, self.n_classes])
# Loss function
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
# Optimization
opt = tf.train.AdamOptimizer(
learning_rate=self.learning_rate).minimize(loss)
# Initialize variables
init = tf.global_variables_initializer()
sess.run(init)
for _ in range(TRAIN_STEPS):
batch_x, batch_y = self.mnist.train.next_batch(
batch_size=self.batch_size, shuffle=False)
batch_x = batch_x.reshape((self.batch_size, self.time_steps,
self.n_input))
sess.run(opt, feed_dict={x: batch_x, y: batch_y})
def saveAndRestoreModel(self, lstm_layer, sess, saver, is_dynamic_rnn):
model_dir = tempfile.mkdtemp()
saver.save(sess, model_dir)
# Reset the graph.
tf.reset_default_graph()
x, prediction, output_class = self.buildModel(
lstm_layer, is_dynamic_rnn, is_train=False)
new_sess = tf.Session(config=CONFIG)
saver = tf.train.Saver()
saver.restore(new_sess, model_dir)
return x, prediction, output_class, new_sess
def getInferenceResult(self, x, output_class, sess):
b1, _ = self.mnist.train.next_batch(batch_size=1)
sample_input = np.reshape(b1, (1, self.time_steps, self.n_input))
expected_output = sess.run(output_class, feed_dict={x: sample_input})
frozen_graph = tf.graph_util.convert_variables_to_constants(
sess, sess.graph_def, [output_class.op.name])
return sample_input, expected_output, frozen_graph
def tfliteInvoke(self, graph, test_inputs, outputs):
tf.reset_default_graph()
# Turn the input into a placeholder with batch size 1
tflite_input = tf.placeholder(
"float", [1, self.time_steps, self.n_input], name="INPUT_IMAGE_LITE")
tf.import_graph_def(graph, name="", input_map={"INPUT_IMAGE": tflite_input})
with tf.Session() as sess:
curr = sess.graph_def
curr = convert_op_hints_to_stubs(graph_def=curr)
curr = optimize_for_inference_lib.optimize_for_inference(
curr, ["INPUT_IMAGE_LITE"], ["OUTPUT_CLASS"],
[tf.float32.as_datatype_enum])
tflite = tf.lite.toco_convert(
curr, [tflite_input], [outputs], allow_custom_ops=False)
interpreter = tf.lite.Interpreter(model_content=tflite)
try:
interpreter.allocate_tensors()
except ValueError:
assert False
input_index = (interpreter.get_input_details()[0]["index"])
interpreter.set_tensor(input_index, test_inputs)
interpreter.invoke()
output_index = (interpreter.get_output_details()[0]["index"])
result = interpreter.get_tensor(output_index)
# Reset all variables so it will not pollute other inferences.
interpreter.reset_all_variables()
return result
def testStaticRnnMultiRnnCell(self):
sess = tf.Session(config=CONFIG)
x, prediction, output_class = self.buildModel(
self.buildLstmLayer(), is_dynamic_rnn=False, is_train=True)
self.trainModel(x, prediction, output_class, sess)
saver = tf.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildLstmLayer(), sess, saver, is_dynamic_rnn=False)
test_inputs, expected_output, frozen_graph = self.getInferenceResult(
x, output_class, new_sess)
result = self.tfliteInvoke(frozen_graph, test_inputs, output_class)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
def testDynamicRnnMultiRnnCell(self):
sess = tf.Session(config=CONFIG)
x, prediction, output_class = self.buildModel(
self.buildLstmLayer(), is_dynamic_rnn=True, is_train=True)
self.trainModel(x, prediction, output_class, sess)
# Since we don't yet support OpHints for dynamic_rnn, we load the model
# back in as a static model. This requires the variables to have the same
# names as if they had been trained as a static model, so we strip the
# while/rnn prefixes from their names.
variables_to_save = {}
for i in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
op_name = i.name
if op_name.startswith("while/rnn/"):
op_name = op_name.split("while/rnn/")[1]
if op_name.endswith(":0"):
op_name = op_name.split(":0")[0]
variables_to_save[op_name] = i
saver = tf.train.Saver(variables_to_save)
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildLstmLayer(), sess, saver, is_dynamic_rnn=True)
test_inputs, expected_output, frozen_graph = self.getInferenceResult(
x, output_class, new_sess)
result = self.tfliteInvoke(frozen_graph, test_inputs, output_class)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
if __name__ == "__main__":
test.main()
| apache-2.0 |
partofthething/home-assistant | homeassistant/components/nissan_leaf/__init__.py | 1 | 17730 | """Support for the Nissan Leaf Carwings/Nissan Connect API."""
import asyncio
from datetime import datetime, timedelta
import logging
import sys
from pycarwings2 import CarwingsError, Session
import voluptuous as vol
from homeassistant.const import CONF_PASSWORD, CONF_REGION, CONF_USERNAME, HTTP_OK
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import load_platform
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util.dt import utcnow
_LOGGER = logging.getLogger(__name__)
DOMAIN = "nissan_leaf"
DATA_LEAF = "nissan_leaf_data"
DATA_BATTERY = "battery"
DATA_CHARGING = "charging"
DATA_PLUGGED_IN = "plugged_in"
DATA_CLIMATE = "climate"
DATA_RANGE_AC = "range_ac_on"
DATA_RANGE_AC_OFF = "range_ac_off"
CONF_INTERVAL = "update_interval"
CONF_CHARGING_INTERVAL = "update_interval_charging"
CONF_CLIMATE_INTERVAL = "update_interval_climate"
CONF_VALID_REGIONS = ["NNA", "NE", "NCI", "NMA", "NML"]
CONF_FORCE_MILES = "force_miles"
INITIAL_UPDATE = timedelta(seconds=15)
MIN_UPDATE_INTERVAL = timedelta(minutes=2)
DEFAULT_INTERVAL = timedelta(hours=1)
DEFAULT_CHARGING_INTERVAL = timedelta(minutes=15)
DEFAULT_CLIMATE_INTERVAL = timedelta(minutes=5)
RESTRICTED_BATTERY = 2
RESTRICTED_INTERVAL = timedelta(hours=12)
MAX_RESPONSE_ATTEMPTS = 10
PYCARWINGS2_SLEEP = 30
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_REGION): vol.In(CONF_VALID_REGIONS),
vol.Optional(CONF_INTERVAL, default=DEFAULT_INTERVAL): (
vol.All(cv.time_period, vol.Clamp(min=MIN_UPDATE_INTERVAL))
),
vol.Optional(
CONF_CHARGING_INTERVAL, default=DEFAULT_CHARGING_INTERVAL
): (
vol.All(cv.time_period, vol.Clamp(min=MIN_UPDATE_INTERVAL))
),
vol.Optional(
CONF_CLIMATE_INTERVAL, default=DEFAULT_CLIMATE_INTERVAL
): (
vol.All(cv.time_period, vol.Clamp(min=MIN_UPDATE_INTERVAL))
),
vol.Optional(CONF_FORCE_MILES, default=False): cv.boolean,
}
)
],
)
},
extra=vol.ALLOW_EXTRA,
)
PLATFORMS = ["sensor", "switch", "binary_sensor"]
SIGNAL_UPDATE_LEAF = "nissan_leaf_update"
SERVICE_UPDATE_LEAF = "update"
SERVICE_START_CHARGE_LEAF = "start_charge"
ATTR_VIN = "vin"
UPDATE_LEAF_SCHEMA = vol.Schema({vol.Required(ATTR_VIN): cv.string})
START_CHARGE_LEAF_SCHEMA = vol.Schema({vol.Required(ATTR_VIN): cv.string})
def setup(hass, config):
"""Set up the Nissan Leaf integration."""
async def async_handle_update(service):
"""Handle service to update leaf data from Nissan servers."""
# It would be better if this was changed to use nickname, or
# an entity name rather than a vin.
vin = service.data[ATTR_VIN]
if vin in hass.data[DATA_LEAF]:
data_store = hass.data[DATA_LEAF][vin]
await data_store.async_update_data(utcnow())
else:
_LOGGER.debug("Vin %s not recognised for update", vin)
async def async_handle_start_charge(service):
"""Handle service to start charging."""
# It would be better if this was changed to use nickname, or
# an entity name rather than a vin.
vin = service.data[ATTR_VIN]
if vin in hass.data[DATA_LEAF]:
data_store = hass.data[DATA_LEAF][vin]
# Send the request to start charging to the Nissan servers. If
# that completes OK, wait a minute for the request to reach the
# car and then trigger a fresh update to pull the charging
# status back from it.
result = await hass.async_add_executor_job(data_store.leaf.start_charging)
if result:
_LOGGER.debug("Start charging sent, request updated data in 1 minute")
check_charge_at = utcnow() + timedelta(minutes=1)
data_store.next_update = check_charge_at
async_track_point_in_utc_time(
hass, data_store.async_update_data, check_charge_at
)
else:
_LOGGER.debug("Vin %s not recognised for update", vin)
def setup_leaf(car_config):
"""Set up a car."""
_LOGGER.debug("Logging into You+Nissan...")
username = car_config[CONF_USERNAME]
password = car_config[CONF_PASSWORD]
region = car_config[CONF_REGION]
leaf = None
try:
# This might need to be made async (somehow); as it stands it
# causes Home Assistant to be slow to start.
sess = Session(username, password, region)
leaf = sess.get_leaf()
except KeyError:
_LOGGER.error(
"Unable to fetch car details..."
" do you actually have a Leaf connected to your account?"
)
return False
except CarwingsError:
_LOGGER.error(
"An unknown error occurred while connecting to Nissan: %s",
sys.exc_info()[0],
)
return False
_LOGGER.warning(
"WARNING: This may poll your Leaf too often, and drain the 12V"
" battery. If you drain your cars 12V battery it WILL NOT START"
" as the drive train battery won't connect."
" Don't set the intervals too low"
)
data_store = LeafDataStore(hass, leaf, car_config)
hass.data[DATA_LEAF][leaf.vin] = data_store
for platform in PLATFORMS:
load_platform(hass, platform, DOMAIN, {}, car_config)
async_track_point_in_utc_time(
hass, data_store.async_update_data, utcnow() + INITIAL_UPDATE
)
hass.data[DATA_LEAF] = {}
for car in config[DOMAIN]:
setup_leaf(car)
hass.services.register(
DOMAIN, SERVICE_UPDATE_LEAF, async_handle_update, schema=UPDATE_LEAF_SCHEMA
)
hass.services.register(
DOMAIN,
SERVICE_START_CHARGE_LEAF,
async_handle_start_charge,
schema=START_CHARGE_LEAF_SCHEMA,
)
return True
class LeafDataStore:
"""Nissan Leaf Data Store."""
def __init__(self, hass, leaf, car_config):
"""Initialise the data store."""
self.hass = hass
self.leaf = leaf
self.car_config = car_config
self.force_miles = car_config[CONF_FORCE_MILES]
self.data = {}
self.data[DATA_CLIMATE] = False
self.data[DATA_BATTERY] = 0
self.data[DATA_CHARGING] = False
self.data[DATA_RANGE_AC] = 0
self.data[DATA_RANGE_AC_OFF] = 0
self.data[DATA_PLUGGED_IN] = False
self.next_update = None
self.last_check = None
self.request_in_progress = False
# Timestamp of last successful response from battery or climate.
self.last_battery_response = None
self.last_climate_response = None
self._remove_listener = None
async def async_update_data(self, now):
"""Update data from nissan leaf."""
# Prevent a previously scheduled update and an ad-hoc update (such as
# one triggered by the update service) from both running at once.
if self._remove_listener:
self._remove_listener()
self._remove_listener = None
# Clear next update whilst this update is underway
self.next_update = None
await self.async_refresh_data(now)
self.next_update = self.get_next_interval()
_LOGGER.debug("Next update=%s", self.next_update)
self._remove_listener = async_track_point_in_utc_time(
self.hass, self.async_update_data, self.next_update
)
def get_next_interval(self):
"""Calculate when the next update should occur."""
base_interval = self.car_config[CONF_INTERVAL]
climate_interval = self.car_config[CONF_CLIMATE_INTERVAL]
charging_interval = self.car_config[CONF_CHARGING_INTERVAL]
# The 12V battery is used when communicating with Nissan servers.
# The 12V battery is charged from the traction battery when not
# connected and when the traction battery has enough charge. To
# avoid draining the 12V battery we shall restrict the update
# frequency if a low battery is detected.
if (
self.last_battery_response is not None
and self.data[DATA_CHARGING] is False
and self.data[DATA_BATTERY] <= RESTRICTED_BATTERY
):
_LOGGER.debug(
"Low battery so restricting refresh frequency (%s)", self.leaf.nickname
)
interval = RESTRICTED_INTERVAL
else:
intervals = [base_interval]
if self.data[DATA_CHARGING]:
intervals.append(charging_interval)
if self.data[DATA_CLIMATE]:
intervals.append(climate_interval)
interval = min(intervals)
return utcnow() + interval
async def async_refresh_data(self, now):
"""Refresh the leaf data and update the datastore."""
if self.request_in_progress:
_LOGGER.debug("Refresh currently in progress for %s", self.leaf.nickname)
return
_LOGGER.debug("Updating Nissan Leaf Data")
self.last_check = datetime.today()
self.request_in_progress = True
server_response = await self.async_get_battery()
if server_response is not None:
_LOGGER.debug("Server Response: %s", server_response.__dict__)
if server_response.answer["status"] == HTTP_OK:
self.data[DATA_BATTERY] = server_response.battery_percent
# The pycarwings2 library doesn't always provide cruising ranges,
# so we have to check if they exist before we can use them.
# Root cause: the nissan servers don't always send the data.
if hasattr(server_response, "cruising_range_ac_on_km"):
self.data[DATA_RANGE_AC] = server_response.cruising_range_ac_on_km
else:
self.data[DATA_RANGE_AC] = None
if hasattr(server_response, "cruising_range_ac_off_km"):
self.data[
DATA_RANGE_AC_OFF
] = server_response.cruising_range_ac_off_km
else:
self.data[DATA_RANGE_AC_OFF] = None
self.data[DATA_PLUGGED_IN] = server_response.is_connected
self.data[DATA_CHARGING] = server_response.is_charging
async_dispatcher_send(self.hass, SIGNAL_UPDATE_LEAF)
self.last_battery_response = utcnow()
# Climate response only updated if battery data updated first.
if server_response is not None:
try:
climate_response = await self.async_get_climate()
if climate_response is not None:
_LOGGER.debug(
"Got climate data for Leaf: %s", climate_response.__dict__
)
self.data[DATA_CLIMATE] = climate_response.is_hvac_running
self.last_climate_response = utcnow()
except CarwingsError:
_LOGGER.error("Error fetching climate info")
self.request_in_progress = False
async_dispatcher_send(self.hass, SIGNAL_UPDATE_LEAF)
@staticmethod
def _extract_start_date(battery_info):
"""Extract the server date from the battery response."""
try:
return battery_info.answer["BatteryStatusRecords"]["OperationDateAndTime"]
except KeyError:
return None
async def async_get_battery(self):
"""Request battery update from Nissan servers."""
try:
# Request battery update from the car
_LOGGER.debug("Requesting battery update, %s", self.leaf.vin)
request = await self.hass.async_add_executor_job(self.leaf.request_update)
if not request:
_LOGGER.error("Battery update request failed")
return None
for attempt in range(MAX_RESPONSE_ATTEMPTS):
_LOGGER.debug(
"Waiting %s seconds for battery update (%s) (%s)",
PYCARWINGS2_SLEEP,
self.leaf.vin,
attempt,
)
await asyncio.sleep(PYCARWINGS2_SLEEP)
# We don't use the response from get_status_from_update
# apart from knowing that the car has responded saying it
# has given the latest battery status to Nissan.
check_result_info = await self.hass.async_add_executor_job(
self.leaf.get_status_from_update, request
)
if check_result_info is not None:
# Get the latest battery status from Nissan servers.
# This has the SOC in it.
server_info = await self.hass.async_add_executor_job(
self.leaf.get_latest_battery_status
)
return server_info
_LOGGER.debug(
"%s attempts exceeded return latest data from server",
MAX_RESPONSE_ATTEMPTS,
)
# Get the latest data from the nissan servers, even though
# it may be out of date, it's better than nothing.
server_info = await self.hass.async_add_executor_job(
self.leaf.get_latest_battery_status
)
return server_info
except CarwingsError:
_LOGGER.error("An error occurred getting battery status")
return None
except KeyError:
_LOGGER.error("An error occurred parsing response from server")
return None
async def async_get_climate(self):
"""Request climate data from Nissan servers."""
try:
return await self.hass.async_add_executor_job(
self.leaf.get_latest_hvac_status
)
except CarwingsError:
_LOGGER.error(
"An error occurred communicating with the car %s", self.leaf.vin
)
return None
async def async_set_climate(self, toggle):
"""Set climate control mode via Nissan servers."""
climate_result = None
if toggle:
_LOGGER.debug("Requesting climate turn on for %s", self.leaf.vin)
set_function = self.leaf.start_climate_control
result_function = self.leaf.get_start_climate_control_result
else:
_LOGGER.debug("Requesting climate turn off for %s", self.leaf.vin)
set_function = self.leaf.stop_climate_control
result_function = self.leaf.get_stop_climate_control_result
request = await self.hass.async_add_executor_job(set_function)
for attempt in range(MAX_RESPONSE_ATTEMPTS):
if attempt > 0:
_LOGGER.debug(
"Climate data not in yet (%s) (%s). Waiting (%s) seconds",
self.leaf.vin,
attempt,
PYCARWINGS2_SLEEP,
)
await asyncio.sleep(PYCARWINGS2_SLEEP)
climate_result = await self.hass.async_add_executor_job(
result_function, request
)
if climate_result is not None:
break
if climate_result is not None:
_LOGGER.debug("Climate result: %s", climate_result.__dict__)
async_dispatcher_send(self.hass, SIGNAL_UPDATE_LEAF)
return climate_result.is_hvac_running == toggle
_LOGGER.debug("Climate result not returned by Nissan servers")
return False
class LeafEntity(Entity):
"""Base class for Nissan Leaf entity."""
def __init__(self, car):
"""Store LeafDataStore upon init."""
self.car = car
def log_registration(self):
"""Log registration."""
_LOGGER.debug(
"Registered %s integration for VIN %s",
self.__class__.__name__,
self.car.leaf.vin,
)
@property
def device_state_attributes(self):
"""Return default attributes for Nissan leaf entities."""
return {
"next_update": self.car.next_update,
"last_attempt": self.car.last_check,
"updated_on": self.car.last_battery_response,
"update_in_progress": self.car.request_in_progress,
"vin": self.car.leaf.vin,
}
async def async_added_to_hass(self):
"""Register callbacks."""
self.log_registration()
self.async_on_remove(
async_dispatcher_connect(
self.car.hass, SIGNAL_UPDATE_LEAF, self._update_callback
)
)
@callback
def _update_callback(self):
"""Update the state."""
self.async_schedule_update_ha_state(True)
| mit |
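A minimal sketch of a configuration block that CONFIG_SCHEMA above accepts, written as the equivalent Python structure (credentials and intervals are illustrative; in a real installation this would live in Home Assistant's configuration.yaml):

from datetime import timedelta

example = {
    "nissan_leaf": [
        {
            "username": "you@example.com",
            "password": "not-a-real-password",
            "region": "NE",                               # must be one of CONF_VALID_REGIONS
            "update_interval": timedelta(hours=2),
            "update_interval_charging": timedelta(minutes=15),
            "update_interval_climate": timedelta(minutes=5),
            "force_miles": True,
        }
    ]
}
validated = CONFIG_SCHEMA(example)   # raises vol.Invalid if anything is malformed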